diff --git a/Makefile b/Makefile index 5f8587511..402b66cfa 100644 --- a/Makefile +++ b/Makefile @@ -42,9 +42,9 @@ all: test build package build: build-linux-amd64 build-linux-arm64 build-windows-amd64 -build-linux-amd64: build-linux-amd64-lifecycle build-linux-amd64-symlinks build-linux-amd64-launcher -build-linux-arm64: build-linux-arm64-lifecycle build-linux-arm64-symlinks build-linux-arm64-launcher -build-windows-amd64: build-windows-amd64-lifecycle build-windows-amd64-symlinks build-windows-amd64-launcher +build-linux-amd64: build-linux-amd64-lifecycle build-linux-amd64-symlinks build-linux-amd64-launcher build-linux-amd64-extender +build-linux-arm64: build-linux-arm64-lifecycle build-linux-arm64-symlinks build-linux-arm64-launcher build-linux-arm64-extender +build-windows-amd64: build-windows-amd64-lifecycle build-windows-amd64-symlinks build-windows-amd64-launcher build-windows-amd64-extender build-image-linux-amd64: build-linux-amd64 package-linux-amd64 build-image-linux-amd64: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+linux.x86-64.tgz @@ -107,6 +107,28 @@ $(BUILD_DIR)/linux-arm64/lifecycle/launcher: $(GOENV) $(GOBUILD) -o $(OUT_DIR)/launcher -a ./cmd/launcher test $$(du -m $(OUT_DIR)/launcher|cut -f 1) -le 3 +build-linux-amd64-extender: $(BUILD_DIR)/linux-amd64/lifecycle/extender + +$(BUILD_DIR)/linux-amd64/lifecycle/extender: export GOOS:=linux +$(BUILD_DIR)/linux-amd64/lifecycle/extender: export GOARCH:=amd64 +$(BUILD_DIR)/linux-amd64/lifecycle/extender: OUT_DIR?=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle +$(BUILD_DIR)/linux-amd64/lifecycle/extender: $(GOFILES) +$(BUILD_DIR)/linux-amd64/lifecycle/extender: + @echo "> Building lifecycle/extender for $(GOOS)/$(GOARCH)..." + mkdir -p $(OUT_DIR) + cd ./extender && $(GOENV) $(GOBUILD) -o $(OUT_DIR)/extender -a . + +build-linux-arm64-extender: $(BUILD_DIR)/linux-arm64/lifecycle/extender + +$(BUILD_DIR)/linux-arm64/lifecycle/extender: export GOOS:=linux +$(BUILD_DIR)/linux-arm64/lifecycle/extender: export GOARCH:=arm64 +$(BUILD_DIR)/linux-arm64/lifecycle/extender: OUT_DIR?=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle +$(BUILD_DIR)/linux-arm64/lifecycle/extender: $(GOFILES) +$(BUILD_DIR)/linux-arm64/lifecycle/extender: + @echo "> Building lifecycle/extender for $(GOOS)/$(GOARCH)..." + mkdir -p $(OUT_DIR) + cd ./extender && $(GOENV) $(GOBUILD) -o $(OUT_DIR)/extender -a . + build-linux-amd64-symlinks: export GOOS:=linux build-linux-amd64-symlinks: export GOARCH:=amd64 build-linux-amd64-symlinks: OUT_DIR?=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle @@ -153,6 +175,18 @@ $(BUILD_DIR)/windows-amd64/lifecycle/launcher.exe: @echo "> Building lifecycle/launcher for $(GOOS)/$(GOARCH)..." $(GOBUILD) -o $(OUT_DIR)$/launcher.exe -a .$/cmd$/launcher +# TODO: the Windows extender build below does not currently work +# and needs to be revisited before release +build-windows-amd64-extender: $(BUILD_DIR)/windows-amd64/lifecycle/extender.exe + +$(BUILD_DIR)/windows-amd64/lifecycle/extender.exe: export GOOS:=windows +$(BUILD_DIR)/windows-amd64/lifecycle/extender.exe: export GOARCH:=amd64 +$(BUILD_DIR)/windows-amd64/lifecycle/extender.exe: OUT_DIR?=$(BUILD_DIR)$/$(GOOS)-$(GOARCH)$/lifecycle +$(BUILD_DIR)/windows-amd64/lifecycle/extender.exe: $(GOFILES) +$(BUILD_DIR)/windows-amd64/lifecycle/extender.exe: + @echo "> Building lifecycle/extender for $(GOOS)/$(GOARCH)..." + cd .$/extender && $(GOBUILD) -o $(OUT_DIR)$/extender.exe -a . 
+ build-windows-amd64-symlinks: export GOOS:=windows build-windows-amd64-symlinks: export GOARCH:=amd64 build-windows-amd64-symlinks: OUT_DIR?=$(BUILD_DIR)$/$(GOOS)-$(GOARCH)$/lifecycle diff --git a/analyzer.go b/analyzer.go index f2417d83e..6256781e3 100644 --- a/analyzer.go +++ b/analyzer.go @@ -23,7 +23,7 @@ type Analyzer struct { SBOMRestorer layer.SBOMRestorer // Platform API < 0.7 - Buildpacks []buildpack.GroupBuildpack + Buildpacks []buildpack.GroupBuildable Cache Cache LayerMetadataRestorer layer.MetadataRestorer } diff --git a/analyzer_test.go b/analyzer_test.go index ca9abb049..6745da788 100644 --- a/analyzer_test.go +++ b/analyzer_test.go @@ -80,7 +80,7 @@ func testAnalyzerBuilder(platformAPI string) func(t *testing.T, when spec.G, it PreviousImage: image, Logger: &discardLogger, Platform: p, - Buildpacks: []buildpack.GroupBuildpack{ + Buildpacks: []buildpack.GroupBuildable{ {ID: "metadata.buildpack", API: api.Buildpack.Latest().String()}, {ID: "no.cache.buildpack", API: api.Buildpack.Latest().String()}, {ID: "no.metadata.buildpack", API: api.Buildpack.Latest().String()}, @@ -163,7 +163,7 @@ func testAnalyzerBuilder(platformAPI string) func(t *testing.T, when spec.G, it h.AssertNil(t, testCache.SetMetadata(expectedCacheMetadata)) h.AssertNil(t, testCache.Commit()) - analyzer.Buildpacks = append(analyzer.Buildpacks, buildpack.GroupBuildpack{ID: "escaped/buildpack/id", API: api.Buildpack.Latest().String()}) + analyzer.Buildpacks = append(analyzer.Buildpacks, buildpack.GroupBuildable{ID: "escaped/buildpack/id", API: api.Buildpack.Latest().String()}) expectRestoresLayerMetadataIfSupported() }) diff --git a/builder.go b/builder.go index ec24f45b0..af6227531 100644 --- a/builder.go +++ b/builder.go @@ -25,11 +25,11 @@ type BuildEnv interface { List() []string } -type BuildpackStore interface { - Lookup(bpID, bpVersion string) (buildpack.Buildpack, error) +type BuildableStore interface { + Lookup(bpID, bpVersion string) (buildpack.Buildable, error) } -type Buildpack interface { +type Buildable interface { Build(bpPlan buildpack.Plan, config buildpack.BuildConfig, bpEnv buildpack.BuildEnv) (buildpack.BuildResult, error) ConfigFile() *buildpack.Descriptor Detect(config *buildpack.DetectConfig, bpEnv buildpack.BuildEnv) buildpack.DetectRun @@ -44,7 +44,7 @@ type Builder struct { Plan platform.BuildPlan Out, Err io.Writer Logger Logger - BuildpackStore BuildpackStore + BuildableStore BuildableStore } func (b *Builder) Build() (*platform.BuildMetadata, error) { @@ -66,43 +66,42 @@ func (b *Builder) Build() (*platform.BuildMetadata, error) { var bomFiles []buildpack.BOMFile var slices []layers.Slice var labels []buildpack.Label + var dockerfiles []buildpack.Dockerfile - bpEnv := env.NewBuildEnv(os.Environ()) + bEnv := env.NewBuildEnv(os.Environ()) - for _, bp := range b.Group.Group { - b.Logger.Debugf("Running build for buildpack %s", bp) + for _, groupBuildable := range b.Group.Group { // TODO: execute extensions in parallel + b.Logger.Debugf("Running build for buildpack %s", groupBuildable) - b.Logger.Debug("Looking up buildpack") - bpTOML, err := b.BuildpackStore.Lookup(bp.ID, bp.Version) + b.Logger.Debug("Looking up buildable") // TODO: better comment + buildable, err := b.BuildableStore.Lookup(groupBuildable.ID, groupBuildable.Version) if err != nil { return nil, err } b.Logger.Debug("Finding plan") - bpPlan := plan.Find(bp.ID) + bPlan := plan.Find(groupBuildable.ID) - br, err := bpTOML.Build(bpPlan, config, bpEnv) + br, err := buildable.Build(bPlan, config, bEnv) if err != nil { return 
nil, err } - b.Logger.Debug("Updating buildpack processes") - updateDefaultProcesses(br.Processes, api.MustParse(bp.API), b.Platform.API()) - - bom = append(bom, br.BOM...) - bomFiles = append(bomFiles, br.BOMFiles...) - labels = append(labels, br.Labels...) - plan = plan.Filter(br.MetRequires) - - b.Logger.Debug("Updating process list") + b.Logger.Debug("Updating processes") + updateDefaultProcesses(br.Processes, api.MustParse(groupBuildable.API), b.Platform.API()) warning := processMap.add(br.Processes) if warning != "" { b.Logger.Warn(warning) } + bom = append(bom, br.BOM...) + bomFiles = append(bomFiles, br.BOMFiles...) + labels = append(labels, br.Labels...) + plan = plan.Filter(br.MetRequires) slices = append(slices, br.Slices...) + dockerfiles = append(dockerfiles, br.Dockerfiles...) // TODO: error if buildpack outputs Dockerfiles? - b.Logger.Debugf("Finished running build for buildpack %s", bp) + b.Logger.Debugf("Finished running build for buildpack %s", groupBuildable) } if b.Platform.API().LessThan("0.4") { @@ -126,11 +125,12 @@ func (b *Builder) Build() (*platform.BuildMetadata, error) { b.Logger.Debug("Finished build") return &platform.BuildMetadata{ BOM: bom, + BuildpackDefaultProcessType: processMap.defaultType, Buildpacks: b.Group.Group, + Dockerfiles: dockerfiles, Labels: labels, Processes: procList, Slices: slices, - BuildpackDefaultProcessType: processMap.defaultType, }, nil } diff --git a/builder_test.go b/builder_test.go index 2b22466fa..198904e35 100644 --- a/builder_test.go +++ b/builder_test.go @@ -33,7 +33,7 @@ func TestBuilder(t *testing.T) { } //go:generate mockgen -package testmock -destination testmock/env.go github.com/buildpacks/lifecycle BuildEnv -//go:generate mockgen -package testmock -destination testmock/buildpack_store.go github.com/buildpacks/lifecycle BuildpackStore +//go:generate mockgen -package testmock -destination testmock/buildpack_store.go github.com/buildpacks/lifecycle BuildableStore -//go:generate mockgen -package testmock -destination testmock/buildpack.go github.com/buildpacks/lifecycle Buildpack +//go:generate mockgen -package testmock -destination testmock/buildpack.go github.com/buildpacks/lifecycle Buildable func testBuilder(t *testing.T, when spec.G, it spec.S) { @@ -71,7 +71,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) { PlatformDir: platformDir, Platform: platform.NewPlatform(api.Platform.Latest().String()), Group: buildpack.Group{ - Group: []buildpack.GroupBuildpack{ + Group: []buildpack.GroupBuildable{ {ID: "A", Version: "v1", API: api.Buildpack.Latest().String(), Homepage: "Buildpack A Homepage"}, {ID: "B", Version: "v2", API: api.Buildpack.Latest().String()}, }, @@ -79,7 +79,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) { Out: stdout, Err: stderr, Logger: &log.Logger{Handler: logHandler}, - BuildpackStore: buildpackStore, + BuildableStore: buildpackStore, } config, err = builder.BuildConfig() @@ -99,7 +99,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) { builder.Plan = platform.BuildPlan{ Entries: []platform.BuildPlanEntry{ { - Providers: []buildpack.GroupBuildpack{ + Providers: []buildpack.GroupBuildable{ {ID: "A", Version: "v1"}, {ID: "B", Version: "v2"}, }, @@ -108,7 +108,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) { }, }, { - Providers: []buildpack.GroupBuildpack{ + Providers: []buildpack.GroupBuildable{ {ID: "A", Version: "v1"}, {ID: "B", Version: "v2"}, }, @@ -117,7 +117,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) { }, }, { - Providers: []buildpack.GroupBuildpack{ + Providers: []buildpack.GroupBuildable{ {ID: "B", Version: "v2"}, }, Requires: []buildpack.Require{ @@ 
-296,7 +296,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) { when("build metadata", func() { when("bom", func() { it("should aggregate BOM from each buildpack", func() { - builder.Group.Group = []buildpack.GroupBuildpack{ + builder.Group.Group = []buildpack.GroupBuildable{ {ID: "A", Version: "v1", API: "0.5", Homepage: "Buildpack A Homepage"}, {ID: "B", Version: "v2", API: "0.2"}, } @@ -310,7 +310,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) { Name: "dep1", Metadata: map[string]interface{}{"version": "v1"}, }, - Buildpack: buildpack.GroupBuildpack{ID: "A", Version: "v1"}, + Buildpack: buildpack.GroupBuildable{ID: "A", Version: "v1"}, }, }, }, nil) @@ -323,7 +323,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) { Name: "dep2", Metadata: map[string]interface{}{"version": "v1"}, }, - Buildpack: buildpack.GroupBuildpack{ID: "B", Version: "v2"}, + Buildpack: buildpack.GroupBuildable{ID: "B", Version: "v2"}, }, }, }, nil) @@ -339,7 +339,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) { Version: "", Metadata: map[string]interface{}{"version": string("v1")}, }, - Buildpack: buildpack.GroupBuildpack{ID: "A", Version: "v1"}, + Buildpack: buildpack.GroupBuildable{ID: "A", Version: "v1"}, }, { Require: buildpack.Require{ @@ -347,7 +347,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) { Version: "", Metadata: map[string]interface{}{"version": string("v1")}, }, - Buildpack: buildpack.GroupBuildpack{ID: "B", Version: "v2"}, + Buildpack: buildpack.GroupBuildable{ID: "B", Version: "v2"}, }, }); s != "" { t.Fatalf("Unexpected:\n%s\n", s) @@ -368,7 +368,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) { if err != nil { t.Fatalf("Unexpected error:\n%s\n", err) } - if s := cmp.Diff(metadata.Buildpacks, []buildpack.GroupBuildpack{ + if s := cmp.Diff(metadata.Buildpacks, []buildpack.GroupBuildable{ {ID: "A", Version: "v1", API: api.Buildpack.Latest().String(), Homepage: "Buildpack A Homepage"}, {ID: "B", Version: "v2", API: api.Buildpack.Latest().String()}, }); s != "" { @@ -488,7 +488,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) { when("multiple default process types", func() { it.Before(func() { - builder.Group.Group = []buildpack.GroupBuildpack{ + builder.Group.Group = []buildpack.GroupBuildable{ {ID: "A", Version: "v1", API: api.Buildpack.Latest().String()}, {ID: "B", Version: "v2", API: api.Buildpack.Latest().String()}, {ID: "C", Version: "v3", API: api.Buildpack.Latest().String()}, @@ -568,7 +568,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) { when("overriding default process type, with a non-default process type", func() { it.Before(func() { - builder.Group.Group = []buildpack.GroupBuildpack{ + builder.Group.Group = []buildpack.GroupBuildable{ {ID: "A", Version: "v1", API: api.Buildpack.Latest().String()}, {ID: "B", Version: "v2", API: api.Buildpack.Latest().String()}, {ID: "C", Version: "v3", API: api.Buildpack.Latest().String()}, @@ -653,7 +653,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) { when("there is a web process", func() { when("buildpack API >= 0.6", func() { it.Before(func() { - builder.Group.Group = []buildpack.GroupBuildpack{ + builder.Group.Group = []buildpack.GroupBuildable{ {ID: "A", Version: "v1", API: api.Buildpack.Latest().String()}, } }) @@ -696,7 +696,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) { when("buildpack api < 0.6", func() { it.Before(func() { - builder.Group.Group = []buildpack.GroupBuildpack{ + builder.Group.Group = []buildpack.GroupBuildable{ 
{ID: "A", Version: "v1", API: "0.5"}, } }) @@ -841,7 +841,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) { Name: "dep1", Metadata: map[string]interface{}{"version": string("v1")}, }, - Buildpack: buildpack.GroupBuildpack{ID: "A", Version: "v1"}, + Buildpack: buildpack.GroupBuildable{ID: "A", Version: "v1"}, }, }, }, nil) @@ -861,7 +861,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) { Version: "v1", Metadata: map[string]interface{}{"version": string("v1")}, }, - Buildpack: buildpack.GroupBuildpack{ID: "A", Version: "v1"}, + Buildpack: buildpack.GroupBuildable{ID: "A", Version: "v1"}, }, }); s != "" { t.Fatalf("Unexpected:\n%s\n", s) @@ -879,7 +879,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) { when("there is a web process", func() { when("buildpack API >= 0.6", func() { it.Before(func() { - builder.Group.Group = []buildpack.GroupBuildpack{ + builder.Group.Group = []buildpack.GroupBuildable{ {ID: "A", Version: "v1", API: api.Buildpack.Latest().String()}, } }) @@ -923,7 +923,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) { when("buildpack api < 0.6", func() { it.Before(func() { - builder.Group.Group = []buildpack.GroupBuildpack{ + builder.Group.Group = []buildpack.GroupBuildable{ {ID: "A", Version: "v1", API: "0.5"}, } }) diff --git a/buildpack/bom.go b/buildpack/bom.go index 338aaa6c7..405abecab 100644 --- a/buildpack/bom.go +++ b/buildpack/bom.go @@ -8,7 +8,7 @@ import ( ) type BOMValidator interface { - ValidateBOM(GroupBuildpack, []BOMEntry) ([]BOMEntry, error) + ValidateBOM(GroupBuildable, []BOMEntry) ([]BOMEntry, error) } func NewBOMValidator(bpAPI string, logger Logger) BOMValidator { @@ -26,7 +26,7 @@ type defaultBOMValidator struct { logger Logger } -func (v *defaultBOMValidator) ValidateBOM(bp GroupBuildpack, bom []BOMEntry) ([]BOMEntry, error) { +func (v *defaultBOMValidator) ValidateBOM(bp GroupBuildable, bom []BOMEntry) ([]BOMEntry, error) { if err := v.validateBOM(bom); err != nil { return []BOMEntry{}, err } @@ -40,13 +40,13 @@ func (v *defaultBOMValidator) validateBOM(bom []BOMEntry) error { return nil } -func (v *defaultBOMValidator) processBOM(_ GroupBuildpack, _ []BOMEntry) []BOMEntry { +func (v *defaultBOMValidator) processBOM(_ GroupBuildable, _ []BOMEntry) []BOMEntry { return []BOMEntry{} } type v05To06BOMValidator struct{} -func (v *v05To06BOMValidator) ValidateBOM(bp GroupBuildpack, bom []BOMEntry) ([]BOMEntry, error) { +func (v *v05To06BOMValidator) ValidateBOM(bp GroupBuildable, bom []BOMEntry) ([]BOMEntry, error) { if err := v.validateBOM(bom); err != nil { return []BOMEntry{}, err } @@ -62,13 +62,13 @@ func (v *v05To06BOMValidator) validateBOM(bom []BOMEntry) error { return nil } -func (v *v05To06BOMValidator) processBOM(buildpack GroupBuildpack, bom []BOMEntry) []BOMEntry { +func (v *v05To06BOMValidator) processBOM(buildpack GroupBuildable, bom []BOMEntry) []BOMEntry { return WithBuildpack(buildpack, bom) } type legacyBOMValidator struct{} -func (v *legacyBOMValidator) ValidateBOM(bp GroupBuildpack, bom []BOMEntry) ([]BOMEntry, error) { +func (v *legacyBOMValidator) ValidateBOM(bp GroupBuildable, bom []BOMEntry) ([]BOMEntry, error) { if err := v.validateBOM(bom); err != nil { return []BOMEntry{}, err } @@ -87,7 +87,7 @@ func (v *legacyBOMValidator) validateBOM(bom []BOMEntry) error { return nil } -func (v *legacyBOMValidator) processBOM(buildpack GroupBuildpack, bom []BOMEntry) []BOMEntry { +func (v *legacyBOMValidator) processBOM(buildpack GroupBuildable, bom []BOMEntry) []BOMEntry { bom = 
WithBuildpack(buildpack, bom) for i := range bom { bom[i].convertVersionToMetadata() diff --git a/buildpack/bomfile.go b/buildpack/bomfile.go index 3c0c8384d..5e9b75111 100644 --- a/buildpack/bomfile.go +++ b/buildpack/bomfile.go @@ -65,7 +65,7 @@ func (b *BOMFile) mediaType() string { } } -func validateMediaTypes(bp GroupBuildpack, bomfiles []BOMFile, sbomMediaTypes []string) error { +func validateMediaTypes(bp GroupBuildable, bomfiles []BOMFile, sbomMediaTypes []string) error { contains := func(vs []string, t string) bool { for _, v := range vs { if v == t { @@ -90,7 +90,7 @@ func validateMediaTypes(bp GroupBuildpack, bomfiles []BOMFile, sbomMediaTypes [] return nil } -func (b *Descriptor) processBOMFiles(layersDir string, bp GroupBuildpack, bpLayers map[string]LayerMetadataFile, logger Logger) ([]BOMFile, error) { +func (b *Descriptor) processBOMFiles(layersDir string, bp GroupBuildable, bpLayers map[string]LayerMetadataFile, logger Logger) ([]BOMFile, error) { var ( layerGlob = filepath.Join(layersDir, "*.sbom.*.json") files []BOMFile diff --git a/buildpack/build.go b/buildpack/build.go index 95998a76a..3c1497227 100644 --- a/buildpack/build.go +++ b/buildpack/build.go @@ -38,6 +38,7 @@ type BuildConfig struct { type BuildResult struct { BOM []BOMEntry BOMFiles []BOMFile + Dockerfiles []Dockerfile Labels []Label MetRequires []string Processes []launch.Process @@ -72,21 +73,15 @@ func (b *Descriptor) Build(bpPlan Plan, config BuildConfig, bpEnv BuildEnv) (Bui } } - config.Logger.Debug("Creating plan directory") - planDir, err := ioutil.TempDir("", launch.EscapeID(b.Buildpack.ID)+"-") - if err != nil { - return BuildResult{}, err - } - defer os.RemoveAll(planDir) - config.Logger.Debug("Preparing paths") - bpLayersDir, bpPlanPath, err := preparePaths(b.Buildpack.ID, bpPlan, config.LayersDir, planDir) + bpPlanDir, bpLayersDir, bpPlanPath, err := prepareBuildPaths(config.LayersDir, b.Buildpack.ID, bpPlan) + defer os.RemoveAll(bpPlanDir) if err != nil { return BuildResult{}, err } config.Logger.Debug("Running build command") - if err := b.runBuildCmd(bpLayersDir, bpPlanPath, config, bpEnv); err != nil { + if err := runBuildCmd(b.Dir, bpLayersDir, bpPlanPath, config, bpEnv, b.Buildpack.ClearEnv); err != nil { return BuildResult{}, err } @@ -146,28 +141,34 @@ func (b *Descriptor) processLayers(layersDir string, logger Logger) (map[string] }) } -func preparePaths(bpID string, bpPlan Plan, layersDir, planDir string) (string, string, error) { +func prepareBuildPaths(outputParentDir, bpID string, bpPlan Plan) (bpPlanParentDir, bpOutputDir, bpPlanPath string, err error) { + // plan + if bpPlanParentDir, err = ioutil.TempDir("", launch.EscapeID(bpID)+"-"); err != nil { + return + } bpDirName := launch.EscapeID(bpID) - bpLayersDir := filepath.Join(layersDir, bpDirName) - bpPlanDir := filepath.Join(planDir, bpDirName) - if err := os.MkdirAll(bpLayersDir, 0777); err != nil { - return "", "", err + bpPlanDir := filepath.Join(bpPlanParentDir, bpDirName) // TODO: find out if this intermediate directory needs to exist + if err = os.MkdirAll(bpPlanDir, 0777); err != nil { + return } - if err := os.MkdirAll(bpPlanDir, 0777); err != nil { - return "", "", err + bpPlanPath = filepath.Join(bpPlanDir, "plan.toml") + if err = encoding.WriteTOML(bpPlanPath, bpPlan); err != nil { + return } - bpPlanPath := filepath.Join(bpPlanDir, "plan.toml") - if err := encoding.WriteTOML(bpPlanPath, bpPlan); err != nil { - return "", "", err + + // output + bpOutputDir = filepath.Join(outputParentDir, bpDirName) + if err = 
os.MkdirAll(bpOutputDir, 0777); err != nil { + return } - return bpLayersDir, bpPlanPath, nil + return bpPlanParentDir, bpOutputDir, bpPlanPath, nil } -func (b *Descriptor) runBuildCmd(bpLayersDir, bpPlanPath string, config BuildConfig, bpEnv BuildEnv) error { +func runBuildCmd(buildpackDir, outputDir, bpPlanPath string, config BuildConfig, bpEnv BuildEnv, clearEnv bool) error { cmd := exec.Command( - filepath.Join(b.Dir, "bin", "build"), - bpLayersDir, + filepath.Join(buildpackDir, "bin", "build"), + outputDir, config.PlatformDir, bpPlanPath, ) // #nosec G204 @@ -176,7 +177,7 @@ func (b *Descriptor) runBuildCmd(bpLayersDir, bpPlanPath string, config BuildCon cmd.Stderr = config.Err var err error - if b.Buildpack.ClearEnv { + if clearEnv { cmd.Env = bpEnv.List() } else { cmd.Env, err = bpEnv.WithPlatform(config.PlatformDir) @@ -184,7 +185,7 @@ func (b *Descriptor) runBuildCmd(bpLayersDir, bpPlanPath string, config BuildCon return err } } - cmd.Env = append(cmd.Env, EnvBuildpackDir+"="+b.Dir) + cmd.Env = append(cmd.Env, EnvBuildpackDir+"="+buildpackDir) if err := cmd.Run(); err != nil { return NewError(err, ErrTypeBuildpack) @@ -235,7 +236,7 @@ func eachLayer(bpLayersDir, buildpackAPI string, fn func(path, api string) (Laye func (b *Descriptor) readOutputFiles(bpLayersDir, bpPlanPath string, bpPlanIn Plan, bpLayers map[string]LayerMetadataFile, logger Logger) (BuildResult, error) { br := BuildResult{} - bpFromBpInfo := GroupBuildpack{ID: b.Buildpack.ID, Version: b.Buildpack.Version} + bpFromBpInfo := GroupBuildable{ID: b.Buildpack.ID, Version: b.Buildpack.Version} // setup launch.toml var launchTOML LaunchTOML @@ -272,20 +273,20 @@ func (b *Descriptor) readOutputFiles(bpLayersDir, bpPlanPath string, bpPlanIn Pl } } else { // read build.toml - var bpBuild BuildTOML + var buildTOML BuildTOML buildPath := filepath.Join(bpLayersDir, "build.toml") - if _, err := toml.DecodeFile(buildPath, &bpBuild); err != nil && !os.IsNotExist(err) { + if _, err := toml.DecodeFile(buildPath, &buildTOML); err != nil && !os.IsNotExist(err) { return BuildResult{}, err } - if _, err := bomValidator.ValidateBOM(bpFromBpInfo, bpBuild.BOM); err != nil { + if _, err := bomValidator.ValidateBOM(bpFromBpInfo, buildTOML.BOM); err != nil { return BuildResult{}, err } // set MetRequires - if err := validateUnmet(bpBuild.Unmet, bpPlanIn); err != nil { + if err := validateUnmet(buildTOML.Unmet, bpPlanIn); err != nil { return BuildResult{}, err } - br.MetRequires = names(bpPlanIn.filter(bpBuild.Unmet).Entries) + br.MetRequires = names(bpPlanIn.filter(buildTOML.Unmet).Entries) // set BOM files br.BOMFiles, err = b.processBOMFiles(bpLayersDir, bpFromBpInfo, bpLayers, logger) @@ -383,7 +384,7 @@ func names(requires []Require) []string { return out } -func WithBuildpack(bp GroupBuildpack, bom []BOMEntry) []BOMEntry { +func WithBuildpack(bp GroupBuildable, bom []BOMEntry) []BOMEntry { var out []BOMEntry for _, entry := range bom { entry.Buildpack = bp.NoAPI().NoHomepage() diff --git a/buildpack/build_test.go b/buildpack/build_test.go index 973b09b7e..b7be36434 100644 --- a/buildpack/build_test.go +++ b/buildpack/build_test.go @@ -807,28 +807,28 @@ func testBuild(t *testing.T, when spec.G, it spec.S) { Name: "some-deprecated-bp-dep", Metadata: map[string]interface{}{"version": "v1"}, }, - Buildpack: buildpack.GroupBuildpack{ID: "A", Version: "v1"}, + Buildpack: buildpack.GroupBuildable{ID: "A", Version: "v1"}, }, { Require: buildpack.Require{ Name: "some-deprecated-bp-replace-version-dep", Metadata: map[string]interface{}{"version": 
"some-version-new"}, }, - Buildpack: buildpack.GroupBuildpack{ID: "A", Version: "v1"}, + Buildpack: buildpack.GroupBuildable{ID: "A", Version: "v1"}, }, { Require: buildpack.Require{ Name: "some-dep", Metadata: map[string]interface{}{"version": "v1"}, }, - Buildpack: buildpack.GroupBuildpack{ID: "A", Version: "v1"}, + Buildpack: buildpack.GroupBuildable{ID: "A", Version: "v1"}, }, { Require: buildpack.Require{ Name: "some-replace-version-dep", Metadata: map[string]interface{}{"version": "some-version-new"}, }, - Buildpack: buildpack.GroupBuildpack{ID: "A", Version: "v1"}, + Buildpack: buildpack.GroupBuildable{ID: "A", Version: "v1"}, }, }, Labels: nil, @@ -873,21 +873,21 @@ func testBuild(t *testing.T, when spec.G, it spec.S) { Name: "dep-1", Metadata: map[string]interface{}{"version": "v1"}, }, - Buildpack: buildpack.GroupBuildpack{ID: "A", Version: "v1"}, + Buildpack: buildpack.GroupBuildable{ID: "A", Version: "v1"}, }, { Require: buildpack.Require{ Name: "dep-2", Metadata: map[string]interface{}{"version": "v2"}, }, - Buildpack: buildpack.GroupBuildpack{ID: "A", Version: "v1"}, + Buildpack: buildpack.GroupBuildable{ID: "A", Version: "v1"}, }, { Require: buildpack.Require{ Name: "dep-3", Metadata: map[string]interface{}{"version": "v3"}, }, - Buildpack: buildpack.GroupBuildpack{ID: "A", Version: "v1"}, + Buildpack: buildpack.GroupBuildable{ID: "A", Version: "v1"}, }, }); s != "" { t.Fatalf("Unexpected:\n%s\n", s) @@ -1042,21 +1042,21 @@ func testBuild(t *testing.T, when spec.G, it spec.S) { Name: "some-deprecated-bp-replace-version-dep", Metadata: map[string]interface{}{"version": "some-version-new"}, }, - Buildpack: buildpack.GroupBuildpack{ID: "A", Version: "v1"}, // no api, no homepage + Buildpack: buildpack.GroupBuildable{ID: "A", Version: "v1"}, // no api, no homepage }, { Require: buildpack.Require{ Name: "some-dep", Metadata: map[string]interface{}{"version": "v1"}, }, - Buildpack: buildpack.GroupBuildpack{ID: "A", Version: "v1"}, // no api, no homepage + Buildpack: buildpack.GroupBuildable{ID: "A", Version: "v1"}, // no api, no homepage }, { Require: buildpack.Require{ Name: "some-replace-version-dep", Metadata: map[string]interface{}{"version": "some-version-new"}, }, - Buildpack: buildpack.GroupBuildpack{ID: "A", Version: "v1"}, // no api, no homepage + Buildpack: buildpack.GroupBuildable{ID: "A", Version: "v1"}, // no api, no homepage }, }, Labels: []buildpack.Label{}, diff --git a/buildpack/descriptor.go b/buildpack/descriptor.go index 0349dd764..b3f676c18 100644 --- a/buildpack/descriptor.go +++ b/buildpack/descriptor.go @@ -35,7 +35,24 @@ type Info struct { type Order []Group type Group struct { - Group []GroupBuildpack `toml:"group"` + Group []GroupBuildable `toml:"group"` +} + +func (bg Group) Append(group ...Group) Group { + for _, g := range group { + bg.Group = append(bg.Group, g.Group...) + } + return bg +} + +func (bg Group) Filter(useExtensions bool) Group { + ret := Group{Group: []GroupBuildable{}} + for _, gb := range bg.Group { + if gb.Extension == useExtensions { + ret.Group = append(ret.Group, gb) + } + } + return ret } func ReadGroup(path string) (Group, error) { @@ -52,38 +69,32 @@ func ReadOrder(path string) (Order, error) { return order.Order, err } -func (bg Group) Append(group ...Group) Group { - for _, g := range group { - bg.Group = append(bg.Group, g.Group...) - } - return bg +// A GroupBuildable represents a buildpack or extension referenced in a buildpack.toml's [[order.group]]. 
+// It may be a regular buildpack, a meta buildpack, or an extension. +type GroupBuildable struct { + API string `toml:"api,omitempty" json:"-"` + Homepage string `toml:"homepage,omitempty" json:"homepage,omitempty"` + ID string `toml:"id" json:"id"` + Version string `toml:"version" json:"version"` + Extension bool `toml:"extension,omitempty" json:"extension,omitempty"` // TODO: check if this is okay, suggested to RFC + Optional bool `toml:"optional,omitempty" json:"optional,omitempty"` +} -// A GroupBuildpack represents a buildpack referenced in a buildpack.toml's [[order.group]]. -// It may be a regular buildpack, or a meta buildpack. -type GroupBuildpack struct { - API string `toml:"api,omitempty" json:"-"` - Homepage string `toml:"homepage,omitempty" json:"homepage,omitempty"` - ID string `toml:"id" json:"id"` - Optional bool `toml:"optional,omitempty" json:"optional,omitempty"` - Version string `toml:"version" json:"version"` +func (b GroupBuildable) String() string { + return b.ID + "@" + b.Version } -func (bp GroupBuildpack) String() string { - return bp.ID + "@" + bp.Version -} - -func (bp GroupBuildpack) NoOpt() GroupBuildpack { - bp.Optional = false - return bp +func (b GroupBuildable) NoOpt() GroupBuildable { + b.Optional = false + return b } -func (bp GroupBuildpack) NoAPI() GroupBuildpack { - bp.API = "" - return bp +func (b GroupBuildable) NoAPI() GroupBuildable { + b.API = "" + return b } -func (bp GroupBuildpack) NoHomepage() GroupBuildpack { - bp.Homepage = "" - return bp +func (b GroupBuildable) NoHomepage() GroupBuildable { + b.Homepage = "" + return b } diff --git a/buildpack/descriptor_test.go b/buildpack/descriptor_test.go index 0554b894d..6af704ad5 100644 --- a/buildpack/descriptor_test.go +++ b/buildpack/descriptor_test.go @@ -47,8 +47,8 @@ func testDescriptor(t *testing.T, when spec.G, it spec.S) { t.Fatalf("Unexpected error:\n%s\n", err) } if s := cmp.Diff(actual, buildpack.Order{ - {Group: []buildpack.GroupBuildpack{{ID: "A", Version: "v1"}, {ID: "B", Optional: true}}}, - {Group: []buildpack.GroupBuildpack{{ID: "C"}, {}}}, + {Group: []buildpack.GroupBuildable{{ID: "A", Version: "v1"}, {ID: "B", Optional: true}}}, + {Group: []buildpack.GroupBuildable{{ID: "C"}, {}}}, }); s != "" { t.Fatalf("Unexpected list:\n%s\n", s) } @@ -79,7 +79,7 @@ func testDescriptor(t *testing.T, when spec.G, it spec.S) { t.Fatalf("Unexpected error:\n%s\n", err) } if s := cmp.Diff(actual, buildpack.Group{ - Group: []buildpack.GroupBuildpack{ + Group: []buildpack.GroupBuildable{ {ID: "A", Version: "v1"}, {ID: "B", Optional: true}, }, diff --git a/buildpack/detect.go b/buildpack/detect.go index 53adcaf3a..94da34fae 100644 --- a/buildpack/detect.go +++ b/buildpack/detect.go @@ -37,6 +37,36 @@ type DetectConfig struct { } func (b *Descriptor) Detect(config *DetectConfig, bpEnv BuildEnv) DetectRun { + t := doDetect(config, bpEnv, b.Dir, b.Buildpack.ClearEnv) + if api.MustParse(b.API).Equal(api.MustParse("0.2")) { + if t.hasInconsistentVersions() || t.Or.hasInconsistentVersions() { + t.Err = errors.Errorf(`buildpack %s has a "version" key that does not match "metadata.version"`, b.Buildpack.ID) + t.Code = -1 + } + } + if api.MustParse(b.API).AtLeast("0.3") { + if t.hasDoublySpecifiedVersions() || t.Or.hasDoublySpecifiedVersions() { + t.Err = errors.Errorf(`buildpack %s has a "version" key and a "metadata.version" which cannot be specified together. 
"metadata.version" should be used instead`, b.Buildpack.ID) + t.Code = -1 + } + } + if api.MustParse(b.API).AtLeast("0.3") { + if t.hasTopLevelVersions() || t.Or.hasTopLevelVersions() { + config.Logger.Warnf(`Warning: buildpack %s has a "version" key. This key is deprecated in build plan requirements in buildpack API 0.3. "metadata.version" should be used instead`, b.Buildpack.ID) + } + } + return t +} + +type DetectRun struct { + BuildPlan + Output []byte `toml:"-"` + Code int `toml:"-"` + Err error `toml:"-"` +} + +func doDetect(config *DetectConfig, bpEnv BuildEnv, buildpackDir string, clearEnv bool) DetectRun { + // prepare paths appDir, err := filepath.Abs(config.AppDir) if err != nil { return DetectRun{Code: -1, Err: err} @@ -56,9 +86,10 @@ func (b *Descriptor) Detect(config *DetectConfig, bpEnv BuildEnv) DetectRun { return DetectRun{Code: -1, Err: err} } + // run detect command out := &bytes.Buffer{} cmd := exec.Command( - filepath.Join(b.Dir, "bin", "detect"), + filepath.Join(buildpackDir, "bin", "detect"), platformDir, planPath, ) // #nosec G204 @@ -66,7 +97,7 @@ func (b *Descriptor) Detect(config *DetectConfig, bpEnv BuildEnv) DetectRun { cmd.Stdout = out cmd.Stderr = out - if b.Buildpack.ClearEnv { + if clearEnv { cmd.Env = bpEnv.List() } else { cmd.Env, err = bpEnv.WithPlatform(config.PlatformDir) @@ -74,7 +105,7 @@ func (b *Descriptor) Detect(config *DetectConfig, bpEnv BuildEnv) DetectRun { return DetectRun{Code: -1, Err: err} } } - cmd.Env = append(cmd.Env, EnvBuildpackDir+"="+b.Dir) + cmd.Env = append(cmd.Env, EnvBuildpackDir+"="+buildpackDir) if err := cmd.Run(); err != nil { if err, ok := err.(*exec.ExitError); ok { @@ -84,34 +115,12 @@ func (b *Descriptor) Detect(config *DetectConfig, bpEnv BuildEnv) DetectRun { } return DetectRun{Code: -1, Err: err, Output: out.Bytes()} } + + // parse output var t DetectRun if _, err := toml.DecodeFile(planPath, &t); err != nil { return DetectRun{Code: -1, Err: err, Output: out.Bytes()} } - if api.MustParse(b.API).Equal(api.MustParse("0.2")) { - if t.hasInconsistentVersions() || t.Or.hasInconsistentVersions() { - t.Err = errors.Errorf(`buildpack %s has a "version" key that does not match "metadata.version"`, b.Buildpack.ID) - t.Code = -1 - } - } - if api.MustParse(b.API).AtLeast("0.3") { - if t.hasDoublySpecifiedVersions() || t.Or.hasDoublySpecifiedVersions() { - t.Err = errors.Errorf(`buildpack %s has a "version" key and a "metadata.version" which cannot be specified together. "metadata.version" should be used instead`, b.Buildpack.ID) - t.Code = -1 - } - } - if api.MustParse(b.API).AtLeast("0.3") { - if t.hasTopLevelVersions() || t.Or.hasTopLevelVersions() { - config.Logger.Warnf(`Warning: buildpack %s has a "version" key. This key is deprecated in build plan requirements in buildpack API 0.3. "metadata.version" should be used instead`, b.Buildpack.ID) - } - } t.Output = out.Bytes() return t } - -type DetectRun struct { - BuildPlan - Output []byte `toml:"-"` - Code int `toml:"-"` - Err error `toml:"-"` -} diff --git a/buildpack/dockerfile.go b/buildpack/dockerfile.go new file mode 100644 index 000000000..34914fc17 --- /dev/null +++ b/buildpack/dockerfile.go @@ -0,0 +1,74 @@ +package buildpack + +import ( + "path/filepath" +) + +type Dockerfile struct { + ExtensionID string `toml:"extension_id"` // TODO: nest [[dockerfiles]] under [[extensions]]? + Path string `toml:"path"` + Type string `toml:"type"` + Args []DockerfileArg `toml:"args"` +} + +type DockerfileArg struct { + Key string `toml:"name"` // TODO: which do we want? 
+ Value string `toml:"value"` +} + +func processDockerfiles(bpOutputDir, extID string, buildArgs, runArgs []DockerfileArg) ([]Dockerfile, error) { + var ( + dockerfileGlob = filepath.Join(bpOutputDir, "*Dockerfile") + dockerfiles []Dockerfile + ) + + matches, err := filepath.Glob(dockerfileGlob) + if err != nil { + return nil, err + } + + for _, m := range matches { + _, filename := filepath.Split(m) + + if filename == "run.Dockerfile" { + dockerfiles = append(dockerfiles, Dockerfile{ + ExtensionID: extID, + Path: m, + Type: "run", + Args: runArgs, + }) + continue + } + + if filename == "build.Dockerfile" { + dockerfiles = append(dockerfiles, Dockerfile{ + ExtensionID: extID, + Path: m, + Type: "build", + Args: buildArgs, + }) + continue + } + + if filename == "Dockerfile" { + dockerfiles = append(dockerfiles, + Dockerfile{ + ExtensionID: extID, + Path: m, + Type: "run", + Args: runArgs, + }, + Dockerfile{ + ExtensionID: extID, + Path: m, + Type: "build", + Args: buildArgs, + }, + ) + continue + } + // ignore other glob matches e.g., some-random.Dockerfile + } + + return dockerfiles, nil +} diff --git a/buildpack/extension.go b/buildpack/extension.go new file mode 100644 index 000000000..117a365eb --- /dev/null +++ b/buildpack/extension.go @@ -0,0 +1,133 @@ +package buildpack + +import ( + "os" + "path/filepath" + + "github.com/BurntSushi/toml" + "github.com/pkg/errors" +) + +type Extension struct { + API string `toml:"api"` + Buildpack Info `toml:"extension"` + Dir string `toml:"-"` + Descriptor *Descriptor +} + +func (e *Extension) ConfigFile() *Descriptor { + if e.Descriptor == nil { + e.Descriptor = &Descriptor{ + API: e.API, + Buildpack: e.Buildpack, + Order: nil, // TODO: check + Dir: e.Dir, + } + } + return e.Descriptor +} + +func (e *Extension) IsMetaBuildpack() bool { + return false +} + +func (e *Extension) String() string { + return e.Buildpack.Name + " " + e.Buildpack.Version +} + +func (e *Extension) Detect(config *DetectConfig, bpEnv BuildEnv) DetectRun { + if _, err := os.Stat(filepath.Join(e.Dir, "bin", "detect")); os.IsNotExist(err) { + config.Logger.Debugf("Passing detect due to missing %s", filepath.Join("bin", "detect")) + return DetectRun{} + } + t := doDetect(config, bpEnv, e.Dir, e.Buildpack.ClearEnv) + if t.hasRequires() || t.Or.hasRequires() { + t.Err = errors.Errorf(`extension %s outputs "requires" to the build plan; only "provides" are permitted`, e.Buildpack.ID) + t.Code = -1 + } + return t +} + +func (e *Extension) Build(bpPlan Plan, config BuildConfig, bpEnv BuildEnv) (BuildResult, error) { + config.Logger.Debugf("Running build for extension %s", e) + + if _, err := os.Stat(filepath.Join(e.Dir, "bin", "build")); os.IsNotExist(err) { + config.Logger.Debugf("Passing build due to missing %s", filepath.Join("bin", "build")) + config.Logger.Debug("Reading pre-populated files") + return e.readOutputFiles(e.Dir, bpPlan, config.Logger) + } + + config.Logger.Debug("Preparing paths") + bpPlanDir, bpOutputDir, bpPlanPath, err := prepareBuildPaths(config.LayersDir, e.Buildpack.ID, bpPlan) + defer os.RemoveAll(bpPlanDir) + if err != nil { + return BuildResult{}, err + } + + config.Logger.Debug("Running build command") + if err := runBuildCmd(e.Dir, bpOutputDir, bpPlanPath, config, bpEnv, e.Buildpack.ClearEnv); err != nil { + return BuildResult{}, err + } + + config.Logger.Debug("Reading output files") + return e.readOutputFiles(bpOutputDir, bpPlan, config.Logger) +} + +func (e *Extension) readOutputFiles(bpOutputDir string, bpPlanIn Plan, logger Logger) (BuildResult, 
error) { + br := &BuildResult{} + + buildArgs, err := readExtBuildTOML(filepath.Join(bpOutputDir, "build.toml"), br, bpPlanIn) + if err != nil { + return BuildResult{}, errors.Wrap(err, "reading build.toml") + } + + runArgs, err := readExtLaunchTOML(filepath.Join(bpOutputDir, "launch.toml"), br) + if err != nil { + return BuildResult{}, errors.Wrap(err, "reading launch.toml") + } + + dockerfiles, err := processDockerfiles(bpOutputDir, e.Buildpack.ID, buildArgs, runArgs) + if err != nil { + return BuildResult{}, errors.Wrap(err, "processing Dockerfiles") + } + br.Dockerfiles = dockerfiles + + // set BOM files + // br.BOMFiles, err = b.processBOMFiles(bpOutputDir, bpFromBpInfo, bpLayers, logger) // TODO: extract service + // if err != nil { + // return BuildResult{}, err + // } + + return *br, nil +} + +func readExtBuildTOML(path string, br *BuildResult, bpPlanIn Plan) ([]DockerfileArg, error) { + type extBuildTOML struct { // TODO: this should go in files.go + Args []DockerfileArg `toml:"args"` + Unmet []Unmet `toml:"unmet"` + } + var buildTOML extBuildTOML + if _, err := toml.DecodeFile(path, &buildTOML); err != nil && !os.IsNotExist(err) { + return nil, err + } + buildArgs := buildTOML.Args + if err := validateUnmet(buildTOML.Unmet, bpPlanIn); err != nil { + return nil, err + } + br.MetRequires = names(bpPlanIn.filter(buildTOML.Unmet).Entries) + return buildArgs, nil +} + +func readExtLaunchTOML(path string, br *BuildResult) ([]DockerfileArg, error) { + type extLaunchTOML struct { // TODO: this should go in files.go + Args []DockerfileArg `toml:"args"` + Labels []Label `toml:"labels"` + } + var launchTOML extLaunchTOML + if _, err := toml.DecodeFile(path, &launchTOML); err != nil && !os.IsNotExist(err) { + return nil, err + } + br.Labels = launchTOML.Labels + runArgs := launchTOML.Args + return runArgs, nil +} diff --git a/buildpack/files.go b/buildpack/files.go index 4f8afae79..897374965 100644 --- a/buildpack/files.go +++ b/buildpack/files.go @@ -20,7 +20,7 @@ type LaunchTOML struct { type BOMEntry struct { Require - Buildpack GroupBuildpack `toml:"buildpack" json:"buildpack"` + Buildpack GroupBuildable `toml:"buildpack" json:"buildpack"` } type Require struct { @@ -110,6 +110,10 @@ func (p *PlanSections) hasDoublySpecifiedVersions() bool { return false } +func (p *PlanSections) hasRequires() bool { + return len(p.Requires) > 0 +} + func (p *PlanSections) hasTopLevelVersions() bool { for _, req := range p.Requires { if req.hasTopLevelVersions() { @@ -139,6 +143,15 @@ func (p *planSectionsList) hasDoublySpecifiedVersions() bool { return false } +func (p *planSectionsList) hasRequires() bool { + for _, planSection := range *p { + if planSection.hasRequires() { + return true + } + } + return false +} + func (p *planSectionsList) hasTopLevelVersions() bool { for _, planSection := range *p { if planSection.hasTopLevelVersions() { diff --git a/buildpack/layers.go b/buildpack/layers.go index 014b39cd3..19bf53654 100644 --- a/buildpack/layers.go +++ b/buildpack/layers.go @@ -19,11 +19,11 @@ type LayersDir struct { Path string layers []Layer name string - Buildpack GroupBuildpack + Buildpack GroupBuildable Store *StoreTOML } -func ReadLayersDir(layersDir string, bp GroupBuildpack, logger Logger) (LayersDir, error) { +func ReadLayersDir(layersDir string, bp GroupBuildable, logger Logger) (LayersDir, error) { path := filepath.Join(layersDir, launch.EscapeID(bp.ID)) logger.Debugf("Reading buildpack directory: %s", path) bpDir := LayersDir{ diff --git a/buildpack/store.go b/buildpack/store.go index 
e1aef4c34..6448b4d72 100644 --- a/buildpack/store.go +++ b/buildpack/store.go @@ -8,26 +8,27 @@ import ( "github.com/buildpacks/lifecycle/launch" ) -type Buildpack interface { +// TODO: does store belong in this package? +type Buildable interface { Build(bpPlan Plan, config BuildConfig, bpEnv BuildEnv) (BuildResult, error) ConfigFile() *Descriptor Detect(config *DetectConfig, bpEnv BuildEnv) DetectRun } -type DirBuildpackStore struct { +type BpStore struct { Dir string } -func NewBuildpackStore(dir string) (*DirBuildpackStore, error) { +func NewBuildpackStore(dir string) (*BpStore, error) { dir, err := filepath.Abs(dir) if err != nil { return nil, err } - return &DirBuildpackStore{Dir: dir}, nil + return &BpStore{Dir: dir}, nil } -func (f *DirBuildpackStore) Lookup(bpID, bpVersion string) (Buildpack, error) { - bpTOML := Descriptor{} +func (f *BpStore) Lookup(bpID, bpVersion string) (Buildable, error) { + var bpTOML Descriptor bpPath := filepath.Join(f.Dir, launch.EscapeID(bpID), bpVersion) tomlPath := filepath.Join(bpPath, "buildpack.toml") if _, err := toml.DecodeFile(tomlPath, &bpTOML); err != nil { @@ -36,3 +37,26 @@ func (f *DirBuildpackStore) Lookup(bpID, bpVersion string) (Buildpack, error) { bpTOML.Dir = bpPath return &bpTOML, nil } + +type ExtensionStore struct { + Dir string +} + +func NewExtensionStore(dir string) (*ExtensionStore, error) { + dir, err := filepath.Abs(dir) + if err != nil { + return nil, err + } + return &ExtensionStore{Dir: dir}, nil +} + +func (f *ExtensionStore) Lookup(extID, extVersion string) (Buildable, error) { + var extTOML Extension + extPath := filepath.Join(f.Dir, launch.EscapeID(extID), extVersion) + tomlPath := filepath.Join(extPath, "extension.toml") + if _, err := toml.DecodeFile(tomlPath, &extTOML); err != nil { + return nil, err + } + extTOML.Dir = extPath + return &extTOML, nil +} diff --git a/buildpack/store_test.go b/buildpack/store_test.go index 565cea934..6f1f978e4 100644 --- a/buildpack/store_test.go +++ b/buildpack/store_test.go @@ -12,11 +12,11 @@ import ( ) func TestStore(t *testing.T) { - spec.Run(t, "Store", testStore, spec.Report(report.Terminal{})) + spec.Run(t, "BuildableStore", testStore, spec.Report(report.Terminal{})) } func testStore(t *testing.T, when spec.G, it spec.S) { - var store *buildpack.DirBuildpackStore + var store *buildpack.BpStore it.Before(func() { var err error diff --git a/cache_test.go b/cache_test.go index 109424d42..b143f7bcf 100644 --- a/cache_test.go +++ b/cache_test.go @@ -65,7 +65,7 @@ func testCache(t *testing.T, when spec.G, it spec.S) { exporter = &lifecycle.Exporter{ PlatformAPI: api.Platform.Latest(), - Buildpacks: []buildpack.GroupBuildpack{ + Buildpacks: []buildpack.GroupBuildable{ {ID: "buildpack.id", API: api.Buildpack.Latest().String()}, {ID: "other.buildpack.id", API: api.Buildpack.Latest().String()}, }, @@ -291,7 +291,7 @@ func testCache(t *testing.T, when spec.G, it spec.S) { when("buildpack API < 0.6", func() { it.Before(func() { - exporter.Buildpacks = []buildpack.GroupBuildpack{{ID: "old.buildpack.id", API: "0.5"}} + exporter.Buildpacks = []buildpack.GroupBuildable{{ID: "old.buildpack.id", API: "0.5"}} }) when("the layers are valid", func() { it.Before(func() { diff --git a/cmd/flags.go b/cmd/flags.go index 43fbc89aa..dc6fc1942 100644 --- a/cmd/flags.go +++ b/cmd/flags.go @@ -14,6 +14,7 @@ var ( DefaultAppDir = filepath.Join(rootDir, "workspace") DefaultBuildpacksDir = filepath.Join(rootDir, "cnb", "buildpacks") DefaultDeprecationMode = DeprecationModeWarn + DefaultExtensionsDir = 
filepath.Join(rootDir, "cnb", "ext") DefaultLauncherPath = filepath.Join(rootDir, "cnb", "lifecycle", "launcher"+execExt) DefaultLayersDir = filepath.Join(rootDir, "layers") DefaultLogLevel = "info" @@ -44,6 +45,7 @@ const ( EnvCacheDir = "CNB_CACHE_DIR" EnvCacheImage = "CNB_CACHE_IMAGE" EnvDeprecationMode = "CNB_DEPRECATION_MODE" + EnvExtensionsDir = "CNB_EXTENSIONS_DIR" EnvGID = "CNB_GROUP_ID" EnvGroupPath = "CNB_GROUP_PATH" EnvLaunchCacheDir = "CNB_LAUNCH_CACHE_DIR" @@ -63,7 +65,8 @@ const ( EnvSkipRestore = "CNB_SKIP_RESTORE" // defaults to false EnvStackPath = "CNB_STACK_PATH" EnvUID = "CNB_USER_ID" - EnvUseDaemon = "CNB_USE_DAEMON" // defaults to false + EnvUseDaemon = "CNB_USE_DAEMON" // defaults to false + EnvUseExtensions = "CNB_USE_EXTENSIONS" // defaults to false ) var flagSet = flag.NewFlagSet("lifecycle", flag.ExitOnError) @@ -92,6 +95,10 @@ func FlagCacheImage(cacheImage *string) { flagSet.StringVar(cacheImage, "cache-image", os.Getenv(EnvCacheImage), "cache image tag name") } +func FlagExtensionsDir(extensionsDir *string) { + flagSet.StringVar(extensionsDir, "extensions", EnvOrDefault(EnvExtensionsDir, DefaultExtensionsDir), "path to extensions directory") +} + func FlagGID(gid *int) { flagSet.IntVar(gid, "gid", intEnv(EnvGID), "GID of user's group in the stack's build and run images") } @@ -188,8 +195,12 @@ func FlagUID(uid *int) { flagSet.IntVar(uid, "uid", intEnv(EnvUID), "UID of user in the stack's build and run images") } -func FlagUseDaemon(use *bool) { - flagSet.BoolVar(use, "daemon", BoolEnv(EnvUseDaemon), "export to docker daemon") +func FlagUseDaemon(useDaemon *bool) { + flagSet.BoolVar(useDaemon, "daemon", BoolEnv(EnvUseDaemon), "export to docker daemon") +} + +func FlagUseExtensions(useExtensions *bool) { + flagSet.BoolVar(useExtensions, "use-extensions", BoolEnv(EnvUseExtensions), "run build for extensions") } func FlagVersion(version *bool) { diff --git a/cmd/lifecycle/builder.go b/cmd/lifecycle/builder.go index ae3ca0931..8c85bfbad 100644 --- a/cmd/lifecycle/builder.go +++ b/cmd/lifecycle/builder.go @@ -24,21 +24,25 @@ type buildCmd struct { type buildArgs struct { // inputs needed when run by creator buildpacksDir string + extensionsDir string // TODO: not valid for creator YET layersDir string appDir string platformDir string + useExtensions bool // TODO: not valid for creator YET platform Platform } // DefineFlags defines the flags that are considered valid and reads their values (if provided). func (b *buildCmd) DefineFlags() { + cmd.FlagAppDir(&b.appDir) cmd.FlagBuildpacksDir(&b.buildpacksDir) + cmd.FlagExtensionsDir(&b.extensionsDir) cmd.FlagGroupPath(&b.groupPath) - cmd.FlagPlanPath(&b.planPath) cmd.FlagLayersDir(&b.layersDir) - cmd.FlagAppDir(&b.appDir) + cmd.FlagPlanPath(&b.planPath) cmd.FlagPlatformDir(&b.platformDir) + cmd.FlagUseExtensions(&b.useExtensions) } // Args validates arguments and flags, and fills in default values. 
@@ -78,22 +82,30 @@ func (b *buildCmd) Exec() error { } func (ba buildArgs) build(group buildpack.Group, plan platform.BuildPlan) error { - buildpackStore, err := buildpack.NewBuildpackStore(ba.buildpacksDir) + var ( + bStore lifecycle.BuildableStore + err error + ) + if ba.useExtensions { + bStore, err = buildpack.NewExtensionStore(ba.extensionsDir) + } else { + bStore, err = buildpack.NewBuildpackStore(ba.buildpacksDir) + } if err != nil { return cmd.FailErrCode(err, ba.platform.CodeFor(platform.BuildError), "build") } builder := &lifecycle.Builder{ AppDir: ba.appDir, - LayersDir: ba.layersDir, + LayersDir: ba.layersDir, // TODO: when there are extensions, this should default to something other than /layers - maybe /layers/config/ext PlatformDir: ba.platformDir, Platform: ba.platform, - Group: group, + Group: group.Filter(ba.useExtensions), Plan: plan, Out: cmd.Stdout, Err: cmd.Stderr, Logger: cmd.DefaultLogger, - BuildpackStore: buildpackStore, + BuildableStore: bStore, } md, err := builder.Build() diff --git a/cmd/lifecycle/detector.go b/cmd/lifecycle/detector.go index 9beb94883..91cdc66b0 100644 --- a/cmd/lifecycle/detector.go +++ b/cmd/lifecycle/detector.go @@ -25,6 +25,7 @@ type detectArgs struct { // inputs needed when run by creator buildpacksDir string appDir string + extensionsDir string layersDir string platformDir string orderPath string @@ -34,13 +35,14 @@ type detectArgs struct { // DefineFlags defines the flags that are considered valid and reads their values (if provided). func (d *detectCmd) DefineFlags() { - cmd.FlagBuildpacksDir(&d.buildpacksDir) cmd.FlagAppDir(&d.appDir) + cmd.FlagBuildpacksDir(&d.buildpacksDir) + cmd.FlagExtensionsDir(&d.extensionsDir) + cmd.FlagGroupPath(&d.groupPath) cmd.FlagLayersDir(&d.layersDir) - cmd.FlagPlatformDir(&d.platformDir) cmd.FlagOrderPath(&d.orderPath) - cmd.FlagGroupPath(&d.groupPath) cmd.FlagPlanPath(&d.planPath) + cmd.FlagPlatformDir(&d.platformDir) } // Args validates arguments and flags, and fills in default values. 
@@ -85,19 +87,15 @@ func (da detectArgs) detect() (buildpack.Group, platform.BuildPlan, error) { if err != nil { return buildpack.Group{}, platform.BuildPlan{}, cmd.FailErr(err, "read buildpack order file") } - if err := da.verifyBuildpackApis(order); err != nil { + if err := da.verifyBuildableApis(order); err != nil { return buildpack.Group{}, platform.BuildPlan{}, err } - detector, err := lifecycle.NewDetector( - buildpack.DetectConfig{ - AppDir: da.appDir, - PlatformDir: da.platformDir, - Logger: cmd.DefaultLogger, - }, - da.buildpacksDir, - da.platform, - ) + detector, err := lifecycle.NewDetector(buildpack.DetectConfig{ + AppDir: da.appDir, + PlatformDir: da.platformDir, + Logger: cmd.DefaultLogger, + }, da.buildpacksDir, da.extensionsDir, da.platform) if err != nil { return buildpack.Group{}, platform.BuildPlan{}, cmd.FailErr(err, "initialize detector") } @@ -124,18 +122,33 @@ func (da detectArgs) detect() (buildpack.Group, platform.BuildPlan, error) { return group, plan, nil } -func (da detectArgs) verifyBuildpackApis(order buildpack.Order) error { - store, err := buildpack.NewBuildpackStore(da.buildpacksDir) +func (da detectArgs) verifyBuildableApis(order buildpack.Order) error { + bpStore, err := buildpack.NewBuildpackStore(da.buildpacksDir) if err != nil { return err } + var extStore *buildpack.ExtensionStore + if da.extensionsDir != "" { + extStore, err = buildpack.NewExtensionStore(da.extensionsDir) + if err != nil { + return err + } + } for _, group := range order { for _, groupBp := range group.Group { - buildpack, err := store.Lookup(groupBp.ID, groupBp.Version) - if err != nil { - return cmd.FailErr(err, fmt.Sprintf("lookup buildpack.toml for buildpack '%s'", groupBp.String())) + var buildable buildpack.Buildable + if groupBp.Extension { + buildable, err = extStore.Lookup(groupBp.ID, groupBp.Version) + if err != nil { + return cmd.FailErr(err, fmt.Sprintf("lookup extension.toml for extension '%s'", groupBp.String())) + } + } else { + buildable, err = bpStore.Lookup(groupBp.ID, groupBp.Version) + if err != nil { + return cmd.FailErr(err, fmt.Sprintf("lookup buildpack.toml for buildpack '%s'", groupBp.String())) + } } - if err := cmd.VerifyBuildpackAPI(groupBp.String(), buildpack.ConfigFile().API); err != nil { + if err := cmd.VerifyBuildpackAPI(groupBp.String(), buildable.ConfigFile().API); err != nil { return err } } diff --git a/detector.go b/detector.go index 7bc41b3f3..174c62726 100644 --- a/detector.go +++ b/detector.go @@ -23,31 +23,40 @@ var ( ) type Resolver interface { - Resolve(done []buildpack.GroupBuildpack, detectRuns *sync.Map) ([]buildpack.GroupBuildpack, []platform.BuildPlanEntry, error) + Resolve(done []buildpack.GroupBuildable, detectRuns *sync.Map) ([]buildpack.GroupBuildable, []platform.BuildPlanEntry, error) } type Detector struct { buildpack.DetectConfig - Platform Platform - Resolver Resolver - Runs *sync.Map - Store BuildpackStore + Platform Platform + Resolver Resolver + Runs *sync.Map + BuildpackStore BuildableStore + ExtensionStore BuildableStore } -func NewDetector(config buildpack.DetectConfig, buildpacksDir string, platform Platform) (*Detector, error) { +func NewDetector(config buildpack.DetectConfig, buildpacksDir string, extensionsDir string, platform Platform) (*Detector, error) { resolver := &DefaultResolver{ Logger: config.Logger, } - store, err := buildpack.NewBuildpackStore(buildpacksDir) + bpStore, err := buildpack.NewBuildpackStore(buildpacksDir) if err != nil { return nil, err } + var extStore BuildableStore + if extensionsDir != "" { 
+ extStore, err = buildpack.NewExtensionStore(extensionsDir) + if err != nil { + return nil, err + } + } return &Detector{ - DetectConfig: config, - Platform: platform, - Resolver: resolver, - Runs: &sync.Map{}, - Store: store, + DetectConfig: config, + Platform: platform, + Resolver: resolver, + Runs: &sync.Map{}, + BuildpackStore: bpStore, + ExtensionStore: extStore, }, nil } @@ -70,7 +79,7 @@ func (d *Detector) DetectOrder(order buildpack.Order) (buildpack.Group, platform return buildpack.Group{Group: bps}, platform.BuildPlan{Entries: entries}, err } -func (d *Detector) detectOrder(order buildpack.Order, done, next []buildpack.GroupBuildpack, optional bool, wg *sync.WaitGroup) ([]buildpack.GroupBuildpack, []platform.BuildPlanEntry, error) { +func (d *Detector) detectOrder(order buildpack.Order, done, next []buildpack.GroupBuildable, optional bool, wg *sync.WaitGroup) ([]buildpack.GroupBuildable, []platform.BuildPlanEntry, error) { ngroup := buildpack.Group{Group: next} buildpackErr := false for _, group := range order { @@ -95,38 +104,53 @@ func (d *Detector) detectOrder(order buildpack.Order, done, next []buildpack.Gro return nil, nil, ErrFailedDetection } -func (d *Detector) detectGroup(group buildpack.Group, done []buildpack.GroupBuildpack, wg *sync.WaitGroup) ([]buildpack.GroupBuildpack, []platform.BuildPlanEntry, error) { - for i, groupBp := range group.Group { - key := groupBp.String() - if hasID(done, groupBp.ID) { +func (d *Detector) detectGroup(group buildpack.Group, done []buildpack.GroupBuildable, wg *sync.WaitGroup) ([]buildpack.GroupBuildable, []platform.BuildPlanEntry, error) { + for i, groupBuildable := range group.Group { + key := groupBuildable.String() + if hasID(done, groupBuildable.ID) { continue } - bp, err := d.Store.Lookup(groupBp.ID, groupBp.Version) + var ( + buildable buildpack.Buildable + err error + ) + if groupBuildable.Extension { + if !groupBuildable.Optional { + // TODO: warn or error? 
+ groupBuildable.Optional = true + } + buildable, err = d.ExtensionStore.Lookup(groupBuildable.ID, groupBuildable.Version) + } else { + buildable, err = d.BuildpackStore.Lookup(groupBuildable.ID, groupBuildable.Version) + } if err != nil { return nil, nil, err } - bpDesc := bp.ConfigFile() - groupBp.API = bpDesc.API - groupBp.Homepage = bpDesc.Buildpack.Homepage + bpDesc := buildable.ConfigFile() + groupBuildable.API = bpDesc.API + groupBuildable.Homepage = bpDesc.Buildpack.Homepage if bpDesc.IsMetaBuildpack() { + if groupBuildable.Extension { + return nil, nil, fmt.Errorf(`extension %s contains an "order" definition which is not allowed`, groupBuildable.ID) + } // TODO: double-check slice safety here // FIXME: cyclical references lead to infinite recursion - return d.detectOrder(bpDesc.Order, done, group.Group[i+1:], groupBp.Optional, wg) + return d.detectOrder(bpDesc.Order, done, group.Group[i+1:], groupBuildable.Optional, wg) } bpEnv := env.NewBuildEnv(os.Environ()) - done = append(done, groupBp) + done = append(done, groupBuildable) wg.Add(1) - go func(key string, bp Buildpack) { + go func(key string, bp Buildable) { if _, ok := d.Runs.Load(key); !ok { d.Runs.Store(key, bp.Detect(&d.DetectConfig, bpEnv)) } wg.Done() - }(key, bp) + }(key, buildable) } wg.Wait() @@ -134,7 +158,7 @@ func (d *Detector) detectGroup(group buildpack.Group, done []buildpack.GroupBuil return d.Resolver.Resolve(done, d.Runs) } -func hasID(bps []buildpack.GroupBuildpack, id string) bool { +func hasID(bps []buildpack.GroupBuildable, id string) bool { for _, bp := range bps { if bp.ID == id { return true @@ -149,7 +173,7 @@ type DefaultResolver struct { // Resolve aggregates the detect output for a group of buildpacks and tries to resolve a build plan for the group. // If any required buildpack in the group failed detection or a build plan cannot be resolved, it returns an error. 
-func (r *DefaultResolver) Resolve(done []buildpack.GroupBuildpack, detectRuns *sync.Map) ([]buildpack.GroupBuildpack, []platform.BuildPlanEntry, error) { +func (r *DefaultResolver) Resolve(done []buildpack.GroupBuildable, detectRuns *sync.Map) ([]buildpack.GroupBuildable, []platform.BuildPlanEntry, error) { var groupRuns []buildpack.DetectRun for _, bp := range done { t, ok := detectRuns.Load(bp.String()) @@ -238,9 +262,9 @@ func (r *DefaultResolver) Resolve(done []buildpack.GroupBuildpack, detectRuns *s r.Logger.Infof(f, t.ID, t.Version) } - var found []buildpack.GroupBuildpack + var found []buildpack.GroupBuildable for _, r := range trial { - found = append(found, r.GroupBuildpack.NoOpt()) + found = append(found, r.GroupBuildable.NoOpt()) } var plan []platform.BuildPlanEntry for _, dep := range deps { @@ -258,7 +282,7 @@ func (r *DefaultResolver) runTrial(i int, trial detectTrial) (depMap, detectTria retry = false deps = newDepMap(trial) - if err := deps.eachUnmetRequire(func(name string, bp buildpack.GroupBuildpack) error { + if err := deps.eachUnmetRequire(func(name string, bp buildpack.GroupBuildable) error { retry = true if !bp.Optional { r.Logger.Debugf("fail: %s requires %s", bp, name) @@ -271,7 +295,7 @@ func (r *DefaultResolver) runTrial(i int, trial detectTrial) (depMap, detectTria return nil, nil, err } - if err := deps.eachUnmetProvide(func(name string, bp buildpack.GroupBuildpack) error { + if err := deps.eachUnmetProvide(func(name string, bp buildpack.GroupBuildable) error { retry = true if !bp.Optional { r.Logger.Debugf("fail: %s provides unused %s", bp, name) @@ -293,14 +317,14 @@ func (r *DefaultResolver) runTrial(i int, trial detectTrial) (depMap, detectTria } type detectResult struct { - buildpack.GroupBuildpack + buildpack.GroupBuildable buildpack.DetectRun } func (r *detectResult) options() []detectOption { var out []detectOption for i, sections := range append([]buildpack.PlanSections{r.PlanSections}, r.Or...) 
{ - bp := r.GroupBuildpack + bp := r.GroupBuildable bp.Optional = bp.Optional && i == len(r.Or) out = append(out, detectOption{bp, sections}) } @@ -308,6 +332,7 @@ func (r *detectResult) options() []detectOption { } type detectResults []detectResult + type trialFunc func(detectTrial) (depMap, detectTrial, error) func (rs detectResults) runTrials(f trialFunc) (depMap, detectTrial, error) { @@ -332,16 +357,16 @@ func (rs detectResults) runTrialsFrom(prefix detectTrial, f trialFunc) (depMap, } type detectOption struct { - buildpack.GroupBuildpack + buildpack.GroupBuildable buildpack.PlanSections } type detectTrial []detectOption -func (ts detectTrial) remove(bp buildpack.GroupBuildpack) detectTrial { +func (ts detectTrial) remove(bp buildpack.GroupBuildable) detectTrial { var out detectTrial for _, t := range ts { - if t.GroupBuildpack != bp { + if t.GroupBuildable != bp { out = append(out, t) } } @@ -350,8 +375,8 @@ func (ts detectTrial) remove(bp buildpack.GroupBuildpack) detectTrial { type depEntry struct { platform.BuildPlanEntry - earlyRequires []buildpack.GroupBuildpack - extraProvides []buildpack.GroupBuildpack + earlyRequires []buildpack.GroupBuildable + extraProvides []buildpack.GroupBuildable } type depMap map[string]depEntry @@ -360,22 +385,22 @@ func newDepMap(trial detectTrial) depMap { m := depMap{} for _, option := range trial { for _, p := range option.Provides { - m.provide(option.GroupBuildpack, p) + m.provide(option.GroupBuildable, p) } for _, r := range option.Requires { - m.require(option.GroupBuildpack, r) + m.require(option.GroupBuildable, r) } } return m } -func (m depMap) provide(bp buildpack.GroupBuildpack, provide buildpack.Provide) { +func (m depMap) provide(bp buildpack.GroupBuildable, provide buildpack.Provide) { entry := m[provide.Name] entry.extraProvides = append(entry.extraProvides, bp) m[provide.Name] = entry } -func (m depMap) require(bp buildpack.GroupBuildpack, require buildpack.Require) { +func (m depMap) require(bp buildpack.GroupBuildable, require buildpack.Require) { entry := m[require.Name] entry.Providers = append(entry.Providers, entry.extraProvides...) 
entry.extraProvides = nil @@ -388,7 +413,7 @@ func (m depMap) require(bp buildpack.GroupBuildpack, require buildpack.Require) m[require.Name] = entry } -func (m depMap) eachUnmetProvide(f func(name string, bp buildpack.GroupBuildpack) error) error { +func (m depMap) eachUnmetProvide(f func(name string, bp buildpack.GroupBuildable) error) error { for name, entry := range m { if len(entry.extraProvides) != 0 { for _, bp := range entry.extraProvides { @@ -401,7 +426,7 @@ func (m depMap) eachUnmetProvide(f func(name string, bp buildpack.GroupBuildpack return nil } -func (m depMap) eachUnmetRequire(f func(name string, bp buildpack.GroupBuildpack) error) error { +func (m depMap) eachUnmetRequire(f func(name string, bp buildpack.GroupBuildable) error) error { for name, entry := range m { if len(entry.earlyRequires) != 0 { for _, bp := range entry.earlyRequires { diff --git a/detector_test.go b/detector_test.go index 57cb686d9..23a63adff 100644 --- a/detector_test.go +++ b/detector_test.go @@ -47,7 +47,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { detector.Resolver = resolver buildpackStore = testmock.NewMockBuildpackStore(mockCtrl) - detector.Store = buildpackStore + detector.BuildpackStore = buildpackStore detector.Platform = platform.NewPlatform(api.Platform.Latest().String()) }) @@ -76,7 +76,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { API: "0.2", Order: []buildpack.Group{ { - Group: []buildpack.GroupBuildpack{ + Group: []buildpack.GroupBuildable{ {ID: "A", Version: "v1"}, {ID: "F", Version: "v1"}, {ID: "B", Version: "v1"}, @@ -93,13 +93,13 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { bpF1.EXPECT().ConfigFile().Return(&buildpack.Descriptor{ API: "0.2", Order: []buildpack.Group{ - {Group: []buildpack.GroupBuildpack{ + {Group: []buildpack.GroupBuildable{ {ID: "C", Version: "v1"}, }}, - {Group: []buildpack.GroupBuildpack{ + {Group: []buildpack.GroupBuildable{ {ID: "G", Version: "v1", Optional: true}, }}, - {Group: []buildpack.GroupBuildpack{ + {Group: []buildpack.GroupBuildable{ {ID: "D", Version: "v1"}, }}, }, @@ -113,7 +113,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { bpB1.EXPECT().ConfigFile().Return(&buildpack.Descriptor{API: "0.2"}) bpB1.EXPECT().Detect(gomock.Any(), gomock.Any()) - firstGroup := []buildpack.GroupBuildpack{ + firstGroup := []buildpack.GroupBuildable{ {ID: "A", Version: "v1", API: "0.3"}, {ID: "C", Version: "v1", API: "0.2"}, {ID: "B", Version: "v1", API: "0.2"}, @@ -122,7 +122,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { firstGroup, detector.Runs, ).Return( - []buildpack.GroupBuildpack{}, + []buildpack.GroupBuildable{}, []platform.BuildPlanEntry{}, lifecycle.ErrFailedDetection, ) @@ -132,13 +132,13 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { API: "0.2", Order: []buildpack.Group{ { - Group: []buildpack.GroupBuildpack{ + Group: []buildpack.GroupBuildable{ {ID: "A", Version: "v2"}, {ID: "B", Version: "v2"}, }, }, { - Group: []buildpack.GroupBuildpack{ + Group: []buildpack.GroupBuildable{ {ID: "C", Version: "v2"}, {ID: "D", Version: "v2"}, }, @@ -150,7 +150,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { bpB2.EXPECT().ConfigFile().Return(&buildpack.Descriptor{API: "0.2"}) bpB2.EXPECT().Detect(gomock.Any(), gomock.Any()) - secondGroup := []buildpack.GroupBuildpack{ + secondGroup := []buildpack.GroupBuildable{ {ID: "A", Version: "v1", API: "0.3"}, {ID: "B", Version: "v2", API: "0.2"}, } @@ -158,7 +158,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { 
secondGroup, detector.Runs, ).Return( - []buildpack.GroupBuildpack{}, + []buildpack.GroupBuildable{}, []platform.BuildPlanEntry{}, lifecycle.ErrFailedDetection, ).After(firstResolve) @@ -174,7 +174,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { buildpackStore.EXPECT().Lookup("B", "v1").Return(bpB1, nil) bpB1.EXPECT().ConfigFile().Return(&buildpack.Descriptor{API: "0.2"}) - thirdGroup := []buildpack.GroupBuildpack{ + thirdGroup := []buildpack.GroupBuildable{ {ID: "A", Version: "v1", API: "0.3"}, {ID: "C", Version: "v2", API: "0.2"}, {ID: "D", Version: "v2", API: "0.2"}, @@ -184,7 +184,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { thirdGroup, detector.Runs, ).Return( - []buildpack.GroupBuildpack{}, + []buildpack.GroupBuildable{}, []platform.BuildPlanEntry{}, lifecycle.ErrFailedDetection, ).After(secondResolve) @@ -192,7 +192,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { buildpackStore.EXPECT().Lookup("B", "v1").Return(bpB1, nil) bpB1.EXPECT().ConfigFile().Return(&buildpack.Descriptor{API: "0.2"}) - fourthGroup := []buildpack.GroupBuildpack{ + fourthGroup := []buildpack.GroupBuildable{ {ID: "A", Version: "v1", API: "0.3"}, {ID: "B", Version: "v1", API: "0.2"}, } @@ -200,7 +200,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { fourthGroup, detector.Runs, ).Return( - []buildpack.GroupBuildpack{}, + []buildpack.GroupBuildable{}, []platform.BuildPlanEntry{}, lifecycle.ErrFailedDetection, ).After(thirdResolve) @@ -212,7 +212,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { buildpackStore.EXPECT().Lookup("B", "v1").Return(bpB1, nil) bpB1.EXPECT().ConfigFile().Return(&buildpack.Descriptor{API: "0.2"}) - fifthGroup := []buildpack.GroupBuildpack{ + fifthGroup := []buildpack.GroupBuildable{ {ID: "A", Version: "v1", API: "0.3"}, {ID: "D", Version: "v1", API: "0.2"}, {ID: "B", Version: "v1", API: "0.2"}, @@ -221,13 +221,13 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { fifthGroup, detector.Runs, ).Return( - []buildpack.GroupBuildpack{}, + []buildpack.GroupBuildable{}, []platform.BuildPlanEntry{}, lifecycle.ErrFailedDetection, ).After(fourthResolve) order := buildpack.Order{ - {Group: []buildpack.GroupBuildpack{{ID: "E", Version: "v1"}}}, + {Group: []buildpack.GroupBuildable{{ID: "E", Version: "v1"}}}, } _, _, err := detector.Detect(order) if err, ok := err.(*buildpack.Error); !ok || err.Type != buildpack.ErrTypeFailedDetection { @@ -254,7 +254,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { API: "0.2", Order: []buildpack.Group{ { - Group: []buildpack.GroupBuildpack{ + Group: []buildpack.GroupBuildable{ {ID: "A", Version: "v1"}, {ID: "F", Version: "v1"}, {ID: "B", Version: "v1"}, @@ -274,13 +274,13 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { bpF1.EXPECT().ConfigFile().Return(&buildpack.Descriptor{ API: "0.2", Order: []buildpack.Group{ - {Group: []buildpack.GroupBuildpack{ + {Group: []buildpack.GroupBuildable{ {ID: "C", Version: "v1"}, }}, - {Group: []buildpack.GroupBuildpack{ + {Group: []buildpack.GroupBuildable{ {ID: "G", Version: "v1", Optional: true}, }}, - {Group: []buildpack.GroupBuildpack{ + {Group: []buildpack.GroupBuildable{ {ID: "D", Version: "v1"}, }}, }, @@ -294,7 +294,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { bpB1.EXPECT().ConfigFile().Return(&buildpack.Descriptor{API: "0.2"}) bpB1.EXPECT().Detect(gomock.Any(), gomock.Any()) - firstGroup := []buildpack.GroupBuildpack{ + firstGroup := []buildpack.GroupBuildable{ {ID: "A", Version: "v1", API: "0.3", 
Homepage: "Buildpack A Homepage"}, {ID: "C", Version: "v1", API: "0.2"}, {ID: "B", Version: "v1", API: "0.2"}, @@ -303,7 +303,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { firstGroup, detector.Runs, ).Return( - []buildpack.GroupBuildpack{}, + []buildpack.GroupBuildable{}, []platform.BuildPlanEntry{}, lifecycle.ErrFailedDetection, ) @@ -313,13 +313,13 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { API: "0.2", Order: []buildpack.Group{ { - Group: []buildpack.GroupBuildpack{ + Group: []buildpack.GroupBuildable{ {ID: "A", Version: "v2"}, {ID: "B", Version: "v2"}, }, }, { - Group: []buildpack.GroupBuildpack{ + Group: []buildpack.GroupBuildable{ {ID: "C", Version: "v2"}, {ID: "D", Version: "v2"}, }, @@ -331,7 +331,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { bpB2.EXPECT().ConfigFile().Return(&buildpack.Descriptor{API: "0.2"}) bpB2.EXPECT().Detect(gomock.Any(), gomock.Any()) - secondGroup := []buildpack.GroupBuildpack{ + secondGroup := []buildpack.GroupBuildable{ {ID: "A", Version: "v1", API: "0.3", Homepage: "Buildpack A Homepage"}, {ID: "B", Version: "v2", API: "0.2"}, } @@ -339,7 +339,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { secondGroup, detector.Runs, ).Return( - []buildpack.GroupBuildpack{}, + []buildpack.GroupBuildable{}, []platform.BuildPlanEntry{}, lifecycle.ErrFailedDetection, ).After(firstResolve) @@ -355,7 +355,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { buildpackStore.EXPECT().Lookup("B", "v1").Return(bpB1, nil) bpB1.EXPECT().ConfigFile().Return(&buildpack.Descriptor{API: "0.2"}) - thirdGroup := []buildpack.GroupBuildpack{ + thirdGroup := []buildpack.GroupBuildable{ {ID: "A", Version: "v1", API: "0.3", Homepage: "Buildpack A Homepage"}, {ID: "C", Version: "v2", API: "0.2"}, {ID: "D", Version: "v2", API: "0.2"}, @@ -365,7 +365,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { thirdGroup, detector.Runs, ).Return( - []buildpack.GroupBuildpack{}, + []buildpack.GroupBuildable{}, []platform.BuildPlanEntry{}, lifecycle.ErrFailedDetection, ).After(secondResolve) @@ -373,7 +373,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { buildpackStore.EXPECT().Lookup("B", "v1").Return(bpB1, nil) bpB1.EXPECT().ConfigFile().Return(&buildpack.Descriptor{API: "0.2"}) - fourthGroup := []buildpack.GroupBuildpack{ + fourthGroup := []buildpack.GroupBuildable{ {ID: "A", Version: "v1", API: "0.3", Homepage: "Buildpack A Homepage"}, {ID: "B", Version: "v1", API: "0.2"}, } @@ -387,7 +387,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { ).After(thirdResolve) order := buildpack.Order{ - {Group: []buildpack.GroupBuildpack{{ID: "E", Version: "v1"}}}, + {Group: []buildpack.GroupBuildable{{ID: "E", Version: "v1"}}}, } group, plan, err := detector.Detect(order) if err != nil { @@ -395,7 +395,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { } if s := cmp.Diff(group, buildpack.Group{ - Group: []buildpack.GroupBuildpack{ + Group: []buildpack.GroupBuildable{ {ID: "A", Version: "v1", API: "0.3", Homepage: "Buildpack A Homepage"}, {ID: "B", Version: "v1", API: "0.2"}, }, @@ -419,13 +419,13 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { bpB1.EXPECT().ConfigFile().Return(&buildpack.Descriptor{API: "0.2"}) bpB1.EXPECT().Detect(gomock.Any(), gomock.Any()) - group := []buildpack.GroupBuildpack{ + group := []buildpack.GroupBuildable{ {ID: "A", Version: "v1", API: "0.3"}, {ID: "B", Version: "v1", API: "0.2"}, } resolver.EXPECT().Resolve(group, detector.Runs).Return(group, 
[]platform.BuildPlanEntry{ { - Providers: []buildpack.GroupBuildpack{ + Providers: []buildpack.GroupBuildable{ {ID: "A", Version: "v1"}, }, Requires: []buildpack.Require{ @@ -436,7 +436,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { }, }, { - Providers: []buildpack.GroupBuildpack{ + Providers: []buildpack.GroupBuildable{ {ID: "B", Version: "v1"}, }, Requires: []buildpack.Require{ @@ -460,7 +460,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { if !hasEntries(plan.Entries, []platform.BuildPlanEntry{ { - Providers: []buildpack.GroupBuildpack{ + Providers: []buildpack.GroupBuildable{ {ID: "A", Version: "v1"}, }, Requires: []buildpack.Require{ @@ -468,7 +468,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { }, }, { - Providers: []buildpack.GroupBuildpack{ + Providers: []buildpack.GroupBuildable{ {ID: "B", Version: "v1"}, }, Requires: []buildpack.Require{ @@ -511,7 +511,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { Err: bpBerror, }) - group := []buildpack.GroupBuildpack{ + group := []buildpack.GroupBuildable{ {ID: "A", Version: "v1", API: "0.3"}, {ID: "B", Version: "v1", API: "0.2"}, } @@ -567,11 +567,11 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { bpA1.EXPECT().ConfigFile().Return(&buildpack.Descriptor{API: "0.3"}) bpA1.EXPECT().Detect(gomock.Any(), gomock.Any()) - group := []buildpack.GroupBuildpack{ + group := []buildpack.GroupBuildable{ {ID: "A", Version: "v1", API: "0.3"}, } resolver.EXPECT().Resolve(group, detector.Runs).Return( - []buildpack.GroupBuildpack{}, + []buildpack.GroupBuildable{}, []platform.BuildPlanEntry{}, lifecycle.ErrBuildpack, ) @@ -590,11 +590,11 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { bpA1.EXPECT().ConfigFile().Return(&buildpack.Descriptor{API: "0.3"}) bpA1.EXPECT().Detect(gomock.Any(), gomock.Any()) - group := []buildpack.GroupBuildpack{ + group := []buildpack.GroupBuildable{ {ID: "A", Version: "v1", API: "0.3"}, } resolver.EXPECT().Resolve(group, detector.Runs).Return( - []buildpack.GroupBuildpack{}, + []buildpack.GroupBuildable{}, []platform.BuildPlanEntry{}, lifecycle.ErrFailedDetection, ) @@ -622,7 +622,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { }) it("should fail if the group is empty", func() { - _, _, err := resolver.Resolve([]buildpack.GroupBuildpack{}, &sync.Map{}) + _, _, err := resolver.Resolve([]buildpack.GroupBuildable{}, &sync.Map{}) if err != lifecycle.ErrFailedDetection { t.Fatalf("Unexpected error:\n%s\n", err) } @@ -637,7 +637,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { }) it("should fail if the group has no viable buildpacks, even if no required buildpacks fail", func() { - group := []buildpack.GroupBuildpack{ + group := []buildpack.GroupBuildable{ {ID: "A", Version: "v1", Optional: true}, {ID: "B", Version: "v1", Optional: true}, } @@ -667,7 +667,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { }) it("should fail with specific error if any bp detect fails in an unexpected way", func() { - group := []buildpack.GroupBuildpack{ + group := []buildpack.GroupBuildable{ {ID: "A", Version: "v1", Optional: false}, {ID: "B", Version: "v1", Optional: false}, } @@ -695,7 +695,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { }) it("should not output detect pass and fail as info level", func() { - group := []buildpack.GroupBuildpack{ + group := []buildpack.GroupBuildable{ {ID: "A", Version: "v1", Optional: false}, {ID: "B", Version: "v1", Optional: false}, } @@ -721,7 +721,7 @@ func testDetector(t 
*testing.T, when spec.G, it spec.S) { }) it("should output detect errors as info level", func() { - group := []buildpack.GroupBuildpack{ + group := []buildpack.GroupBuildable{ {ID: "A", Version: "v1", Optional: false}, {ID: "B", Version: "v1", Optional: false}, } @@ -753,7 +753,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { }) it("should return a build plan with matched dependencies", func() { - group := []buildpack.GroupBuildpack{ + group := []buildpack.GroupBuildable{ {ID: "A", Version: "v1", API: "0.3", Homepage: "Buildpack A Homepage"}, {ID: "C", Version: "v2", API: "0.2"}, {ID: "D", Version: "v2", API: "0.2"}, @@ -819,14 +819,14 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { if !hasEntries(entries, []platform.BuildPlanEntry{ { - Providers: []buildpack.GroupBuildpack{ + Providers: []buildpack.GroupBuildable{ {ID: "A", Version: "v1"}, {ID: "C", Version: "v2"}, }, Requires: []buildpack.Require{{Name: "dep1"}, {Name: "dep1"}}, }, { - Providers: []buildpack.GroupBuildpack{ + Providers: []buildpack.GroupBuildable{ {ID: "A", Version: "v1"}, {ID: "C", Version: "v2"}, {ID: "D", Version: "v2"}, @@ -854,7 +854,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { }) it("should fail if all requires are not provided first", func() { - group := []buildpack.GroupBuildpack{ + group := []buildpack.GroupBuildable{ {ID: "A", Version: "v1", Optional: true}, {ID: "B", Version: "v1"}, {ID: "C", Version: "v1"}, @@ -911,7 +911,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { }) it("should fail if all provides are not required after", func() { - group := []buildpack.GroupBuildpack{ + group := []buildpack.GroupBuildable{ {ID: "A", Version: "v1"}, {ID: "B", Version: "v1"}, {ID: "C", Version: "v1", Optional: true}, @@ -968,7 +968,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { }) it("should succeed if unmet provides/requires are optional", func() { - group := []buildpack.GroupBuildpack{ + group := []buildpack.GroupBuildable{ {ID: "A", Version: "v1", Optional: true}, {ID: "B", Version: "v1", API: "0.2"}, {ID: "C", Version: "v1", Optional: true}, @@ -1011,7 +1011,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { t.Fatalf("Unexpected error:\n%s\n", err) } - if s := cmp.Diff(found, []buildpack.GroupBuildpack{ + if s := cmp.Diff(found, []buildpack.GroupBuildable{ {ID: "B", Version: "v1", API: "0.2"}, }); s != "" { t.Fatalf("Unexpected group:\n%s\n", s) @@ -1019,7 +1019,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { if !hasEntries(entries, []platform.BuildPlanEntry{ { - Providers: []buildpack.GroupBuildpack{{ID: "B", Version: "v1"}}, + Providers: []buildpack.GroupBuildable{{ID: "B", Version: "v1"}}, Requires: []buildpack.Require{{Name: "dep-present"}}, }, }) { @@ -1042,7 +1042,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { }) it("should fallback to alternate build plans", func() { - group := []buildpack.GroupBuildpack{ + group := []buildpack.GroupBuildable{ {ID: "A", Version: "v1", Optional: true, API: "0.3", Homepage: "Buildpack A Homepage"}, {ID: "B", Version: "v1", Optional: true, API: "0.2"}, {ID: "C", Version: "v1", API: "0.2"}, @@ -1132,7 +1132,7 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { t.Fatalf("Unexpected error:\n%s\n", err) } - if s := cmp.Diff(found, []buildpack.GroupBuildpack{ + if s := cmp.Diff(found, []buildpack.GroupBuildable{ {ID: "A", Version: "v1", API: "0.3", Homepage: "Buildpack A Homepage"}, {ID: "B", Version: "v1", API: "0.2"}, {ID: "C", Version: "v1", API: "0.2"}, 
@@ -1142,11 +1142,11 @@ func testDetector(t *testing.T, when spec.G, it spec.S) { if !hasEntries(entries, []platform.BuildPlanEntry{ { - Providers: []buildpack.GroupBuildpack{{ID: "A", Version: "v1"}}, + Providers: []buildpack.GroupBuildable{{ID: "A", Version: "v1"}}, Requires: []buildpack.Require{{Name: "dep1-present"}}, }, { - Providers: []buildpack.GroupBuildpack{{ID: "C", Version: "v1"}}, + Providers: []buildpack.GroupBuildable{{ID: "C", Version: "v1"}}, Requires: []buildpack.Require{{Name: "dep6-present"}}, }, }) { diff --git a/exporter.go b/exporter.go index e75db1e39..fc5696c10 100644 --- a/exporter.go +++ b/exporter.go @@ -32,7 +32,7 @@ type Cache interface { } type Exporter struct { - Buildpacks []buildpack.GroupBuildpack + Buildpacks []buildpack.GroupBuildable LayerFactory LayerFactory Logger Logger PlatformAPI *api.Version diff --git a/exporter_test.go b/exporter_test.go index c6cf57598..58e07bfe3 100644 --- a/exporter_test.go +++ b/exporter_test.go @@ -135,7 +135,7 @@ func testExporter(t *testing.T, when spec.G, it spec.S) { }).AnyTimes() exporter = &lifecycle.Exporter{ - Buildpacks: []buildpack.GroupBuildpack{ + Buildpacks: []buildpack.GroupBuildable{ {ID: "buildpack.id", Version: "1.2.3", API: api.Buildpack.Latest().String()}, {ID: "other.buildpack.id", Version: "4.5.6", API: api.Buildpack.Latest().String(), Optional: false}, }, @@ -290,7 +290,7 @@ func testExporter(t *testing.T, when spec.G, it spec.S) { when("the launch flag is in the top level table", func() { it.Before(func() { - exporter.Buildpacks = []buildpack.GroupBuildpack{{ID: "bad.buildpack.id", API: api.Buildpack.Latest().String()}} + exporter.Buildpacks = []buildpack.GroupBuildable{{ID: "bad.buildpack.id", API: api.Buildpack.Latest().String()}} fakeAppImage.AddPreviousLayer("bad-layer", "") opts.OrigMetadata = platform.LayersMetadata{ Buildpacks: []buildpack.LayersMetadata{{ @@ -1298,14 +1298,14 @@ version = "4.5.6" Name: "dep1", Metadata: map[string]interface{}{"version": string("v1")}, }, - Buildpack: buildpack.GroupBuildpack{ID: "buildpack.id", Version: "1.2.3"}, + Buildpack: buildpack.GroupBuildable{ID: "buildpack.id", Version: "1.2.3"}, }, { Require: buildpack.Require{ Name: "dep2", Metadata: map[string]interface{}{"version": string("v1")}, }, - Buildpack: buildpack.GroupBuildpack{ID: "other.buildpack.id", Version: "4.5.6"}, + Buildpack: buildpack.GroupBuildable{ID: "other.buildpack.id", Version: "4.5.6"}, }, }) }) @@ -1326,7 +1326,7 @@ version = "4.5.6" when("buildpack requires an escaped id", func() { it.Before(func() { - exporter.Buildpacks = []buildpack.GroupBuildpack{{ID: "some/escaped/bp/id", API: api.Buildpack.Latest().String()}} + exporter.Buildpacks = []buildpack.GroupBuildable{{ID: "some/escaped/bp/id", API: api.Buildpack.Latest().String()}} h.RecursiveCopy(t, filepath.Join("testdata", "exporter", "escaped-bpid", "layers"), opts.LayersDir) }) @@ -1407,7 +1407,7 @@ version = "4.5.6" when("buildpack API < 0.6", func() { it.Before(func() { - exporter.Buildpacks = []buildpack.GroupBuildpack{{ID: "old.buildpack.id", API: "0.5"}} + exporter.Buildpacks = []buildpack.GroupBuildable{{ID: "old.buildpack.id", API: "0.5"}} }) when("previous image exists", func() { diff --git a/extender/code/buildpackconfig/buildPackConfig.go b/extender/code/buildpackconfig/buildPackConfig.go new file mode 100644 index 000000000..7bb43c26e --- /dev/null +++ b/extender/code/buildpackconfig/buildPackConfig.go @@ -0,0 +1,387 @@ +package buildpackconfig + +import ( + "archive/tar" + "compress/gzip" + "encoding/json" + "errors" + 
"github.com/GoogleContainerTools/kaniko/pkg/config" + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + "github.com/GoogleContainerTools/kaniko/pkg/executor" + image_util "github.com/GoogleContainerTools/kaniko/pkg/image" + fs_util "github.com/GoogleContainerTools/kaniko/pkg/util" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/tarball" + "github.com/redhat-buildpacks/poc/kaniko/store" + "github.com/redhat-buildpacks/poc/kaniko/util" + "github.com/sirupsen/logrus" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "strings" +) + +const ( + homeDir = "/" + kanikoDir = "/kaniko" + cacheDir = "/layers/kaniko" + workspaceDir = "/workspace" + defaultDockerFileName = "Dockerfile" + layerTarFileName = "newlayer.tar" + destination = "new_image" + DOCKER_FILE_NAME_ENV_NAME = "DOCKER_FILE_NAME" + IGNORE_PATHS_ENV_NAME = "IGNORE_PATHS" +) + +var ignorePaths = []string{""} + +type BuildPackConfig struct { + LayerPath string + CacheDir string + Destination string + KanikoDir string + WorkspaceDir string + DockerFileName string + Opts config.KanikoOptions + NewImage v1.Image + BuildArgs []string + CnbEnvVars map[string]string + TarPaths []store.TarFile + LayerTarFileName string + HomeDir string + ExtractLayers bool + IgnorePaths []string +} + +func NewBuildPackConfig() *BuildPackConfig { + return &BuildPackConfig{ + LayerPath: "", + CacheDir: cacheDir, + WorkspaceDir: workspaceDir, + KanikoDir: kanikoDir, + HomeDir: homeDir, + LayerTarFileName: layerTarFileName, + Destination: destination, + } +} + +func (b *BuildPackConfig) InitDefaults() { + + logrus.Debug("Check if DOCKER_FILE_NAME env is defined...") + b.DockerFileName = util.GetValFromEnVar(DOCKER_FILE_NAME_ENV_NAME) + if b.DockerFileName == "" { + b.DockerFileName = defaultDockerFileName + } + logrus.Debugf("DockerfileName is: %s", b.DockerFileName) + + logrus.Debug("Check if IGNORE_PATHS env is defined...") + + result := util.GetValFromEnVar(IGNORE_PATHS_ENV_NAME) + if result == "" { + b.IgnorePaths = ignorePaths + } else { + b.IgnorePaths = strings.Split(result, ",") + } + logrus.Debugf("Additional paths to be ignored: %s", b.IgnorePaths) + for _, p := range b.IgnorePaths { + fs_util.AddToDefaultIgnoreList(fs_util.IgnoreListEntry{ + Path: p, + PrefixMatchOnly: false, + }) + } + + logrus.Debug("Checking if CNB_* env var have been declared ...") + b.CnbEnvVars = util.GetCNBEnvVar() + logrus.Debugf("CNB ENV var is: %s", b.CnbEnvVars) + + // Convert the CNB ENV vars to Kaniko BuildArgs + for k, v := range b.CnbEnvVars { + logrus.Debugf("CNB env key: %s, value: %s", k, v) + arg := k + "=" + v + b.BuildArgs = append(b.BuildArgs, arg) + } + + // setup the path to access the Dockerfile within the workspace dir + dockerFilePath := b.WorkspaceDir + "/" + b.DockerFileName + + // init the Kaniko options + b.Opts = config.KanikoOptions{ + CacheOptions: config.CacheOptions{CacheDir: cacheDir}, + DockerfilePath: dockerFilePath, + IgnoreVarRun: true, + NoPush: true, + SrcContext: b.WorkspaceDir, + SnapshotMode: "full", + BuildArgs: b.BuildArgs, + IgnorePaths: b.IgnorePaths, + TarPath: b.LayerTarFileName, + Destinations: []string{b.Destination}, + ForceBuildMetadata: true, + } + + logrus.Debug("KanikoOptions defined") +} + +func (b *BuildPackConfig) BuildDockerFile(opts config.KanikoOptions) (err error) { + + // If we look to the Kaniko code, they are moving under the root dire directory + logrus.Debug("Moving to root dir") + if err := os.Chdir("/"); err != nil { + panic(err) + } + + 
logrus.Debugf("Building the %s ...", b.DockerFileName) + logrus.Debugf("Options used %+v", opts) + b.NewImage, err = executor.DoBuild(&opts) + if (err != nil) { + return err + } + + // Push the image to its destination + logrus.Info("Push the image to its destination") + err = executor.DoPush(b.NewImage, &opts) + return err +} + +func (b *BuildPackConfig) ExtractLayersFromNewImageToKanikoDir() { + // Get layers + layers, err := b.NewImage.Layers() + if err != nil { + panic(err) + } + logrus.Infof("Generated %d layers", len(layers)) + + tarFiles := []store.TarFile{} + + for _, layer := range layers { + digest, err := layer.Digest() + digest.MarshalText() + if err != nil { + panic(err) + } + + tarFile := store.TarFile{ + Name: digest.String(), + Path: filepath.Join(b.KanikoDir, digest.String()+".tgz"), + } + logrus.Infof("Tar layer file: %s", tarFile.Path) + + // Add the tgz file to the list of the tgz files + tarFiles = append(tarFiles, tarFile) + + // Save the tgz layer file within the Kaniko dir + err = saveLayer(layer, tarFile.Path) + if err != nil { + panic(err) + } + } + b.TarPaths = tarFiles +} + +func (b *BuildPackConfig) ExtractImageTarFile(destinationTarPath string) { + err := b.untar(destinationTarPath, b.Opts.CacheDir,false) + if err != nil { + panic(err) + } +} + +func (b *BuildPackConfig) ExtractTarGZFilesWithoutBaseImage(baseTarGzipFile string) { + fullPath := path.Join(b.Opts.CacheDir,baseTarGzipFile) + for _, f := range util.FilterFiles(b.Opts.CacheDir, ".gz") { + if f != fullPath { + logrus.Infof("Tgz file to be extracted %s", f) + err := b.untar(f, b.Opts.CacheDir, true) + if err != nil { + panic(err) + } + } + } +} + +func (b *BuildPackConfig) CopyTGZFilesToCacheDir() { + // Copy the content of the kanikoDir to the cacheDir + util.Dir(b.KanikoDir, b.CacheDir) +} + +func (b *BuildPackConfig) SaveImageRawManifest() { + rawManifest, err := b.NewImage.RawManifest() + rawManifestFilePath := b.CacheDir + "/manifest.json" + err = ioutil.WriteFile(rawManifestFilePath, rawManifest, 0644) + if err != nil { + panic(err) + } + logrus.Debugf("Manifest file of the new image stored at %s", rawManifestFilePath) +} + +func (b *BuildPackConfig) SaveImageJSONConfig() { + // Get the Image config file + configJSON, err := b.NewImage.ConfigFile() + if err != nil { + panic(err) + } + configPath := filepath.Join(b.KanikoDir, "config.json") + c, err := os.Create(configPath) + if err != nil { + panic(err) + } + defer c.Close() + + err = json.NewEncoder(c).Encode(*configJSON) + if err != nil { + panic(err) + } + logrus.Debugf("Image JSON config file stored at %s", configPath) + + // Log the image json config + // TODO: Add a debug opt to log if needed + // readFileContent(c) +} + +func (b *BuildPackConfig) untar(tarFilePath string, targetDir string, isGzip bool) (err error) { + var gzr io.Reader + // Try first to open the file + f, err := os.Open(tarFilePath) + if err != nil { + return err + } + + logrus.Infof("Creating a reader for: %s", f.Name()) + if isGzip { + gzr, err = gzip.NewReader(f) + if err != nil { + return err + } + } else { + gzr = f + } + + tr := tar.NewReader(gzr) + // Get each tar segment + for true { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + logrus.Infof("ExtractTarGz: Next() failed: %v", err) + } + + // the target location where the dir/file should be created + target := filepath.Join(targetDir, hdr.Name) + logrus.Debugf("File to be extracted: %s", target) + + if b.ExtractLayers { + switch hdr.Typeflag { + case tar.TypeDir: + if _, err := 
+
+// FindBaseImageDigest parses the Dockerfile stages and returns the digest of the
+// last layer of the base image.
+func (b *BuildPackConfig) FindBaseImageDigest() v1.Hash {
+	var digest v1.Hash
+
+	stages, metaArgs, err := dockerfile.ParseStages(&b.Opts)
+	if err != nil {
+		panic(err)
+	}
+
+	kanikoStages, err := dockerfile.MakeKanikoStages(&b.Opts, stages, metaArgs)
+	if err != nil {
+		panic(err)
+	}
+
+	// Check the baseImage and log the layer digest
+	for _, kanikoStage := range kanikoStages {
+		var baseImage v1.Image
+		logrus.Infof("Kaniko stage is: %s, index: %d", kanikoStage.BaseName, kanikoStage.Index)
+
+		// Retrieve the SourceImage
+		baseImage, err = image_util.RetrieveSourceImage(kanikoStage, &b.Opts)
+		if err != nil {
+			panic(err)
+		}
+
+		// Get the layers of the Base Image
+		layers, err := baseImage.Layers()
+		if err != nil {
+			panic(err)
+		}
+		for _, l := range layers {
+			digest, err = l.Digest()
+			if err != nil {
+				panic(err)
+			}
+			logrus.Infof("Layer digest of base image is: %s", digest)
+		}
+	}
+	return digest
+}
+
+func (b *BuildPackConfig) LoadDescriptorAndConfig() (tarball.Manifest, error) {
+	var m tarball.Manifest
+	f, err := os.Open(path.Join(b.Opts.CacheDir, "manifest.json"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	if err := json.NewDecoder(f).Decode(&m); err != nil {
+		return nil, err
+	}
+
+	if m == nil {
+		return nil, errors.New("no valid manifest.json in tarball")
+	}
+
+	return m, nil
+}
+
+func saveLayer(layer v1.Layer, path string) error {
+	layerReader, err := layer.Compressed()
+	if err != nil {
+		return err
+	}
+	l, err := os.Create(path)
+	if err != nil {
+		return err
+	}
+	defer l.Close()
+	_, err = io.Copy(l, layerReader)
+	if err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/extender/code/go.mod b/extender/code/go.mod
new file mode 100644
index 000000000..4712acac3
--- /dev/null
+++ b/extender/code/go.mod
@@ -0,0 +1,19 @@
+module github.com/redhat-buildpacks/poc/kaniko
+
+require (
+	github.com/GoogleContainerTools/kaniko v1.7.0
+	github.com/google/go-containerregistry v0.4.1-0.20210128200529-19c2b639fab1
+	github.com/pkg/errors v0.9.1
+	github.com/sirupsen/logrus v1.8.1
+	github.com/stretchr/testify v1.6.1
+	gotest.tools v2.2.0+incompatible // indirect
+)
+
+replace (
+	github.com/Azure/go-autorest => github.com/Azure/go-autorest v14.2.0+incompatible
+	github.com/containerd/containerd v1.4.0-0.20191014053712-acdcf13d5eaf => github.com/containerd/containerd v0.0.0-20191014053712-acdcf13d5eaf
+	github.com/docker/docker v1.14.0-0.20190319215453-e7b5f7dbe98c => github.com/docker/docker v0.0.0-20190319215453-e7b5f7dbe98c
+	github.com/tonistiigi/fsutil 
v0.0.0-20190819224149-3d2716dd0a4d => github.com/tonistiigi/fsutil v0.0.0-20191018213012-0f039a052ca1 +) + +go 1.16 diff --git a/extender/code/go.sum b/extender/code/go.sum new file mode 100644 index 000000000..cc69e7cec --- /dev/null +++ b/extender/code/go.sum @@ -0,0 +1,943 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= +github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= +github.com/Azure/azure-sdk-for-go v43.0.0+incompatible h1:/wSNCu0e6EsHFR4Qa3vBEBbicaprEHMyyga9g8RTULI= +github.com/Azure/azure-sdk-for-go v43.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 
h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.6 h1:5YWtOnckcudzIw8lPPBcWOnmIFWMtHci1ZWAZulMSx0= +github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= +github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8= +github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= +github.com/Azure/go-autorest/autorest/validation v0.1.0 h1:ISSNzGUh+ZSzizJWOWzs8bwpXIePbGLW4z/AmUFGH5A= +github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20200415212048-7901bc822317/go.mod h1:DF8FZRxMHMGv/vP2lQP6h+dYzzjpuRn24VeRiYn3qjQ= +github.com/GoogleContainerTools/kaniko v1.6.0 h1:bD76vmMQe2PjbPqGNXOPviamUWSgxrz6QR+nQ6EYr78= +github.com/GoogleContainerTools/kaniko v1.6.0/go.mod 
h1:wn/PwJes1B0D2WCFdz2CsZWFn24dMSTAX+QdWBAd67o= +github.com/GoogleContainerTools/kaniko v1.7.0 h1:fkaibt8WSMLaSxJTs2/eKwv8tsGtRiIUU5r1kiNwp8k= +github.com/GoogleContainerTools/kaniko v1.7.0/go.mod h1:wn/PwJes1B0D2WCFdz2CsZWFn24dMSTAX+QdWBAd67o= +github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/hcsshim v0.8.5/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/apache/thrift v0.0.0-20161221203622-b2a4d4ae21c7/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/aws/aws-sdk-go v1.28.2/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.31.6 h1:nKjQbpXhdImctBh1e0iLg9iQW/X297LPPuY/9f92R2k= +github.com/aws/aws-sdk-go v1.31.6/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod 
h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= +github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/containerd v0.0.0-20191014053712-acdcf13d5eaf h1:juYsQtzJOxIaX5TW/3IU7qlvYU/nALKULBOEdiydFd0= +github.com/containerd/containerd v0.0.0-20191014053712-acdcf13d5eaf/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.2.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/continuity v0.0.0-20181001140422-bd77b46c8352/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6 h1:NmTXa/uVnDyp0TY5MKi197+3HWcnYWfnHGyaFthlnGw= +github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c h1:KFbqHhDeaHM7IfFtXHfUHMDaUStpM2YwBR+iJCIOsKk= +github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/go-cni v0.0.0-20190813230227-49fbd9b210f3/go.mod h1:2wlRxCQdiBY+OcjNg5x8kI+5mEL1fGt25L4IzQHYJsM= +github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/stargz-snapshotter/estargz v0.0.0-20201223015020-a9a0c2d64694 h1:OVQ4FVXeE6OjzuUifzER+7EulqTqw/94oKSqnooEowQ= +github.com/containerd/stargz-snapshotter/estargz v0.0.0-20201223015020-a9a0c2d64694/go.mod h1:E9uVkkBKf0EaC39j2JVW9EzdNhYvpz6eQIjILHebruk= +github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod 
[… go.sum additions truncated: several hundred machine-generated checksum entries covering github.com/coreos/*, github.com/docker/*, github.com/golang/*, github.com/google/*, github.com/opencontainers/*, github.com/prometheus/*, github.com/sirupsen/logrus, github.com/spf13/*, golang.org/x/*, google.golang.org/*, gopkg.in/*, and other transitive dependencies …]
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.1.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools 
v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.19.7 h1:MpHhls03C2pyzoYcpbe4QqYiiZjdvW+tuWq6TbjV14Y= +k8s.io/api v0.19.7/go.mod h1:KTryDUT3l6Mtv7K2J2486PNL9DBns3wOYTkGR+iz63Y= +k8s.io/apimachinery v0.19.7 h1:nTaEnYVH+i//aPgMA0zTEV2lfVLCV9LextqVd67mulc= +k8s.io/apimachinery v0.19.7/go.mod h1:6sRbGRAVY5DOCuZwB5XkqguBqpqLU6q/kOaOdk29z6Q= +k8s.io/apiserver v0.19.7/go.mod h1:DmWVQggNePspa+vSsVytVbS3iBSDTXdJVt0akfHacKk= +k8s.io/client-go v0.19.7 h1:SoJ4mzZ9LyXBGDe8MmpMznw0CwQ1ITWgsmG7GixvhUU= +k8s.io/client-go v0.19.7/go.mod h1:iytGI7S3kmv6bWnn+bSQUE4VlrEi4YFssvVB7J7Hvqg= +k8s.io/cloud-provider v0.19.7/go.mod h1:aO/VpUwkG+JQN7ZXc5WBLZ5NBXuq/Y5B6vri6U94PZ8= +k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= +k8s.io/code-generator v0.20.1/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= +k8s.io/component-base v0.19.7 h1:ZXS2VRWOWBOc2fTd1zjzhi/b/mkqFT9FDqiNsn1cH30= +k8s.io/component-base v0.19.7/go.mod h1:YX8spPBgwl3I6UGcSdQiEMAqRMSUsGQOW7SEr4+Qa3U= +k8s.io/csi-translation-lib v0.19.7/go.mod h1:WghizPQuzuygr2WdpgN2EjcNpDD2V4EAbxFXsgHgSBk= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.5.0 h1:8mOnjf1RmUPW6KRqQCfYSZq/K20Unmp3IhuZUhxl8KI= +k8s.io/klog/v2 v2.5.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kube-openapi v0.0.0-20210113233702-8566a335510f h1:ZcWbnalfwIst131Zff7dGd1HQdt+NA9q7z9Fi0vbsHY= +k8s.io/kube-openapi v0.0.0-20210113233702-8566a335510f/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/legacy-cloud-providers v0.19.7 h1:YJ/l/8/Hn56I9m1cudK8aNypRA/NvI/hYhg8fo/CTus= +k8s.io/legacy-cloud-providers v0.19.7/go.mod h1:dsZk4gH9QIwAtHQ8CK0Ps257xlfgoXE3tMkMNhW2xDU= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2 
h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/extender/code/logging/logging.go b/extender/code/logging/logging.go new file mode 100644 index 000000000..ffe81c1b0 --- /dev/null +++ b/extender/code/logging/logging.go @@ -0,0 +1,48 @@ +package logging + +import ( + "fmt" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + // Text format + FormatText = "text" + // Colored text format + FormatColor = "color" + // JSON format + FormatJSON = "json" +) + +// Configure sets the logrus logging level and formatter +func Configure(level, format string, logTimestamp bool) error { + lvl, err := logrus.ParseLevel(level) + if err != nil { + return errors.Wrap(err, "parsing log level") + } + logrus.SetLevel(lvl) + + var formatter logrus.Formatter + switch format { + case FormatText: + formatter = &logrus.TextFormatter{ + DisableColors: true, + FullTimestamp: logTimestamp, + } + case FormatColor: + formatter = &logrus.TextFormatter{ + ForceColors: true, + FullTimestamp: logTimestamp, + } + case FormatJSON: + formatter = &logrus.JSONFormatter{} + default: + return fmt.Errorf("not a valid log format: %q. Please specify one of (text, color, json)", format) + } + logrus.SetFormatter(formatter) + + return nil +} + diff --git a/extender/code/main.go b/extender/code/main.go new file mode 100644 index 000000000..7547df987 --- /dev/null +++ b/extender/code/main.go @@ -0,0 +1,202 @@ +package main + +import ( + "fmt" + cfg "github.com/redhat-buildpacks/poc/kaniko/buildpackconfig" + "github.com/redhat-buildpacks/poc/kaniko/logging" + util "github.com/redhat-buildpacks/poc/kaniko/util" + logrus "github.com/sirupsen/logrus" + "os" + "path" + "strconv" + "strings" + "sync" + "syscall" + "time" +) + +const ( + LOGGING_LEVEL_ENV_NAME = "LOGGING_LEVEL" + LOGGING_FORMAT_ENV_NAME = "LOGGING_FORMAT" + LOGGING_TIMESTAMP_ENV_NAME = "LOGGING_TIMESTAMP" + EXTRACT_LAYERS_ENV_NAME = "EXTRACT_LAYERS" + FILES_TO_SEARCH_ENV_NAME = "FILES_TO_SEARCH" + + DefaultLevel = "info" + DefaultLogTimestamp = false + DefaultLogFormat = "text" +) + +var ( + logLevel string // Log level (trace, debug, info, warn, error, fatal, panic) + logFormat string // Log format (text, color, json) + logTimestamp bool // Timestamp in log output + extractLayers bool // Extract layers from tgz files. 
Default is false
+	filesToSearch []string // List of files to search for, to check that they exist under the updated FS
+)
+
+func init() {
+	envVal := util.GetValFromEnVar(FILES_TO_SEARCH_ENV_NAME)
+	if envVal != "" {
+		filesToSearch = strings.Split(envVal, ",")
+	}
+
+	logLevel = util.GetValFromEnVar(LOGGING_LEVEL_ENV_NAME)
+	if logLevel == "" {
+		logLevel = DefaultLevel
+	}
+
+	logFormat = util.GetValFromEnVar(LOGGING_FORMAT_ENV_NAME)
+	if logFormat == "" {
+		logFormat = DefaultLogFormat
+	}
+
+	loggingTimeStampStr := util.GetValFromEnVar(LOGGING_TIMESTAMP_ENV_NAME)
+	if loggingTimeStampStr == "" {
+		logTimestamp = DefaultLogTimestamp
+	} else {
+		v, err := strconv.ParseBool(loggingTimeStampStr)
+		if err != nil {
+			logrus.Fatalf("parsing %s as bool failed: %s", LOGGING_TIMESTAMP_ENV_NAME, err)
+		}
+		logTimestamp = v
+	}
+
+	extractLayersStr := util.GetValFromEnVar(EXTRACT_LAYERS_ENV_NAME)
+	if extractLayersStr == "" {
+		logrus.Info("The layered tgz files will NOT be extracted to the home dir ...")
+		extractLayers = false
+	} else {
+		v, err := strconv.ParseBool(extractLayersStr)
+		if err != nil {
+			logrus.Fatalf("parsing %s as bool failed: %s", EXTRACT_LAYERS_ENV_NAME, err)
+		}
+		extractLayers = v
+		if extractLayers {
+			logrus.Info("The layered tgz files will be extracted to the home dir ...")
+		}
+	}
+
+	if err := logging.Configure(logLevel, logFormat, logTimestamp); err != nil {
+		panic(err)
+	}
+}
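A minimal usage sketch for the logging package added above, assuming only logging.Configure as defined in extender/code/logging/logging.go (the WithField key is illustrative):

	// Equivalent to LOGGING_LEVEL=debug LOGGING_FORMAT=json LOGGING_TIMESTAMP=true
	if err := logging.Configure("debug", "json", true); err != nil {
		logrus.Fatalf("configuring logging: %s", err) // rejects unknown levels and formats
	}
	logrus.WithField("component", "extender").Debug("logging configured")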
%v", extractLayers) + + // Launch a timer to measure the time needed to parse/copy/extract + start := time.Now() + + // Build the Dockerfile + logrus.Infof("Building the %s", b.DockerFileName) + err := b.BuildDockerFile() + if err != nil { + panic(err) + } + err = reapChildProcesses() + if err != nil { + panic(err) + } + + // Log the content of the Kaniko dir + logrus.Infof("Reading dir content of: %s", b.KanikoDir) + util.ReadFilesFromPath(b.KanikoDir) + + // Copy the tgz layer file to the Cache dir + srcPath := path.Join("/", b.LayerTarFileName) + dstPath := path.Join(b.CacheDir, b.LayerTarFileName) + logrus.Infof("Copy the %s file to the %s dir ...", srcPath, dstPath) + err = util.File(srcPath, dstPath) + if (err != nil) { + panic(err) + } + + logrus.Infof("Extract the content of the tarball file %s under the cache %s",b.Opts.TarPath,b.Opts.CacheDir) + b.ExtractImageTarFile(dstPath) + logrus.Info("Extract the layer file(s)") + //baseImageHash := b.FindBaseImageDigest() + //logrus.Infof("Hash of the base image is: %s",baseImageHash.String()) + descriptor, err := b.LoadDescriptorAndConfig() + if (err != nil) { + panic(err) + } + + logrus.Infof("%+v\n", descriptor) + layers := descriptor[0].Layers + /* for i := 1; i < len(layers); i++ { + logrus.Infof("Layer: %s", layers[i]) + }*/ + b.ExtractTarGZFilesWithoutBaseImage(layers[0]) + + // Check if files exist + if (len(filesToSearch) > 0) { + util.FindFiles(filesToSearch) + } + + // Time elapsed is ... + logrus.Infof("Time elapsed: %s",time.Since(start)) +} + +func reapChildProcesses() error { + procDir, err := os.Open("/proc") + if err != nil { + return err + } + + procDirs, err := procDir.Readdirnames(-1) + if err != nil { + return err + } + + tid := os.Getpid() + + var wg sync.WaitGroup + for _, dirName := range procDirs { + pid, err := strconv.Atoi(dirName) + if err == nil && pid != 1 && pid != tid { + p, err := os.FindProcess(pid) + if err != nil { + continue + } + err = p.Signal(syscall.SIGTERM) + if err != nil { + continue + } + wg.Add(1) + go func() { + defer wg.Done() + p.Wait() + }() + } + } + wg.Wait() + return nil +} \ No newline at end of file diff --git a/extender/code/out/kaniko-app b/extender/code/out/kaniko-app new file mode 100755 index 000000000..a6f1a9242 Binary files /dev/null and b/extender/code/out/kaniko-app differ diff --git a/extender/code/store/store.go b/extender/code/store/store.go new file mode 100644 index 000000000..9b62a76a5 --- /dev/null +++ b/extender/code/store/store.go @@ -0,0 +1,6 @@ +package store + +type TarFile struct { + Name string + Path string +} diff --git a/extender/code/test/util_test.go b/extender/code/test/util_test.go new file mode 100644 index 000000000..67248bd41 --- /dev/null +++ b/extender/code/test/util_test.go @@ -0,0 +1,43 @@ +package test + +import ( + cfg "github.com/redhat-buildpacks/poc/kaniko/buildpackconfig" + "github.com/redhat-buildpacks/poc/kaniko/util" + "github.com/stretchr/testify/assert" + "os" + "testing" +) + +var envTests = []struct { + name string + envKey string + envVal string + expectedBuildArgs []string +}{ + { + name: "CNB foo and bar key, val", + envKey: "CNB_foo", + envVal: "bar", + expectedBuildArgs: []string{"CNB_foo=bar"}, + }, +} + +// TODO: To be reviewed and improved as we cannot build it on macos due to error +// ../vendor/github.com/docker/docker/builder/dockerfile/internals.go:193:19: undefined: parseChownFlag +func TestEnvToBuildArgs(t *testing.T) { + + b := cfg.NewBuildPackConfig() + + for _, test := range envTests { + t.Run(test.name, func(t 
diff --git a/extender/code/store/store.go b/extender/code/store/store.go new file mode 100644 index 000000000..9b62a76a5 --- /dev/null +++ b/extender/code/store/store.go @@ -0,0 +1,6 @@
+package store
+
+type TarFile struct {
+	Name string
+	Path string
+}
diff --git a/extender/code/test/util_test.go b/extender/code/test/util_test.go new file mode 100644 index 000000000..67248bd41 --- /dev/null +++ b/extender/code/test/util_test.go @@ -0,0 +1,43 @@
+package test
+
+import (
+	cfg "github.com/redhat-buildpacks/poc/kaniko/buildpackconfig"
+	"github.com/redhat-buildpacks/poc/kaniko/util"
+	"github.com/stretchr/testify/assert"
+	"os"
+	"testing"
+)
+
+var envTests = []struct {
+	name              string
+	envKey            string
+	envVal            string
+	expectedBuildArgs []string // reserved for the build-arg conversion check, see TODO below
+}{
+	{
+		name:              "CNB foo and bar key, val",
+		envKey:            "CNB_foo",
+		envVal:            "bar",
+		expectedBuildArgs: []string{"CNB_foo=bar"},
+	},
+}
+
+// TODO: to be reviewed and improved, as we cannot build it on macOS due to the error
+// ../vendor/github.com/docker/docker/builder/dockerfile/internals.go:193:19: undefined: parseChownFlag
+func TestEnvToBuildArgs(t *testing.T) {
+	b := cfg.NewBuildPackConfig()
+
+	for _, test := range envTests {
+		t.Run(test.name, func(t *testing.T) {
+			// set CNB env var
+			os.Setenv(test.envKey, test.envVal)
+
+			// Read the env vars and check that the CNB entry was picked up
+			b.CnbEnvVars = util.GetCNBEnvVar()
+			assert.Equal(t, test.envVal, b.CnbEnvVars[test.envKey])
+		})
+	}
+}
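For context on what the test above exercises: util.GetCNBEnvVar (defined in the next file) copies the CNB-prefixed entries of the process environment into a map. Roughly, with illustrative values:

	os.Setenv("CNB_foo", "bar")
	vars := util.GetCNBEnvVar()
	fmt.Println(vars["CNB_foo"]) // prints "bar"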
diff --git a/extender/code/util/util.go b/extender/code/util/util.go new file mode 100644 index 000000000..d8fb577da --- /dev/null +++ b/extender/code/util/util.go @@ -0,0 +1,192 @@
+package util
+
+import (
+	"archive/tar"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+const root = "/"
+
+// GetCNBEnvVar collects the CNB-prefixed entries of the process environment into a map.
+func GetCNBEnvVar() map[string]string {
+	kvs := map[string]string{}
+
+	for _, env := range os.Environ() {
+		// Split on the first "=" only, so values containing "=" stay intact
+		kv := strings.SplitN(env, "=", 2)
+		if strings.HasPrefix(kv[0], "CNB") {
+			kvs[kv[0]] = kv[1]
+		}
+	}
+	return kvs
+}
+
+func GetValFromEnVar(envVar string) (val string) {
+	val, ok := os.LookupEnv(envVar)
+	if !ok {
+		logrus.Debugf("%s not set", envVar)
+		return ""
+	}
+	logrus.Debugf("%s=%s", envVar, val)
+	return val
+}
+
+func ReadFileContent(f *os.File) {
+	data, err := ioutil.ReadFile(f.Name())
+	if err != nil {
+		logrus.Errorf("Failed reading data from file: %s", err)
+	}
+	logrus.Debugf("\nFile Name: %s", f.Name())
+	logrus.Debugf("\nData: %s", data)
+}
+
+// File copies a single file from src to dst, preserving the source file mode
+func File(src, dst string) error {
+	srcfd, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer srcfd.Close()
+
+	dstfd, err := os.Create(dst)
+	if err != nil {
+		return err
+	}
+	defer dstfd.Close()
+
+	if _, err = io.Copy(dstfd, srcfd); err != nil {
+		return err
+	}
+	srcinfo, err := os.Stat(src)
+	if err != nil {
+		return err
+	}
+	return os.Chmod(dst, srcinfo.Mode())
+}
+
+// Dir recursively copies a directory tree from src to dst
+func Dir(src string, dst string) error {
+	srcinfo, err := os.Stat(src)
+	if err != nil {
+		return err
+	}
+
+	if err = os.MkdirAll(dst, srcinfo.Mode()); err != nil {
+		return err
+	}
+
+	fds, err := ioutil.ReadDir(src)
+	if err != nil {
+		return err
+	}
+	for _, fd := range fds {
+		srcfp := path.Join(src, fd.Name())
+		dstfp := path.Join(dst, fd.Name())
+
+		if fd.IsDir() {
+			if err = Dir(srcfp, dstfp); err != nil {
+				return err
+			}
+		} else {
+			if err = File(srcfp, dstfp); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func ReadFilesFromPath(path string) error {
+	files, err := ioutil.ReadDir(path)
+	if err != nil {
+		return err
+	}
+
+	for _, file := range files {
+		fmt.Println(file.Name(), file.IsDir())
+	}
+	return nil
+}
+
+func FindFiles(filesToSearch []string) error {
+	var files []string
+
+	err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			logrus.Debugf("skipping %s: %s", path, err)
+			return nil
+		}
+
+		// TODO: avoid hard-coding the path to ignore
+		if strings.HasPrefix(filepath.ToSlash(path), "/proc") {
+			return nil
+		}
+
+		for _, s := range filesToSearch {
+			logrus.Tracef("File searched is: %s", info.Name())
+			if !info.IsDir() && info.Name() == s {
+				files = append(files, path)
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	for _, file := range files {
+		logrus.Infof("File found: %s", file)
+	}
+	return nil
+}
+
+// UnGzip wraps r in a gzip reader, propagating any gzip header error
+func UnGzip(r io.Reader) (io.Reader, error) {
+	logrus.Info("Creating a gzip reader")
+	return gzip.NewReader(r)
+}
+
+// UnTar opens the tar file and returns a reader over its entries; the
+// underlying file is intentionally left open for the returned reader.
+func UnTar(tarFilePath string) (io.Reader, error) {
+	logrus.Infof("Opening the tar file: %s", tarFilePath)
+	f, err := os.Open(tarFilePath)
+	if err != nil {
+		return nil, err
+	}
+	logrus.Infof("Creating a reader for: %s", f.Name())
+	return tar.NewReader(f), nil
+}
+
+func FileExists(path string) bool {
+	_, err := os.Stat(path)
+	return !errors.Is(err, os.ErrNotExist)
+}
+
+// FilterFiles walks root and returns the files whose extension matches ext
+func FilterFiles(root, ext string) []string {
+	var files []string
+	err := filepath.Walk(root, func(path string, info os.FileInfo, e error) error {
+		if e != nil {
+			return e
+		}
+		logrus.Infof("Cache file: %s, ext: %s", info.Name(), filepath.Ext(path))
+		if !info.IsDir() && filepath.Ext(path) == ext {
+			files = append(files, path)
+		}
+		return nil
+	})
+	if err != nil {
+		logrus.Errorf("walking %s: %s", root, err)
+	}
+	logrus.Infof("Files found: %d", len(files))
+	return files
+}
diff --git a/extender/code/vendor/github.com/Azure/azure-sdk-for-go/LICENSE b/extender/code/vendor/github.com/Azure/azure-sdk-for-go/LICENSE new file mode 100644 index 000000000..047555ec7 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/azure-sdk-for-go/LICENSE @@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/extender/code/vendor/github.com/Azure/azure-sdk-for-go/NOTICE b/extender/code/vendor/github.com/Azure/azure-sdk-for-go/NOTICE new file mode 100644 index 000000000..2d1d72608 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/azure-sdk-for-go/NOTICE @@ -0,0 +1,5 @@ +Microsoft Azure-SDK-for-Go +Copyright 2014-2017 Microsoft + +This product includes software developed at +the Microsoft Corporation (https://www.microsoft.com). diff --git a/extender/code/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/client.go b/extender/code/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/client.go new file mode 100644 index 000000000..578a43823 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/client.go @@ -0,0 +1,52 @@ +// Package containerregistry implements the Azure ARM Containerregistry service API version . 
+// +// +package containerregistry + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // DefaultBaseURI is the default URI used for the service Containerregistry + DefaultBaseURI = "https://management.azure.com" +) + +// BaseClient is the base client for Containerregistry. +type BaseClient struct { + autorest.Client + BaseURI string + SubscriptionID string +} + +// New creates an instance of the BaseClient client. +func New(subscriptionID string) BaseClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the BaseClient client using a custom endpoint. Use this when interacting with +// an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { + return BaseClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + SubscriptionID: subscriptionID, + } +} diff --git a/extender/code/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/models.go b/extender/code/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/models.go new file mode 100644 index 000000000..cf551f2a5 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/models.go @@ -0,0 +1,5139 @@ +package containerregistry + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "encoding/json" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/to" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// The package's fully qualified name. +const fqdn = "github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry" + +// Action enumerates the values for action. 
+type Action string + +const ( + // Allow ... + Allow Action = "Allow" +) + +// PossibleActionValues returns an array of possible values for the Action const type. +func PossibleActionValues() []Action { + return []Action{Allow} +} + +// Architecture enumerates the values for architecture. +type Architecture string + +const ( + // Amd64 ... + Amd64 Architecture = "amd64" + // Arm ... + Arm Architecture = "arm" + // X86 ... + X86 Architecture = "x86" +) + +// PossibleArchitectureValues returns an array of possible values for the Architecture const type. +func PossibleArchitectureValues() []Architecture { + return []Architecture{Amd64, Arm, X86} +} + +// BaseImageDependencyType enumerates the values for base image dependency type. +type BaseImageDependencyType string + +const ( + // BuildTime ... + BuildTime BaseImageDependencyType = "BuildTime" + // RunTime ... + RunTime BaseImageDependencyType = "RunTime" +) + +// PossibleBaseImageDependencyTypeValues returns an array of possible values for the BaseImageDependencyType const type. +func PossibleBaseImageDependencyTypeValues() []BaseImageDependencyType { + return []BaseImageDependencyType{BuildTime, RunTime} +} + +// BaseImageTriggerType enumerates the values for base image trigger type. +type BaseImageTriggerType string + +const ( + // All ... + All BaseImageTriggerType = "All" + // Runtime ... + Runtime BaseImageTriggerType = "Runtime" +) + +// PossibleBaseImageTriggerTypeValues returns an array of possible values for the BaseImageTriggerType const type. +func PossibleBaseImageTriggerTypeValues() []BaseImageTriggerType { + return []BaseImageTriggerType{All, Runtime} +} + +// DefaultAction enumerates the values for default action. +type DefaultAction string + +const ( + // DefaultActionAllow ... + DefaultActionAllow DefaultAction = "Allow" + // DefaultActionDeny ... + DefaultActionDeny DefaultAction = "Deny" +) + +// PossibleDefaultActionValues returns an array of possible values for the DefaultAction const type. +func PossibleDefaultActionValues() []DefaultAction { + return []DefaultAction{DefaultActionAllow, DefaultActionDeny} +} + +// ImportMode enumerates the values for import mode. +type ImportMode string + +const ( + // Force ... + Force ImportMode = "Force" + // NoForce ... + NoForce ImportMode = "NoForce" +) + +// PossibleImportModeValues returns an array of possible values for the ImportMode const type. +func PossibleImportModeValues() []ImportMode { + return []ImportMode{Force, NoForce} +} + +// OS enumerates the values for os. +type OS string + +const ( + // Linux ... + Linux OS = "Linux" + // Windows ... + Windows OS = "Windows" +) + +// PossibleOSValues returns an array of possible values for the OS const type. +func PossibleOSValues() []OS { + return []OS{Linux, Windows} +} + +// PasswordName enumerates the values for password name. +type PasswordName string + +const ( + // Password ... + Password PasswordName = "password" + // Password2 ... + Password2 PasswordName = "password2" +) + +// PossiblePasswordNameValues returns an array of possible values for the PasswordName const type. +func PossiblePasswordNameValues() []PasswordName { + return []PasswordName{Password, Password2} +} + +// PolicyStatus enumerates the values for policy status. +type PolicyStatus string + +const ( + // Disabled ... + Disabled PolicyStatus = "disabled" + // Enabled ... + Enabled PolicyStatus = "enabled" +) + +// PossiblePolicyStatusValues returns an array of possible values for the PolicyStatus const type. 
+func PossiblePolicyStatusValues() []PolicyStatus { + return []PolicyStatus{Disabled, Enabled} +} + +// ProvisioningState enumerates the values for provisioning state. +type ProvisioningState string + +const ( + // Canceled ... + Canceled ProvisioningState = "Canceled" + // Creating ... + Creating ProvisioningState = "Creating" + // Deleting ... + Deleting ProvisioningState = "Deleting" + // Failed ... + Failed ProvisioningState = "Failed" + // Succeeded ... + Succeeded ProvisioningState = "Succeeded" + // Updating ... + Updating ProvisioningState = "Updating" +) + +// PossibleProvisioningStateValues returns an array of possible values for the ProvisioningState const type. +func PossibleProvisioningStateValues() []ProvisioningState { + return []ProvisioningState{Canceled, Creating, Deleting, Failed, Succeeded, Updating} +} + +// RegistryUsageUnit enumerates the values for registry usage unit. +type RegistryUsageUnit string + +const ( + // Bytes ... + Bytes RegistryUsageUnit = "Bytes" + // Count ... + Count RegistryUsageUnit = "Count" +) + +// PossibleRegistryUsageUnitValues returns an array of possible values for the RegistryUsageUnit const type. +func PossibleRegistryUsageUnitValues() []RegistryUsageUnit { + return []RegistryUsageUnit{Bytes, Count} +} + +// ResourceIdentityType enumerates the values for resource identity type. +type ResourceIdentityType string + +const ( + // None ... + None ResourceIdentityType = "None" + // SystemAssigned ... + SystemAssigned ResourceIdentityType = "SystemAssigned" + // SystemAssignedUserAssigned ... + SystemAssignedUserAssigned ResourceIdentityType = "SystemAssigned, UserAssigned" + // UserAssigned ... + UserAssigned ResourceIdentityType = "UserAssigned" +) + +// PossibleResourceIdentityTypeValues returns an array of possible values for the ResourceIdentityType const type. +func PossibleResourceIdentityTypeValues() []ResourceIdentityType { + return []ResourceIdentityType{None, SystemAssigned, SystemAssignedUserAssigned, UserAssigned} +} + +// RunStatus enumerates the values for run status. +type RunStatus string + +const ( + // RunStatusCanceled ... + RunStatusCanceled RunStatus = "Canceled" + // RunStatusError ... + RunStatusError RunStatus = "Error" + // RunStatusFailed ... + RunStatusFailed RunStatus = "Failed" + // RunStatusQueued ... + RunStatusQueued RunStatus = "Queued" + // RunStatusRunning ... + RunStatusRunning RunStatus = "Running" + // RunStatusStarted ... + RunStatusStarted RunStatus = "Started" + // RunStatusSucceeded ... + RunStatusSucceeded RunStatus = "Succeeded" + // RunStatusTimeout ... + RunStatusTimeout RunStatus = "Timeout" +) + +// PossibleRunStatusValues returns an array of possible values for the RunStatus const type. +func PossibleRunStatusValues() []RunStatus { + return []RunStatus{RunStatusCanceled, RunStatusError, RunStatusFailed, RunStatusQueued, RunStatusRunning, RunStatusStarted, RunStatusSucceeded, RunStatusTimeout} +} + +// RunType enumerates the values for run type. +type RunType string + +const ( + // AutoBuild ... + AutoBuild RunType = "AutoBuild" + // AutoRun ... + AutoRun RunType = "AutoRun" + // QuickBuild ... + QuickBuild RunType = "QuickBuild" + // QuickRun ... + QuickRun RunType = "QuickRun" +) + +// PossibleRunTypeValues returns an array of possible values for the RunType const type. +func PossibleRunTypeValues() []RunType { + return []RunType{AutoBuild, AutoRun, QuickBuild, QuickRun} +} + +// SecretObjectType enumerates the values for secret object type. 
+type SecretObjectType string + +const ( + // Opaque ... + Opaque SecretObjectType = "Opaque" + // Vaultsecret ... + Vaultsecret SecretObjectType = "Vaultsecret" +) + +// PossibleSecretObjectTypeValues returns an array of possible values for the SecretObjectType const type. +func PossibleSecretObjectTypeValues() []SecretObjectType { + return []SecretObjectType{Opaque, Vaultsecret} +} + +// SkuName enumerates the values for sku name. +type SkuName string + +const ( + // Basic ... + Basic SkuName = "Basic" + // Classic ... + Classic SkuName = "Classic" + // Premium ... + Premium SkuName = "Premium" + // Standard ... + Standard SkuName = "Standard" +) + +// PossibleSkuNameValues returns an array of possible values for the SkuName const type. +func PossibleSkuNameValues() []SkuName { + return []SkuName{Basic, Classic, Premium, Standard} +} + +// SkuTier enumerates the values for sku tier. +type SkuTier string + +const ( + // SkuTierBasic ... + SkuTierBasic SkuTier = "Basic" + // SkuTierClassic ... + SkuTierClassic SkuTier = "Classic" + // SkuTierPremium ... + SkuTierPremium SkuTier = "Premium" + // SkuTierStandard ... + SkuTierStandard SkuTier = "Standard" +) + +// PossibleSkuTierValues returns an array of possible values for the SkuTier const type. +func PossibleSkuTierValues() []SkuTier { + return []SkuTier{SkuTierBasic, SkuTierClassic, SkuTierPremium, SkuTierStandard} +} + +// SourceControlType enumerates the values for source control type. +type SourceControlType string + +const ( + // Github ... + Github SourceControlType = "Github" + // VisualStudioTeamService ... + VisualStudioTeamService SourceControlType = "VisualStudioTeamService" +) + +// PossibleSourceControlTypeValues returns an array of possible values for the SourceControlType const type. +func PossibleSourceControlTypeValues() []SourceControlType { + return []SourceControlType{Github, VisualStudioTeamService} +} + +// SourceRegistryLoginMode enumerates the values for source registry login mode. +type SourceRegistryLoginMode string + +const ( + // SourceRegistryLoginModeDefault ... + SourceRegistryLoginModeDefault SourceRegistryLoginMode = "Default" + // SourceRegistryLoginModeNone ... + SourceRegistryLoginModeNone SourceRegistryLoginMode = "None" +) + +// PossibleSourceRegistryLoginModeValues returns an array of possible values for the SourceRegistryLoginMode const type. +func PossibleSourceRegistryLoginModeValues() []SourceRegistryLoginMode { + return []SourceRegistryLoginMode{SourceRegistryLoginModeDefault, SourceRegistryLoginModeNone} +} + +// SourceTriggerEvent enumerates the values for source trigger event. +type SourceTriggerEvent string + +const ( + // Commit ... + Commit SourceTriggerEvent = "commit" + // Pullrequest ... + Pullrequest SourceTriggerEvent = "pullrequest" +) + +// PossibleSourceTriggerEventValues returns an array of possible values for the SourceTriggerEvent const type. +func PossibleSourceTriggerEventValues() []SourceTriggerEvent { + return []SourceTriggerEvent{Commit, Pullrequest} +} + +// TaskStatus enumerates the values for task status. +type TaskStatus string + +const ( + // TaskStatusDisabled ... + TaskStatusDisabled TaskStatus = "Disabled" + // TaskStatusEnabled ... + TaskStatusEnabled TaskStatus = "Enabled" +) + +// PossibleTaskStatusValues returns an array of possible values for the TaskStatus const type. +func PossibleTaskStatusValues() []TaskStatus { + return []TaskStatus{TaskStatusDisabled, TaskStatusEnabled} +} + +// TokenType enumerates the values for token type. 
+type TokenType string + +const ( + // OAuth ... + OAuth TokenType = "OAuth" + // PAT ... + PAT TokenType = "PAT" +) + +// PossibleTokenTypeValues returns an array of possible values for the TokenType const type. +func PossibleTokenTypeValues() []TokenType { + return []TokenType{OAuth, PAT} +} + +// TriggerStatus enumerates the values for trigger status. +type TriggerStatus string + +const ( + // TriggerStatusDisabled ... + TriggerStatusDisabled TriggerStatus = "Disabled" + // TriggerStatusEnabled ... + TriggerStatusEnabled TriggerStatus = "Enabled" +) + +// PossibleTriggerStatusValues returns an array of possible values for the TriggerStatus const type. +func PossibleTriggerStatusValues() []TriggerStatus { + return []TriggerStatus{TriggerStatusDisabled, TriggerStatusEnabled} +} + +// TrustPolicyType enumerates the values for trust policy type. +type TrustPolicyType string + +const ( + // Notary ... + Notary TrustPolicyType = "Notary" +) + +// PossibleTrustPolicyTypeValues returns an array of possible values for the TrustPolicyType const type. +func PossibleTrustPolicyTypeValues() []TrustPolicyType { + return []TrustPolicyType{Notary} +} + +// Type enumerates the values for type. +type Type string + +const ( + // TypeDockerBuildRequest ... + TypeDockerBuildRequest Type = "DockerBuildRequest" + // TypeEncodedTaskRunRequest ... + TypeEncodedTaskRunRequest Type = "EncodedTaskRunRequest" + // TypeFileTaskRunRequest ... + TypeFileTaskRunRequest Type = "FileTaskRunRequest" + // TypeRunRequest ... + TypeRunRequest Type = "RunRequest" + // TypeTaskRunRequest ... + TypeTaskRunRequest Type = "TaskRunRequest" +) + +// PossibleTypeValues returns an array of possible values for the Type const type. +func PossibleTypeValues() []Type { + return []Type{TypeDockerBuildRequest, TypeEncodedTaskRunRequest, TypeFileTaskRunRequest, TypeRunRequest, TypeTaskRunRequest} +} + +// TypeBasicTaskStepProperties enumerates the values for type basic task step properties. +type TypeBasicTaskStepProperties string + +const ( + // TypeDocker ... + TypeDocker TypeBasicTaskStepProperties = "Docker" + // TypeEncodedTask ... + TypeEncodedTask TypeBasicTaskStepProperties = "EncodedTask" + // TypeFileTask ... + TypeFileTask TypeBasicTaskStepProperties = "FileTask" + // TypeTaskStepProperties ... + TypeTaskStepProperties TypeBasicTaskStepProperties = "TaskStepProperties" +) + +// PossibleTypeBasicTaskStepPropertiesValues returns an array of possible values for the TypeBasicTaskStepProperties const type. +func PossibleTypeBasicTaskStepPropertiesValues() []TypeBasicTaskStepProperties { + return []TypeBasicTaskStepProperties{TypeDocker, TypeEncodedTask, TypeFileTask, TypeTaskStepProperties} +} + +// TypeBasicTaskStepUpdateParameters enumerates the values for type basic task step update parameters. +type TypeBasicTaskStepUpdateParameters string + +const ( + // TypeBasicTaskStepUpdateParametersTypeDocker ... + TypeBasicTaskStepUpdateParametersTypeDocker TypeBasicTaskStepUpdateParameters = "Docker" + // TypeBasicTaskStepUpdateParametersTypeEncodedTask ... + TypeBasicTaskStepUpdateParametersTypeEncodedTask TypeBasicTaskStepUpdateParameters = "EncodedTask" + // TypeBasicTaskStepUpdateParametersTypeFileTask ... + TypeBasicTaskStepUpdateParametersTypeFileTask TypeBasicTaskStepUpdateParameters = "FileTask" + // TypeBasicTaskStepUpdateParametersTypeTaskStepUpdateParameters ... 
+ TypeBasicTaskStepUpdateParametersTypeTaskStepUpdateParameters TypeBasicTaskStepUpdateParameters = "TaskStepUpdateParameters" +) + +// PossibleTypeBasicTaskStepUpdateParametersValues returns an array of possible values for the TypeBasicTaskStepUpdateParameters const type. +func PossibleTypeBasicTaskStepUpdateParametersValues() []TypeBasicTaskStepUpdateParameters { + return []TypeBasicTaskStepUpdateParameters{TypeBasicTaskStepUpdateParametersTypeDocker, TypeBasicTaskStepUpdateParametersTypeEncodedTask, TypeBasicTaskStepUpdateParametersTypeFileTask, TypeBasicTaskStepUpdateParametersTypeTaskStepUpdateParameters} +} + +// Variant enumerates the values for variant. +type Variant string + +const ( + // V6 ... + V6 Variant = "v6" + // V7 ... + V7 Variant = "v7" + // V8 ... + V8 Variant = "v8" +) + +// PossibleVariantValues returns an array of possible values for the Variant const type. +func PossibleVariantValues() []Variant { + return []Variant{V6, V7, V8} +} + +// WebhookAction enumerates the values for webhook action. +type WebhookAction string + +const ( + // ChartDelete ... + ChartDelete WebhookAction = "chart_delete" + // ChartPush ... + ChartPush WebhookAction = "chart_push" + // Delete ... + Delete WebhookAction = "delete" + // Push ... + Push WebhookAction = "push" + // Quarantine ... + Quarantine WebhookAction = "quarantine" +) + +// PossibleWebhookActionValues returns an array of possible values for the WebhookAction const type. +func PossibleWebhookActionValues() []WebhookAction { + return []WebhookAction{ChartDelete, ChartPush, Delete, Push, Quarantine} +} + +// WebhookStatus enumerates the values for webhook status. +type WebhookStatus string + +const ( + // WebhookStatusDisabled ... + WebhookStatusDisabled WebhookStatus = "disabled" + // WebhookStatusEnabled ... + WebhookStatusEnabled WebhookStatus = "enabled" +) + +// PossibleWebhookStatusValues returns an array of possible values for the WebhookStatus const type. +func PossibleWebhookStatusValues() []WebhookStatus { + return []WebhookStatus{WebhookStatusDisabled, WebhookStatusEnabled} +} + +// Actor the agent that initiated the event. For most situations, this could be from the authorization +// context of the request. +type Actor struct { + // Name - The subject or username associated with the request context that generated the event. + Name *string `json:"name,omitempty"` +} + +// AgentProperties the properties that determine the run agent configuration. +type AgentProperties struct { + // CPU - The CPU configuration in terms of number of cores required for the run. + CPU *int32 `json:"cpu,omitempty"` +} + +// Argument the properties of a run argument. +type Argument struct { + // Name - The name of the argument. + Name *string `json:"name,omitempty"` + // Value - The value of the argument. + Value *string `json:"value,omitempty"` + // IsSecret - Flag to indicate whether the argument represents a secret and want to be removed from build logs. + IsSecret *bool `json:"isSecret,omitempty"` +} + +// AuthInfo the authorization properties for accessing the source code repository. +type AuthInfo struct { + // TokenType - The type of Auth token. Possible values include: 'PAT', 'OAuth' + TokenType TokenType `json:"tokenType,omitempty"` + // Token - The access token used to access the source control provider. + Token *string `json:"token,omitempty"` + // RefreshToken - The refresh token used to refresh the access token. + RefreshToken *string `json:"refreshToken,omitempty"` + // Scope - The scope of the access token. 
+ Scope *string `json:"scope,omitempty"` + // ExpiresIn - Time in seconds that the token remains valid + ExpiresIn *int32 `json:"expiresIn,omitempty"` +} + +// AuthInfoUpdateParameters the authorization properties for accessing the source code repository. +type AuthInfoUpdateParameters struct { + // TokenType - The type of Auth token. Possible values include: 'PAT', 'OAuth' + TokenType TokenType `json:"tokenType,omitempty"` + // Token - The access token used to access the source control provider. + Token *string `json:"token,omitempty"` + // RefreshToken - The refresh token used to refresh the access token. + RefreshToken *string `json:"refreshToken,omitempty"` + // Scope - The scope of the access token. + Scope *string `json:"scope,omitempty"` + // ExpiresIn - Time in seconds that the token remains valid + ExpiresIn *int32 `json:"expiresIn,omitempty"` +} + +// BaseImageDependency properties that describe a base image dependency. +type BaseImageDependency struct { + // Type - The type of the base image dependency. Possible values include: 'BuildTime', 'RunTime' + Type BaseImageDependencyType `json:"type,omitempty"` + // Registry - The registry login server. + Registry *string `json:"registry,omitempty"` + // Repository - The repository name. + Repository *string `json:"repository,omitempty"` + // Tag - The tag name. + Tag *string `json:"tag,omitempty"` + // Digest - The sha256-based digest of the image manifest. + Digest *string `json:"digest,omitempty"` +} + +// BaseImageTrigger the trigger based on base image dependency. +type BaseImageTrigger struct { + // BaseImageTriggerType - The type of the auto trigger for base image dependency updates. Possible values include: 'All', 'Runtime' + BaseImageTriggerType BaseImageTriggerType `json:"baseImageTriggerType,omitempty"` + // Status - The current status of trigger. Possible values include: 'TriggerStatusDisabled', 'TriggerStatusEnabled' + Status TriggerStatus `json:"status,omitempty"` + // Name - The name of the trigger. + Name *string `json:"name,omitempty"` +} + +// BaseImageTriggerUpdateParameters the properties for updating base image dependency trigger. +type BaseImageTriggerUpdateParameters struct { + // BaseImageTriggerType - The type of the auto trigger for base image dependency updates. Possible values include: 'All', 'Runtime' + BaseImageTriggerType BaseImageTriggerType `json:"baseImageTriggerType,omitempty"` + // Status - The current status of trigger. Possible values include: 'TriggerStatusDisabled', 'TriggerStatusEnabled' + Status TriggerStatus `json:"status,omitempty"` + // Name - The name of the trigger. + Name *string `json:"name,omitempty"` +} + +// CallbackConfig the configuration of service URI and custom headers for the webhook. +type CallbackConfig struct { + autorest.Response `json:"-"` + // ServiceURI - The service URI for the webhook to post notifications. + ServiceURI *string `json:"serviceUri,omitempty"` + // CustomHeaders - Custom headers that will be added to the webhook notifications. + CustomHeaders map[string]*string `json:"customHeaders"` +} + +// MarshalJSON is the custom marshaler for CallbackConfig. +func (cc CallbackConfig) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if cc.ServiceURI != nil { + objectMap["serviceUri"] = cc.ServiceURI + } + if cc.CustomHeaders != nil { + objectMap["customHeaders"] = cc.CustomHeaders + } + return json.Marshal(objectMap) +} + +// Credentials the parameters that describes a set of credentials that will be used when a run is invoked. 
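// Sketch (not part of the generated code): the models use pointer fields so
// that unset values drop out of the JSON payload; the autorest "to" helpers
// keep construction terse. Assumes the to package already used by this file;
// the token and expiry values are placeholders.
func exampleAuthInfo() AuthInfo {
	return AuthInfo{
		TokenType: OAuth,
		Token:     to.StringPtr("<access-token>"),
		ExpiresIn: to.Int32Ptr(3600),
	}
}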
+type Credentials struct { + // SourceRegistry - Describes the credential parameters for accessing the source registry. + SourceRegistry *SourceRegistryCredentials `json:"sourceRegistry,omitempty"` + // CustomRegistries - Describes the credential parameters for accessing other custom registries. The key + // for the dictionary item will be the registry login server (myregistry.azurecr.io) and + // the value of the item will be the registry credentials for accessing the registry. + CustomRegistries map[string]*CustomRegistryCredentials `json:"customRegistries"` +} + +// MarshalJSON is the custom marshaler for Credentials. +func (c Credentials) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if c.SourceRegistry != nil { + objectMap["sourceRegistry"] = c.SourceRegistry + } + if c.CustomRegistries != nil { + objectMap["customRegistries"] = c.CustomRegistries + } + return json.Marshal(objectMap) +} + +// CustomRegistryCredentials describes the credentials that will be used to access a custom registry during +// a run. +type CustomRegistryCredentials struct { + // UserName - The username for logging into the custom registry. + UserName *SecretObject `json:"userName,omitempty"` + // Password - The password for logging into the custom registry. The password is a secret + // object that allows multiple ways of providing the value for it. + Password *SecretObject `json:"password,omitempty"` + // Identity - Indicates the managed identity assigned to the custom credential. If a user-assigned identity + // this value is the Client ID. If a system-assigned identity, the value will be `system`. In + // the case of a system-assigned identity, the Client ID will be determined by the runner. This + // identity may be used to authenticate to key vault to retrieve credentials or it may be the only + // source of authentication used for accessing the registry. + Identity *string `json:"identity,omitempty"` +} + +// DockerBuildRequest the parameters for a docker quick build. +type DockerBuildRequest struct { + // ImageNames - The fully qualified image names including the repository and tag. + ImageNames *[]string `json:"imageNames,omitempty"` + // IsPushEnabled - The value of this property indicates whether the image built should be pushed to the registry or not. + IsPushEnabled *bool `json:"isPushEnabled,omitempty"` + // NoCache - The value of this property indicates whether the image cache is enabled or not. + NoCache *bool `json:"noCache,omitempty"` + // DockerFilePath - The Docker file path relative to the source location. + DockerFilePath *string `json:"dockerFilePath,omitempty"` + // Target - The name of the target build stage for the docker build. + Target *string `json:"target,omitempty"` + // Arguments - The collection of override arguments to be used when executing the run. + Arguments *[]Argument `json:"arguments,omitempty"` + // Timeout - Run timeout in seconds. + Timeout *int32 `json:"timeout,omitempty"` + // Platform - The platform properties against which the run has to happen. + Platform *PlatformProperties `json:"platform,omitempty"` + // AgentConfiguration - The machine configuration of the run agent. + AgentConfiguration *AgentProperties `json:"agentConfiguration,omitempty"` + // SourceLocation - The URL(absolute or relative) of the source context. It can be an URL to a tar or git repository. + // If it is relative URL, the relative path should be obtained from calling listBuildSourceUploadUrl API. 
+ SourceLocation *string `json:"sourceLocation,omitempty"` + // Credentials - The properties that describes a set of credentials that will be used when this run is invoked. + Credentials *Credentials `json:"credentials,omitempty"` + // IsArchiveEnabled - The value that indicates whether archiving is enabled for the run or not. + IsArchiveEnabled *bool `json:"isArchiveEnabled,omitempty"` + // Type - Possible values include: 'TypeRunRequest', 'TypeDockerBuildRequest', 'TypeFileTaskRunRequest', 'TypeTaskRunRequest', 'TypeEncodedTaskRunRequest' + Type Type `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for DockerBuildRequest. +func (dbr DockerBuildRequest) MarshalJSON() ([]byte, error) { + dbr.Type = TypeDockerBuildRequest + objectMap := make(map[string]interface{}) + if dbr.ImageNames != nil { + objectMap["imageNames"] = dbr.ImageNames + } + if dbr.IsPushEnabled != nil { + objectMap["isPushEnabled"] = dbr.IsPushEnabled + } + if dbr.NoCache != nil { + objectMap["noCache"] = dbr.NoCache + } + if dbr.DockerFilePath != nil { + objectMap["dockerFilePath"] = dbr.DockerFilePath + } + if dbr.Target != nil { + objectMap["target"] = dbr.Target + } + if dbr.Arguments != nil { + objectMap["arguments"] = dbr.Arguments + } + if dbr.Timeout != nil { + objectMap["timeout"] = dbr.Timeout + } + if dbr.Platform != nil { + objectMap["platform"] = dbr.Platform + } + if dbr.AgentConfiguration != nil { + objectMap["agentConfiguration"] = dbr.AgentConfiguration + } + if dbr.SourceLocation != nil { + objectMap["sourceLocation"] = dbr.SourceLocation + } + if dbr.Credentials != nil { + objectMap["credentials"] = dbr.Credentials + } + if dbr.IsArchiveEnabled != nil { + objectMap["isArchiveEnabled"] = dbr.IsArchiveEnabled + } + if dbr.Type != "" { + objectMap["type"] = dbr.Type + } + return json.Marshal(objectMap) +} + +// AsDockerBuildRequest is the BasicRunRequest implementation for DockerBuildRequest. +func (dbr DockerBuildRequest) AsDockerBuildRequest() (*DockerBuildRequest, bool) { + return &dbr, true +} + +// AsFileTaskRunRequest is the BasicRunRequest implementation for DockerBuildRequest. +func (dbr DockerBuildRequest) AsFileTaskRunRequest() (*FileTaskRunRequest, bool) { + return nil, false +} + +// AsTaskRunRequest is the BasicRunRequest implementation for DockerBuildRequest. +func (dbr DockerBuildRequest) AsTaskRunRequest() (*TaskRunRequest, bool) { + return nil, false +} + +// AsEncodedTaskRunRequest is the BasicRunRequest implementation for DockerBuildRequest. +func (dbr DockerBuildRequest) AsEncodedTaskRunRequest() (*EncodedTaskRunRequest, bool) { + return nil, false +} + +// AsRunRequest is the BasicRunRequest implementation for DockerBuildRequest. +func (dbr DockerBuildRequest) AsRunRequest() (*RunRequest, bool) { + return nil, false +} + +// AsBasicRunRequest is the BasicRunRequest implementation for DockerBuildRequest. +func (dbr DockerBuildRequest) AsBasicRunRequest() (BasicRunRequest, bool) { + return &dbr, true +} + +// DockerBuildStep the Docker build step. +type DockerBuildStep struct { + // ImageNames - The fully qualified image names including the repository and tag. + ImageNames *[]string `json:"imageNames,omitempty"` + // IsPushEnabled - The value of this property indicates whether the image built should be pushed to the registry or not. + IsPushEnabled *bool `json:"isPushEnabled,omitempty"` + // NoCache - The value of this property indicates whether the image cache is enabled or not. 
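// Sketch (not part of the generated code): MarshalJSON above stamps the
// discriminator, so callers never set Type by hand; marshaling always emits
// "type":"DockerBuildRequest". Image name and file path are placeholders.
func exampleDockerBuildRequestJSON() ([]byte, error) {
	req := DockerBuildRequest{
		DockerFilePath: to.StringPtr("Dockerfile"),
		ImageNames:     &[]string{"myregistry.azurecr.io/hello:v1"},
		IsPushEnabled:  to.BoolPtr(true),
	}
	return json.Marshal(req) // Type is forced to TypeDockerBuildRequest by the custom marshaler
}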
+ NoCache *bool `json:"noCache,omitempty"` + // DockerFilePath - The Docker file path relative to the source context. + DockerFilePath *string `json:"dockerFilePath,omitempty"` + // Target - The name of the target build stage for the docker build. + Target *string `json:"target,omitempty"` + // Arguments - The collection of override arguments to be used when executing this build step. + Arguments *[]Argument `json:"arguments,omitempty"` + // BaseImageDependencies - READ-ONLY; List of base image dependencies for a step. + BaseImageDependencies *[]BaseImageDependency `json:"baseImageDependencies,omitempty"` + // ContextPath - The URL(absolute or relative) of the source context for the task step. + ContextPath *string `json:"contextPath,omitempty"` + // ContextAccessToken - The token (git PAT or SAS token of storage account blob) associated with the context for a step. + ContextAccessToken *string `json:"contextAccessToken,omitempty"` + // Type - Possible values include: 'TypeTaskStepProperties', 'TypeDocker', 'TypeFileTask', 'TypeEncodedTask' + Type TypeBasicTaskStepProperties `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for DockerBuildStep. +func (dbs DockerBuildStep) MarshalJSON() ([]byte, error) { + dbs.Type = TypeDocker + objectMap := make(map[string]interface{}) + if dbs.ImageNames != nil { + objectMap["imageNames"] = dbs.ImageNames + } + if dbs.IsPushEnabled != nil { + objectMap["isPushEnabled"] = dbs.IsPushEnabled + } + if dbs.NoCache != nil { + objectMap["noCache"] = dbs.NoCache + } + if dbs.DockerFilePath != nil { + objectMap["dockerFilePath"] = dbs.DockerFilePath + } + if dbs.Target != nil { + objectMap["target"] = dbs.Target + } + if dbs.Arguments != nil { + objectMap["arguments"] = dbs.Arguments + } + if dbs.ContextPath != nil { + objectMap["contextPath"] = dbs.ContextPath + } + if dbs.ContextAccessToken != nil { + objectMap["contextAccessToken"] = dbs.ContextAccessToken + } + if dbs.Type != "" { + objectMap["type"] = dbs.Type + } + return json.Marshal(objectMap) +} + +// AsDockerBuildStep is the BasicTaskStepProperties implementation for DockerBuildStep. +func (dbs DockerBuildStep) AsDockerBuildStep() (*DockerBuildStep, bool) { + return &dbs, true +} + +// AsFileTaskStep is the BasicTaskStepProperties implementation for DockerBuildStep. +func (dbs DockerBuildStep) AsFileTaskStep() (*FileTaskStep, bool) { + return nil, false +} + +// AsEncodedTaskStep is the BasicTaskStepProperties implementation for DockerBuildStep. +func (dbs DockerBuildStep) AsEncodedTaskStep() (*EncodedTaskStep, bool) { + return nil, false +} + +// AsTaskStepProperties is the BasicTaskStepProperties implementation for DockerBuildStep. +func (dbs DockerBuildStep) AsTaskStepProperties() (*TaskStepProperties, bool) { + return nil, false +} + +// AsBasicTaskStepProperties is the BasicTaskStepProperties implementation for DockerBuildStep. +func (dbs DockerBuildStep) AsBasicTaskStepProperties() (BasicTaskStepProperties, bool) { + return &dbs, true +} + +// DockerBuildStepUpdateParameters the properties for updating a docker build step. +type DockerBuildStepUpdateParameters struct { + // ImageNames - The fully qualified image names including the repository and tag. + ImageNames *[]string `json:"imageNames,omitempty"` + // IsPushEnabled - The value of this property indicates whether the image built should be pushed to the registry or not. + IsPushEnabled *bool `json:"isPushEnabled,omitempty"` + // NoCache - The value of this property indicates whether the image cache is enabled or not. 
+ NoCache *bool `json:"noCache,omitempty"` + // DockerFilePath - The Docker file path relative to the source context. + DockerFilePath *string `json:"dockerFilePath,omitempty"` + // Arguments - The collection of override arguments to be used when executing this build step. + Arguments *[]Argument `json:"arguments,omitempty"` + // Target - The name of the target build stage for the docker build. + Target *string `json:"target,omitempty"` + // ContextPath - The URL(absolute or relative) of the source context for the task step. + ContextPath *string `json:"contextPath,omitempty"` + // ContextAccessToken - The token (git PAT or SAS token of storage account blob) associated with the context for a step. + ContextAccessToken *string `json:"contextAccessToken,omitempty"` + // Type - Possible values include: 'TypeBasicTaskStepUpdateParametersTypeTaskStepUpdateParameters', 'TypeBasicTaskStepUpdateParametersTypeDocker', 'TypeBasicTaskStepUpdateParametersTypeFileTask', 'TypeBasicTaskStepUpdateParametersTypeEncodedTask' + Type TypeBasicTaskStepUpdateParameters `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for DockerBuildStepUpdateParameters. +func (dbsup DockerBuildStepUpdateParameters) MarshalJSON() ([]byte, error) { + dbsup.Type = TypeBasicTaskStepUpdateParametersTypeDocker + objectMap := make(map[string]interface{}) + if dbsup.ImageNames != nil { + objectMap["imageNames"] = dbsup.ImageNames + } + if dbsup.IsPushEnabled != nil { + objectMap["isPushEnabled"] = dbsup.IsPushEnabled + } + if dbsup.NoCache != nil { + objectMap["noCache"] = dbsup.NoCache + } + if dbsup.DockerFilePath != nil { + objectMap["dockerFilePath"] = dbsup.DockerFilePath + } + if dbsup.Arguments != nil { + objectMap["arguments"] = dbsup.Arguments + } + if dbsup.Target != nil { + objectMap["target"] = dbsup.Target + } + if dbsup.ContextPath != nil { + objectMap["contextPath"] = dbsup.ContextPath + } + if dbsup.ContextAccessToken != nil { + objectMap["contextAccessToken"] = dbsup.ContextAccessToken + } + if dbsup.Type != "" { + objectMap["type"] = dbsup.Type + } + return json.Marshal(objectMap) +} + +// AsDockerBuildStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for DockerBuildStepUpdateParameters. +func (dbsup DockerBuildStepUpdateParameters) AsDockerBuildStepUpdateParameters() (*DockerBuildStepUpdateParameters, bool) { + return &dbsup, true +} + +// AsFileTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for DockerBuildStepUpdateParameters. +func (dbsup DockerBuildStepUpdateParameters) AsFileTaskStepUpdateParameters() (*FileTaskStepUpdateParameters, bool) { + return nil, false +} + +// AsEncodedTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for DockerBuildStepUpdateParameters. +func (dbsup DockerBuildStepUpdateParameters) AsEncodedTaskStepUpdateParameters() (*EncodedTaskStepUpdateParameters, bool) { + return nil, false +} + +// AsTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for DockerBuildStepUpdateParameters. +func (dbsup DockerBuildStepUpdateParameters) AsTaskStepUpdateParameters() (*TaskStepUpdateParameters, bool) { + return nil, false +} + +// AsBasicTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for DockerBuildStepUpdateParameters. +func (dbsup DockerBuildStepUpdateParameters) AsBasicTaskStepUpdateParameters() (BasicTaskStepUpdateParameters, bool) { + return &dbsup, true +} + +// EncodedTaskRunRequest the parameters for a quick task run request. 
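// Sketch (not part of the generated code): the As* methods implement a
// closed type switch over the discriminated union, so a caller holding the
// interface probes for the concrete type it expects. bsup is a placeholder.
func exampleDowncast(bsup BasicTaskStepUpdateParameters) {
	if dbsup, ok := bsup.AsDockerBuildStepUpdateParameters(); ok {
		_ = dbsup.DockerFilePath // safe: dbsup is the concrete Docker variant
	}
}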
+type EncodedTaskRunRequest struct { + // EncodedTaskContent - Base64 encoded value of the template/definition file content. + EncodedTaskContent *string `json:"encodedTaskContent,omitempty"` + // EncodedValuesContent - Base64 encoded value of the parameters/values file content. + EncodedValuesContent *string `json:"encodedValuesContent,omitempty"` + // Values - The collection of overridable values that can be passed when running a task. + Values *[]SetValue `json:"values,omitempty"` + // Timeout - Run timeout in seconds. + Timeout *int32 `json:"timeout,omitempty"` + // Platform - The platform properties against which the run has to happen. + Platform *PlatformProperties `json:"platform,omitempty"` + // AgentConfiguration - The machine configuration of the run agent. + AgentConfiguration *AgentProperties `json:"agentConfiguration,omitempty"` + // SourceLocation - The URL(absolute or relative) of the source context. It can be an URL to a tar or git repository. + // If it is relative URL, the relative path should be obtained from calling listBuildSourceUploadUrl API. + SourceLocation *string `json:"sourceLocation,omitempty"` + // Credentials - The properties that describes a set of credentials that will be used when this run is invoked. + Credentials *Credentials `json:"credentials,omitempty"` + // IsArchiveEnabled - The value that indicates whether archiving is enabled for the run or not. + IsArchiveEnabled *bool `json:"isArchiveEnabled,omitempty"` + // Type - Possible values include: 'TypeRunRequest', 'TypeDockerBuildRequest', 'TypeFileTaskRunRequest', 'TypeTaskRunRequest', 'TypeEncodedTaskRunRequest' + Type Type `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for EncodedTaskRunRequest. +func (etrr EncodedTaskRunRequest) MarshalJSON() ([]byte, error) { + etrr.Type = TypeEncodedTaskRunRequest + objectMap := make(map[string]interface{}) + if etrr.EncodedTaskContent != nil { + objectMap["encodedTaskContent"] = etrr.EncodedTaskContent + } + if etrr.EncodedValuesContent != nil { + objectMap["encodedValuesContent"] = etrr.EncodedValuesContent + } + if etrr.Values != nil { + objectMap["values"] = etrr.Values + } + if etrr.Timeout != nil { + objectMap["timeout"] = etrr.Timeout + } + if etrr.Platform != nil { + objectMap["platform"] = etrr.Platform + } + if etrr.AgentConfiguration != nil { + objectMap["agentConfiguration"] = etrr.AgentConfiguration + } + if etrr.SourceLocation != nil { + objectMap["sourceLocation"] = etrr.SourceLocation + } + if etrr.Credentials != nil { + objectMap["credentials"] = etrr.Credentials + } + if etrr.IsArchiveEnabled != nil { + objectMap["isArchiveEnabled"] = etrr.IsArchiveEnabled + } + if etrr.Type != "" { + objectMap["type"] = etrr.Type + } + return json.Marshal(objectMap) +} + +// AsDockerBuildRequest is the BasicRunRequest implementation for EncodedTaskRunRequest. +func (etrr EncodedTaskRunRequest) AsDockerBuildRequest() (*DockerBuildRequest, bool) { + return nil, false +} + +// AsFileTaskRunRequest is the BasicRunRequest implementation for EncodedTaskRunRequest. +func (etrr EncodedTaskRunRequest) AsFileTaskRunRequest() (*FileTaskRunRequest, bool) { + return nil, false +} + +// AsTaskRunRequest is the BasicRunRequest implementation for EncodedTaskRunRequest. +func (etrr EncodedTaskRunRequest) AsTaskRunRequest() (*TaskRunRequest, bool) { + return nil, false +} + +// AsEncodedTaskRunRequest is the BasicRunRequest implementation for EncodedTaskRunRequest. 
+func (etrr EncodedTaskRunRequest) AsEncodedTaskRunRequest() (*EncodedTaskRunRequest, bool) { + return &etrr, true +} + +// AsRunRequest is the BasicRunRequest implementation for EncodedTaskRunRequest. +func (etrr EncodedTaskRunRequest) AsRunRequest() (*RunRequest, bool) { + return nil, false +} + +// AsBasicRunRequest is the BasicRunRequest implementation for EncodedTaskRunRequest. +func (etrr EncodedTaskRunRequest) AsBasicRunRequest() (BasicRunRequest, bool) { + return &etrr, true +} + +// EncodedTaskStep the properties of an encoded task step. +type EncodedTaskStep struct { + // EncodedTaskContent - Base64 encoded value of the template/definition file content. + EncodedTaskContent *string `json:"encodedTaskContent,omitempty"` + // EncodedValuesContent - Base64 encoded value of the parameters/values file content. + EncodedValuesContent *string `json:"encodedValuesContent,omitempty"` + // Values - The collection of overridable values that can be passed when running a task. + Values *[]SetValue `json:"values,omitempty"` + // BaseImageDependencies - READ-ONLY; List of base image dependencies for a step. + BaseImageDependencies *[]BaseImageDependency `json:"baseImageDependencies,omitempty"` + // ContextPath - The URL(absolute or relative) of the source context for the task step. + ContextPath *string `json:"contextPath,omitempty"` + // ContextAccessToken - The token (git PAT or SAS token of storage account blob) associated with the context for a step. + ContextAccessToken *string `json:"contextAccessToken,omitempty"` + // Type - Possible values include: 'TypeTaskStepProperties', 'TypeDocker', 'TypeFileTask', 'TypeEncodedTask' + Type TypeBasicTaskStepProperties `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for EncodedTaskStep. +func (ets EncodedTaskStep) MarshalJSON() ([]byte, error) { + ets.Type = TypeEncodedTask + objectMap := make(map[string]interface{}) + if ets.EncodedTaskContent != nil { + objectMap["encodedTaskContent"] = ets.EncodedTaskContent + } + if ets.EncodedValuesContent != nil { + objectMap["encodedValuesContent"] = ets.EncodedValuesContent + } + if ets.Values != nil { + objectMap["values"] = ets.Values + } + if ets.ContextPath != nil { + objectMap["contextPath"] = ets.ContextPath + } + if ets.ContextAccessToken != nil { + objectMap["contextAccessToken"] = ets.ContextAccessToken + } + if ets.Type != "" { + objectMap["type"] = ets.Type + } + return json.Marshal(objectMap) +} + +// AsDockerBuildStep is the BasicTaskStepProperties implementation for EncodedTaskStep. +func (ets EncodedTaskStep) AsDockerBuildStep() (*DockerBuildStep, bool) { + return nil, false +} + +// AsFileTaskStep is the BasicTaskStepProperties implementation for EncodedTaskStep. +func (ets EncodedTaskStep) AsFileTaskStep() (*FileTaskStep, bool) { + return nil, false +} + +// AsEncodedTaskStep is the BasicTaskStepProperties implementation for EncodedTaskStep. +func (ets EncodedTaskStep) AsEncodedTaskStep() (*EncodedTaskStep, bool) { + return &ets, true +} + +// AsTaskStepProperties is the BasicTaskStepProperties implementation for EncodedTaskStep. +func (ets EncodedTaskStep) AsTaskStepProperties() (*TaskStepProperties, bool) { + return nil, false +} + +// AsBasicTaskStepProperties is the BasicTaskStepProperties implementation for EncodedTaskStep. +func (ets EncodedTaskStep) AsBasicTaskStepProperties() (BasicTaskStepProperties, bool) { + return &ets, true +} + +// EncodedTaskStepUpdateParameters the properties for updating an encoded task step. 
+type EncodedTaskStepUpdateParameters struct { + // EncodedTaskContent - Base64 encoded value of the template/definition file content. + EncodedTaskContent *string `json:"encodedTaskContent,omitempty"` + // EncodedValuesContent - Base64 encoded value of the parameters/values file content. + EncodedValuesContent *string `json:"encodedValuesContent,omitempty"` + // Values - The collection of overridable values that can be passed when running a task. + Values *[]SetValue `json:"values,omitempty"` + // ContextPath - The URL(absolute or relative) of the source context for the task step. + ContextPath *string `json:"contextPath,omitempty"` + // ContextAccessToken - The token (git PAT or SAS token of storage account blob) associated with the context for a step. + ContextAccessToken *string `json:"contextAccessToken,omitempty"` + // Type - Possible values include: 'TypeBasicTaskStepUpdateParametersTypeTaskStepUpdateParameters', 'TypeBasicTaskStepUpdateParametersTypeDocker', 'TypeBasicTaskStepUpdateParametersTypeFileTask', 'TypeBasicTaskStepUpdateParametersTypeEncodedTask' + Type TypeBasicTaskStepUpdateParameters `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for EncodedTaskStepUpdateParameters. +func (etsup EncodedTaskStepUpdateParameters) MarshalJSON() ([]byte, error) { + etsup.Type = TypeBasicTaskStepUpdateParametersTypeEncodedTask + objectMap := make(map[string]interface{}) + if etsup.EncodedTaskContent != nil { + objectMap["encodedTaskContent"] = etsup.EncodedTaskContent + } + if etsup.EncodedValuesContent != nil { + objectMap["encodedValuesContent"] = etsup.EncodedValuesContent + } + if etsup.Values != nil { + objectMap["values"] = etsup.Values + } + if etsup.ContextPath != nil { + objectMap["contextPath"] = etsup.ContextPath + } + if etsup.ContextAccessToken != nil { + objectMap["contextAccessToken"] = etsup.ContextAccessToken + } + if etsup.Type != "" { + objectMap["type"] = etsup.Type + } + return json.Marshal(objectMap) +} + +// AsDockerBuildStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for EncodedTaskStepUpdateParameters. +func (etsup EncodedTaskStepUpdateParameters) AsDockerBuildStepUpdateParameters() (*DockerBuildStepUpdateParameters, bool) { + return nil, false +} + +// AsFileTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for EncodedTaskStepUpdateParameters. +func (etsup EncodedTaskStepUpdateParameters) AsFileTaskStepUpdateParameters() (*FileTaskStepUpdateParameters, bool) { + return nil, false +} + +// AsEncodedTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for EncodedTaskStepUpdateParameters. +func (etsup EncodedTaskStepUpdateParameters) AsEncodedTaskStepUpdateParameters() (*EncodedTaskStepUpdateParameters, bool) { + return &etsup, true +} + +// AsTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for EncodedTaskStepUpdateParameters. +func (etsup EncodedTaskStepUpdateParameters) AsTaskStepUpdateParameters() (*TaskStepUpdateParameters, bool) { + return nil, false +} + +// AsBasicTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for EncodedTaskStepUpdateParameters. +func (etsup EncodedTaskStepUpdateParameters) AsBasicTaskStepUpdateParameters() (BasicTaskStepUpdateParameters, bool) { + return &etsup, true +} + +// Event the event for a webhook. +type Event struct { + // EventRequestMessage - The event request message sent to the service URI. 
+ EventRequestMessage *EventRequestMessage `json:"eventRequestMessage,omitempty"` + // EventResponseMessage - The event response message received from the service URI. + EventResponseMessage *EventResponseMessage `json:"eventResponseMessage,omitempty"` + // ID - The event ID. + ID *string `json:"id,omitempty"` +} + +// EventContent the content of the event request message. +type EventContent struct { + // ID - The event ID. + ID *string `json:"id,omitempty"` + // Timestamp - The time at which the event occurred. + Timestamp *date.Time `json:"timestamp,omitempty"` + // Action - The action that encompasses the provided event. + Action *string `json:"action,omitempty"` + // Target - The target of the event. + Target *Target `json:"target,omitempty"` + // Request - The request that generated the event. + Request *Request `json:"request,omitempty"` + // Actor - The agent that initiated the event. For most situations, this could be from the authorization context of the request. + Actor *Actor `json:"actor,omitempty"` + // Source - The registry node that generated the event. Put differently, while the actor initiates the event, the source generates it. + Source *Source `json:"source,omitempty"` +} + +// EventInfo the basic information of an event. +type EventInfo struct { + autorest.Response `json:"-"` + // ID - The event ID. + ID *string `json:"id,omitempty"` +} + +// EventListResult the result of a request to list events for a webhook. +type EventListResult struct { + autorest.Response `json:"-"` + // Value - The list of events. Since this list may be incomplete, the nextLink field should be used to request the next list of events. + Value *[]Event `json:"value,omitempty"` + // NextLink - The URI that can be used to request the next list of events. + NextLink *string `json:"nextLink,omitempty"` +} + +// EventListResultIterator provides access to a complete listing of Event values. +type EventListResultIterator struct { + i int + page EventListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *EventListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/EventListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *EventListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter EventListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter EventListResultIterator) Response() EventListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. 
+func (iter EventListResultIterator) Value() Event { + if !iter.page.NotDone() { + return Event{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the EventListResultIterator type. +func NewEventListResultIterator(page EventListResultPage) EventListResultIterator { + return EventListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (elr EventListResult) IsEmpty() bool { + return elr.Value == nil || len(*elr.Value) == 0 +} + +// eventListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (elr EventListResult) eventListResultPreparer(ctx context.Context) (*http.Request, error) { + if elr.NextLink == nil || len(to.String(elr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(elr.NextLink))) +} + +// EventListResultPage contains a page of Event values. +type EventListResultPage struct { + fn func(context.Context, EventListResult) (EventListResult, error) + elr EventListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *EventListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/EventListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.elr) + if err != nil { + return err + } + page.elr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *EventListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page EventListResultPage) NotDone() bool { + return !page.elr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page EventListResultPage) Response() EventListResult { + return page.elr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page EventListResultPage) Values() []Event { + if page.elr.IsEmpty() { + return nil + } + return *page.elr.Value +} + +// Creates a new instance of the EventListResultPage type. +func NewEventListResultPage(getNextPage func(context.Context, EventListResult) (EventListResult, error)) EventListResultPage { + return EventListResultPage{fn: getNextPage} +} + +// EventRequestMessage the event request message sent to the service URI. +type EventRequestMessage struct { + // Content - The content of the event request message. + Content *EventContent `json:"content,omitempty"` + // Headers - The headers of the event request message. + Headers map[string]*string `json:"headers"` + // Method - The HTTP method used to send the event request message. + Method *string `json:"method,omitempty"` + // RequestURI - The URI used to send the event request message. + RequestURI *string `json:"requestUri,omitempty"` + // Version - The HTTP message version. 
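// Sketch (not part of the generated code): the iterator/page pair above
// follows the usual autorest paging contract. A typical drain loop, assuming
// an iterator obtained from some list call; the loop itself is the pattern.
func exampleDrainEvents(ctx context.Context, iter EventListResultIterator) error {
	for iter.NotDone() {
		_ = iter.Value().ID // process one Event per step
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}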
+ Version *string `json:"version,omitempty"` +} + +// MarshalJSON is the custom marshaler for EventRequestMessage. +func (erm EventRequestMessage) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if erm.Content != nil { + objectMap["content"] = erm.Content + } + if erm.Headers != nil { + objectMap["headers"] = erm.Headers + } + if erm.Method != nil { + objectMap["method"] = erm.Method + } + if erm.RequestURI != nil { + objectMap["requestUri"] = erm.RequestURI + } + if erm.Version != nil { + objectMap["version"] = erm.Version + } + return json.Marshal(objectMap) +} + +// EventResponseMessage the event response message received from the service URI. +type EventResponseMessage struct { + // Content - The content of the event response message. + Content *string `json:"content,omitempty"` + // Headers - The headers of the event response message. + Headers map[string]*string `json:"headers"` + // ReasonPhrase - The reason phrase of the event response message. + ReasonPhrase *string `json:"reasonPhrase,omitempty"` + // StatusCode - The status code of the event response message. + StatusCode *string `json:"statusCode,omitempty"` + // Version - The HTTP message version. + Version *string `json:"version,omitempty"` +} + +// MarshalJSON is the custom marshaler for EventResponseMessage. +func (erm EventResponseMessage) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if erm.Content != nil { + objectMap["content"] = erm.Content + } + if erm.Headers != nil { + objectMap["headers"] = erm.Headers + } + if erm.ReasonPhrase != nil { + objectMap["reasonPhrase"] = erm.ReasonPhrase + } + if erm.StatusCode != nil { + objectMap["statusCode"] = erm.StatusCode + } + if erm.Version != nil { + objectMap["version"] = erm.Version + } + return json.Marshal(objectMap) +} + +// FileTaskRunRequest the request parameters for a scheduling run against a task file. +type FileTaskRunRequest struct { + // TaskFilePath - The template/definition file path relative to the source. + TaskFilePath *string `json:"taskFilePath,omitempty"` + // ValuesFilePath - The values/parameters file path relative to the source. + ValuesFilePath *string `json:"valuesFilePath,omitempty"` + // Values - The collection of overridable values that can be passed when running a task. + Values *[]SetValue `json:"values,omitempty"` + // Timeout - Run timeout in seconds. + Timeout *int32 `json:"timeout,omitempty"` + // Platform - The platform properties against which the run has to happen. + Platform *PlatformProperties `json:"platform,omitempty"` + // AgentConfiguration - The machine configuration of the run agent. + AgentConfiguration *AgentProperties `json:"agentConfiguration,omitempty"` + // SourceLocation - The URL(absolute or relative) of the source context. It can be an URL to a tar or git repository. + // If it is relative URL, the relative path should be obtained from calling listBuildSourceUploadUrl API. + SourceLocation *string `json:"sourceLocation,omitempty"` + // Credentials - The properties that describes a set of credentials that will be used when this run is invoked. + Credentials *Credentials `json:"credentials,omitempty"` + // IsArchiveEnabled - The value that indicates whether archiving is enabled for the run or not. 
+ IsArchiveEnabled *bool `json:"isArchiveEnabled,omitempty"` + // Type - Possible values include: 'TypeRunRequest', 'TypeDockerBuildRequest', 'TypeFileTaskRunRequest', 'TypeTaskRunRequest', 'TypeEncodedTaskRunRequest' + Type Type `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for FileTaskRunRequest. +func (ftrr FileTaskRunRequest) MarshalJSON() ([]byte, error) { + ftrr.Type = TypeFileTaskRunRequest + objectMap := make(map[string]interface{}) + if ftrr.TaskFilePath != nil { + objectMap["taskFilePath"] = ftrr.TaskFilePath + } + if ftrr.ValuesFilePath != nil { + objectMap["valuesFilePath"] = ftrr.ValuesFilePath + } + if ftrr.Values != nil { + objectMap["values"] = ftrr.Values + } + if ftrr.Timeout != nil { + objectMap["timeout"] = ftrr.Timeout + } + if ftrr.Platform != nil { + objectMap["platform"] = ftrr.Platform + } + if ftrr.AgentConfiguration != nil { + objectMap["agentConfiguration"] = ftrr.AgentConfiguration + } + if ftrr.SourceLocation != nil { + objectMap["sourceLocation"] = ftrr.SourceLocation + } + if ftrr.Credentials != nil { + objectMap["credentials"] = ftrr.Credentials + } + if ftrr.IsArchiveEnabled != nil { + objectMap["isArchiveEnabled"] = ftrr.IsArchiveEnabled + } + if ftrr.Type != "" { + objectMap["type"] = ftrr.Type + } + return json.Marshal(objectMap) +} + +// AsDockerBuildRequest is the BasicRunRequest implementation for FileTaskRunRequest. +func (ftrr FileTaskRunRequest) AsDockerBuildRequest() (*DockerBuildRequest, bool) { + return nil, false +} + +// AsFileTaskRunRequest is the BasicRunRequest implementation for FileTaskRunRequest. +func (ftrr FileTaskRunRequest) AsFileTaskRunRequest() (*FileTaskRunRequest, bool) { + return &ftrr, true +} + +// AsTaskRunRequest is the BasicRunRequest implementation for FileTaskRunRequest. +func (ftrr FileTaskRunRequest) AsTaskRunRequest() (*TaskRunRequest, bool) { + return nil, false +} + +// AsEncodedTaskRunRequest is the BasicRunRequest implementation for FileTaskRunRequest. +func (ftrr FileTaskRunRequest) AsEncodedTaskRunRequest() (*EncodedTaskRunRequest, bool) { + return nil, false +} + +// AsRunRequest is the BasicRunRequest implementation for FileTaskRunRequest. +func (ftrr FileTaskRunRequest) AsRunRequest() (*RunRequest, bool) { + return nil, false +} + +// AsBasicRunRequest is the BasicRunRequest implementation for FileTaskRunRequest. +func (ftrr FileTaskRunRequest) AsBasicRunRequest() (BasicRunRequest, bool) { + return &ftrr, true +} + +// FileTaskStep the properties of a task step. +type FileTaskStep struct { + // TaskFilePath - The task template/definition file path relative to the source context. + TaskFilePath *string `json:"taskFilePath,omitempty"` + // ValuesFilePath - The task values/parameters file path relative to the source context. + ValuesFilePath *string `json:"valuesFilePath,omitempty"` + // Values - The collection of overridable values that can be passed when running a task. + Values *[]SetValue `json:"values,omitempty"` + // BaseImageDependencies - READ-ONLY; List of base image dependencies for a step. + BaseImageDependencies *[]BaseImageDependency `json:"baseImageDependencies,omitempty"` + // ContextPath - The URL(absolute or relative) of the source context for the task step. + ContextPath *string `json:"contextPath,omitempty"` + // ContextAccessToken - The token (git PAT or SAS token of storage account blob) associated with the context for a step. 
+ ContextAccessToken *string `json:"contextAccessToken,omitempty"` + // Type - Possible values include: 'TypeTaskStepProperties', 'TypeDocker', 'TypeFileTask', 'TypeEncodedTask' + Type TypeBasicTaskStepProperties `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for FileTaskStep. +func (fts FileTaskStep) MarshalJSON() ([]byte, error) { + fts.Type = TypeFileTask + objectMap := make(map[string]interface{}) + if fts.TaskFilePath != nil { + objectMap["taskFilePath"] = fts.TaskFilePath + } + if fts.ValuesFilePath != nil { + objectMap["valuesFilePath"] = fts.ValuesFilePath + } + if fts.Values != nil { + objectMap["values"] = fts.Values + } + if fts.ContextPath != nil { + objectMap["contextPath"] = fts.ContextPath + } + if fts.ContextAccessToken != nil { + objectMap["contextAccessToken"] = fts.ContextAccessToken + } + if fts.Type != "" { + objectMap["type"] = fts.Type + } + return json.Marshal(objectMap) +} + +// AsDockerBuildStep is the BasicTaskStepProperties implementation for FileTaskStep. +func (fts FileTaskStep) AsDockerBuildStep() (*DockerBuildStep, bool) { + return nil, false +} + +// AsFileTaskStep is the BasicTaskStepProperties implementation for FileTaskStep. +func (fts FileTaskStep) AsFileTaskStep() (*FileTaskStep, bool) { + return &fts, true +} + +// AsEncodedTaskStep is the BasicTaskStepProperties implementation for FileTaskStep. +func (fts FileTaskStep) AsEncodedTaskStep() (*EncodedTaskStep, bool) { + return nil, false +} + +// AsTaskStepProperties is the BasicTaskStepProperties implementation for FileTaskStep. +func (fts FileTaskStep) AsTaskStepProperties() (*TaskStepProperties, bool) { + return nil, false +} + +// AsBasicTaskStepProperties is the BasicTaskStepProperties implementation for FileTaskStep. +func (fts FileTaskStep) AsBasicTaskStepProperties() (BasicTaskStepProperties, bool) { + return &fts, true +} + +// FileTaskStepUpdateParameters the properties of updating a task step. +type FileTaskStepUpdateParameters struct { + // TaskFilePath - The task template/definition file path relative to the source context. + TaskFilePath *string `json:"taskFilePath,omitempty"` + // ValuesFilePath - The values/parameters file path relative to the source context. + ValuesFilePath *string `json:"valuesFilePath,omitempty"` + // Values - The collection of overridable values that can be passed when running a task. + Values *[]SetValue `json:"values,omitempty"` + // ContextPath - The URL(absolute or relative) of the source context for the task step. + ContextPath *string `json:"contextPath,omitempty"` + // ContextAccessToken - The token (git PAT or SAS token of storage account blob) associated with the context for a step. + ContextAccessToken *string `json:"contextAccessToken,omitempty"` + // Type - Possible values include: 'TypeBasicTaskStepUpdateParametersTypeTaskStepUpdateParameters', 'TypeBasicTaskStepUpdateParametersTypeDocker', 'TypeBasicTaskStepUpdateParametersTypeFileTask', 'TypeBasicTaskStepUpdateParametersTypeEncodedTask' + Type TypeBasicTaskStepUpdateParameters `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for FileTaskStepUpdateParameters. 
+func (ftsup FileTaskStepUpdateParameters) MarshalJSON() ([]byte, error) { + ftsup.Type = TypeBasicTaskStepUpdateParametersTypeFileTask + objectMap := make(map[string]interface{}) + if ftsup.TaskFilePath != nil { + objectMap["taskFilePath"] = ftsup.TaskFilePath + } + if ftsup.ValuesFilePath != nil { + objectMap["valuesFilePath"] = ftsup.ValuesFilePath + } + if ftsup.Values != nil { + objectMap["values"] = ftsup.Values + } + if ftsup.ContextPath != nil { + objectMap["contextPath"] = ftsup.ContextPath + } + if ftsup.ContextAccessToken != nil { + objectMap["contextAccessToken"] = ftsup.ContextAccessToken + } + if ftsup.Type != "" { + objectMap["type"] = ftsup.Type + } + return json.Marshal(objectMap) +} + +// AsDockerBuildStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for FileTaskStepUpdateParameters. +func (ftsup FileTaskStepUpdateParameters) AsDockerBuildStepUpdateParameters() (*DockerBuildStepUpdateParameters, bool) { + return nil, false +} + +// AsFileTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for FileTaskStepUpdateParameters. +func (ftsup FileTaskStepUpdateParameters) AsFileTaskStepUpdateParameters() (*FileTaskStepUpdateParameters, bool) { + return &ftsup, true +} + +// AsEncodedTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for FileTaskStepUpdateParameters. +func (ftsup FileTaskStepUpdateParameters) AsEncodedTaskStepUpdateParameters() (*EncodedTaskStepUpdateParameters, bool) { + return nil, false +} + +// AsTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for FileTaskStepUpdateParameters. +func (ftsup FileTaskStepUpdateParameters) AsTaskStepUpdateParameters() (*TaskStepUpdateParameters, bool) { + return nil, false +} + +// AsBasicTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for FileTaskStepUpdateParameters. +func (ftsup FileTaskStepUpdateParameters) AsBasicTaskStepUpdateParameters() (BasicTaskStepUpdateParameters, bool) { + return &ftsup, true +} + +// IdentityProperties managed identity for the resource. +type IdentityProperties struct { + // PrincipalID - The principal ID of resource identity. + PrincipalID *string `json:"principalId,omitempty"` + // TenantID - The tenant ID of resource. + TenantID *string `json:"tenantId,omitempty"` + // Type - The identity type. Possible values include: 'SystemAssigned', 'UserAssigned', 'SystemAssignedUserAssigned', 'None' + Type ResourceIdentityType `json:"type,omitempty"` + // UserAssignedIdentities - The list of user identities associated with the resource. The user identity + // dictionary key references will be ARM resource ids in the form: + // '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/ + // providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. + UserAssignedIdentities map[string]*UserIdentityProperties `json:"userAssignedIdentities"` +} + +// MarshalJSON is the custom marshaler for IdentityProperties. +func (IP IdentityProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if IP.PrincipalID != nil { + objectMap["principalId"] = IP.PrincipalID + } + if IP.TenantID != nil { + objectMap["tenantId"] = IP.TenantID + } + if IP.Type != "" { + objectMap["type"] = IP.Type + } + if IP.UserAssignedIdentities != nil { + objectMap["userAssignedIdentities"] = IP.UserAssignedIdentities + } + return json.Marshal(objectMap) +} + +// ImageDescriptor properties for a registry image. 
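// Sketch (not part of the generated code): for a user-assigned identity the
// UserAssignedIdentities map is keyed by the identity's ARM resource ID and
// the per-key value can be left empty on input. Assumes the generated
// ResourceIdentityType constant UserAssigned; the resource ID is a placeholder.
func exampleIdentity() IdentityProperties {
	return IdentityProperties{
		Type: UserAssigned,
		UserAssignedIdentities: map[string]*UserIdentityProperties{
			"/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.ManagedIdentity/userAssignedIdentities/<name>": {},
		},
	}
}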
+type ImageDescriptor struct { + // Registry - The registry login server. + Registry *string `json:"registry,omitempty"` + // Repository - The repository name. + Repository *string `json:"repository,omitempty"` + // Tag - The tag name. + Tag *string `json:"tag,omitempty"` + // Digest - The sha256-based digest of the image manifest. + Digest *string `json:"digest,omitempty"` +} + +// ImageUpdateTrigger the image update trigger that caused a build. +type ImageUpdateTrigger struct { + // ID - The unique ID of the trigger. + ID *string `json:"id,omitempty"` + // Timestamp - The timestamp when the image update happened. + Timestamp *date.Time `json:"timestamp,omitempty"` + // Images - The list of image updates that caused the build. + Images *[]ImageDescriptor `json:"images,omitempty"` +} + +// ImportImageParameters ... +type ImportImageParameters struct { + // Source - The source of the image. + Source *ImportSource `json:"source,omitempty"` + // TargetTags - List of strings of the form repo[:tag]. When tag is omitted the source will be used (or 'latest' if source tag is also omitted). + TargetTags *[]string `json:"targetTags,omitempty"` + // UntaggedTargetRepositories - List of strings of repository names to do a manifest only copy. No tag will be created. + UntaggedTargetRepositories *[]string `json:"untaggedTargetRepositories,omitempty"` + // Mode - When Force, any existing target tags will be overwritten. When NoForce, any existing target tags will fail the operation before any copying begins. Possible values include: 'NoForce', 'Force' + Mode ImportMode `json:"mode,omitempty"` +} + +// ImportSource ... +type ImportSource struct { + // ResourceID - The resource identifier of the source Azure Container Registry. + ResourceID *string `json:"resourceId,omitempty"` + // RegistryURI - The address of the source registry (e.g. 'mcr.microsoft.com'). + RegistryURI *string `json:"registryUri,omitempty"` + // Credentials - Credentials used when importing from a registry uri. + Credentials *ImportSourceCredentials `json:"credentials,omitempty"` + // SourceImage - Repository name of the source image. + // Specify an image by repository ('hello-world'). This will use the 'latest' tag. + // Specify an image by tag ('hello-world:latest'). + // Specify an image by sha256-based manifest digest ('hello-world@sha256:abc123'). + SourceImage *string `json:"sourceImage,omitempty"` +} + +// ImportSourceCredentials ... +type ImportSourceCredentials struct { + // Username - The username to authenticate with the source registry. + Username *string `json:"username,omitempty"` + // Password - The password used to authenticate with the source registry. + Password *string `json:"password,omitempty"` +} + +// IPRule IP rule with specific IP or IP range in CIDR format. +type IPRule struct { + // Action - The action of IP ACL rule. Possible values include: 'Allow' + Action Action `json:"action,omitempty"` + // IPAddressOrRange - Specifies the IP or IP range in CIDR format. Only IPV4 address is allowed. + IPAddressOrRange *string `json:"value,omitempty"` +} + +// NetworkRuleSet the network rule set for a container registry. +type NetworkRuleSet struct { + // DefaultAction - The default action of allow or deny when no other rules match. Possible values include: 'DefaultActionAllow', 'DefaultActionDeny' + DefaultAction DefaultAction `json:"defaultAction,omitempty"` + // VirtualNetworkRules - The virtual network rules. 
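// Sketch (not part of the generated code): importing a public image by tag.
// Assumes the generated ImportMode constant Force; the registry address and
// repository names are placeholders.
func exampleImportParams() ImportImageParameters {
	return ImportImageParameters{
		Source: &ImportSource{
			RegistryURI: to.StringPtr("mcr.microsoft.com"),
			SourceImage: to.StringPtr("hello-world:latest"),
		},
		TargetTags: &[]string{"hello-world:latest"},
		Mode:       Force, // overwrite any existing target tags
	}
}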
+ VirtualNetworkRules *[]VirtualNetworkRule `json:"virtualNetworkRules,omitempty"` + // IPRules - The IP ACL rules. + IPRules *[]IPRule `json:"ipRules,omitempty"` +} + +// OperationDefinition the definition of a container registry operation. +type OperationDefinition struct { + // Origin - The origin information of the container registry operation. + Origin *string `json:"origin,omitempty"` + // Name - Operation name: {provider}/{resource}/{operation}. + Name *string `json:"name,omitempty"` + // Display - The display information for the container registry operation. + Display *OperationDisplayDefinition `json:"display,omitempty"` + // OperationPropertiesDefinition - The properties information for the container registry operation. + *OperationPropertiesDefinition `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for OperationDefinition. +func (od OperationDefinition) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if od.Origin != nil { + objectMap["origin"] = od.Origin + } + if od.Name != nil { + objectMap["name"] = od.Name + } + if od.Display != nil { + objectMap["display"] = od.Display + } + if od.OperationPropertiesDefinition != nil { + objectMap["properties"] = od.OperationPropertiesDefinition + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for OperationDefinition struct. +func (od *OperationDefinition) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "origin": + if v != nil { + var origin string + err = json.Unmarshal(*v, &origin) + if err != nil { + return err + } + od.Origin = &origin + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + od.Name = &name + } + case "display": + if v != nil { + var display OperationDisplayDefinition + err = json.Unmarshal(*v, &display) + if err != nil { + return err + } + od.Display = &display + } + case "properties": + if v != nil { + var operationPropertiesDefinition OperationPropertiesDefinition + err = json.Unmarshal(*v, &operationPropertiesDefinition) + if err != nil { + return err + } + od.OperationPropertiesDefinition = &operationPropertiesDefinition + } + } + } + + return nil +} + +// OperationDisplayDefinition the display information for a container registry operation. +type OperationDisplayDefinition struct { + // Provider - The resource provider name: Microsoft.ContainerRegistry. + Provider *string `json:"provider,omitempty"` + // Resource - The resource on which the operation is performed. + Resource *string `json:"resource,omitempty"` + // Operation - The operation that users can perform. + Operation *string `json:"operation,omitempty"` + // Description - The description for the operation. + Description *string `json:"description,omitempty"` +} + +// OperationListResult the result of a request to list container registry operations. +type OperationListResult struct { + autorest.Response `json:"-"` + // Value - The list of container registry operations. Since this list may be incomplete, the nextLink field should be used to request the next list of operations. + Value *[]OperationDefinition `json:"value,omitempty"` + // NextLink - The URI that can be used to request the next list of container registry operations. 
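// Sketch (not part of the generated code): the custom UnmarshalJSON above
// in-lines the nested "properties" object into the embedded
// *OperationPropertiesDefinition, so the flattened field is reachable
// directly on the outer value. The payload below is illustrative.
func exampleFlattenedUnmarshal() (*OperationDefinition, error) {
	payload := []byte(`{"name":"Microsoft.ContainerRegistry/registries/read","properties":{"serviceSpecification":{"metricSpecifications":[]}}}`)
	var od OperationDefinition
	if err := json.Unmarshal(payload, &od); err != nil {
		return nil, err
	}
	_ = od.ServiceSpecification // promoted through the embedded struct
	return &od, nil
}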
+ NextLink *string `json:"nextLink,omitempty"` +} + +// OperationListResultIterator provides access to a complete listing of OperationDefinition values. +type OperationListResultIterator struct { + i int + page OperationListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *OperationListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OperationListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *OperationListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter OperationListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter OperationListResultIterator) Response() OperationListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter OperationListResultIterator) Value() OperationDefinition { + if !iter.page.NotDone() { + return OperationDefinition{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the OperationListResultIterator type. +func NewOperationListResultIterator(page OperationListResultPage) OperationListResultIterator { + return OperationListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (olr OperationListResult) IsEmpty() bool { + return olr.Value == nil || len(*olr.Value) == 0 +} + +// operationListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (olr OperationListResult) operationListResultPreparer(ctx context.Context) (*http.Request, error) { + if olr.NextLink == nil || len(to.String(olr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(olr.NextLink))) +} + +// OperationListResultPage contains a page of OperationDefinition values. +type OperationListResultPage struct { + fn func(context.Context, OperationListResult) (OperationListResult, error) + olr OperationListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *OperationListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OperationListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.olr) + if err != nil { + return err + } + page.olr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *OperationListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page OperationListResultPage) NotDone() bool { + return !page.olr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page OperationListResultPage) Response() OperationListResult { + return page.olr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page OperationListResultPage) Values() []OperationDefinition { + if page.olr.IsEmpty() { + return nil + } + return *page.olr.Value +} + +// Creates a new instance of the OperationListResultPage type. +func NewOperationListResultPage(getNextPage func(context.Context, OperationListResult) (OperationListResult, error)) OperationListResultPage { + return OperationListResultPage{fn: getNextPage} +} + +// OperationMetricSpecificationDefinition the definition of Azure Monitoring metric. +type OperationMetricSpecificationDefinition struct { + // Name - Metric name. + Name *string `json:"name,omitempty"` + // DisplayName - Metric display name. + DisplayName *string `json:"displayName,omitempty"` + // DisplayDescription - Metric description. + DisplayDescription *string `json:"displayDescription,omitempty"` + // Unit - Metric unit. + Unit *string `json:"unit,omitempty"` + // AggregationType - Metric aggregation type. + AggregationType *string `json:"aggregationType,omitempty"` + // InternalMetricName - Internal metric name. + InternalMetricName *string `json:"internalMetricName,omitempty"` +} + +// OperationPropertiesDefinition the definition of Azure Monitoring properties. +type OperationPropertiesDefinition struct { + // ServiceSpecification - The definition of Azure Monitoring service. + ServiceSpecification *OperationServiceSpecificationDefinition `json:"serviceSpecification,omitempty"` +} + +// OperationServiceSpecificationDefinition the definition of Azure Monitoring list. +type OperationServiceSpecificationDefinition struct { + // MetricSpecifications - A list of Azure Monitoring metrics definition. + MetricSpecifications *[]OperationMetricSpecificationDefinition `json:"metricSpecifications,omitempty"` +} + +// PlatformProperties the platform properties against which the run has to happen. +type PlatformProperties struct { + // Os - The operating system type required for the run. Possible values include: 'Windows', 'Linux' + Os OS `json:"os,omitempty"` + // Architecture - The OS architecture. Possible values include: 'Amd64', 'X86', 'Arm' + Architecture Architecture `json:"architecture,omitempty"` + // Variant - Variant of the CPU. 
Possible values include: 'V6', 'V7', 'V8'
+	Variant Variant `json:"variant,omitempty"`
+}
+
+// PlatformUpdateParameters the properties for updating the platform configuration.
+type PlatformUpdateParameters struct {
+	// Os - The operating system type required for the run. Possible values include: 'Windows', 'Linux'
+	Os OS `json:"os,omitempty"`
+	// Architecture - The OS architecture. Possible values include: 'Amd64', 'X86', 'Arm'
+	Architecture Architecture `json:"architecture,omitempty"`
+	// Variant - Variant of the CPU. Possible values include: 'V6', 'V7', 'V8'
+	Variant Variant `json:"variant,omitempty"`
+}
+
+// Policies the policies for a container registry.
+type Policies struct {
+	// QuarantinePolicy - The quarantine policy for a container registry.
+	QuarantinePolicy *QuarantinePolicy `json:"quarantinePolicy,omitempty"`
+	// TrustPolicy - The content trust policy for a container registry.
+	TrustPolicy *TrustPolicy `json:"trustPolicy,omitempty"`
+	// RetentionPolicy - The retention policy for a container registry.
+	RetentionPolicy *RetentionPolicy `json:"retentionPolicy,omitempty"`
+}
+
+// ProxyResource the resource model definition for an ARM proxy resource. It will have everything other than
+// required location and tags.
+type ProxyResource struct {
+	// ID - READ-ONLY; The resource ID.
+	ID *string `json:"id,omitempty"`
+	// Name - READ-ONLY; The name of the resource.
+	Name *string `json:"name,omitempty"`
+	// Type - READ-ONLY; The type of the resource.
+	Type *string `json:"type,omitempty"`
+}
+
+// QuarantinePolicy the quarantine policy for a container registry.
+type QuarantinePolicy struct {
+	// Status - The value that indicates whether the policy is enabled or not. Possible values include: 'Enabled', 'Disabled'
+	Status PolicyStatus `json:"status,omitempty"`
+}
+
+// RegenerateCredentialParameters the parameters used to regenerate the login credential.
+type RegenerateCredentialParameters struct {
+	// Name - Specifies the name of the password which should be regenerated -- password or password2. Possible values include: 'Password', 'Password2'
+	Name PasswordName `json:"name,omitempty"`
+}
+
+// RegistriesCreateFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type RegistriesCreateFuture struct {
+	azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *RegistriesCreateFuture) Result(client RegistriesClient) (r Registry, err error) {
+	var done bool
+	done, err = future.DoneWithContext(context.Background(), client)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "containerregistry.RegistriesCreateFuture", "Result", future.Response(), "Polling failure")
+		return
+	}
+	if !done {
+		err = azure.NewAsyncOpIncompleteError("containerregistry.RegistriesCreateFuture")
+		return
+	}
+	sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+	if r.Response.Response, err = future.GetResult(sender); err == nil && r.Response.Response.StatusCode != http.StatusNoContent {
+		r, err = client.CreateResponder(r.Response.Response)
+		if err != nil {
+			err = autorest.NewErrorWithError(err, "containerregistry.RegistriesCreateFuture", "Result", r.Response.Response, "Failure responding to request")
+		}
+	}
+	return
+}
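+
+// NOTE: an illustrative sketch, not generated code. The *Future types here wrap
+// azure.Future from go-autorest; a caller typically starts the operation, waits, and
+// then decodes the result. Create's (ctx, resourceGroupName, registryName, registry)
+// signature is assumed from the autorest convention; verify it against registries.go.
+//
+//	func createRegistryExample(ctx context.Context, client RegistriesClient) (Registry, error) {
+//		loc := "westus"
+//		future, err := client.Create(ctx, "myResourceGroup", "myregistry", Registry{
+//			Location: &loc,
+//			Sku:      &Sku{Name: Basic},
+//		})
+//		if err != nil {
+//			return Registry{}, err
+//		}
+//		// Block until the service reports the operation as done.
+//		if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
+//			return Registry{}, err
+//		}
+//		return future.Result(client) // Result is defined above for RegistriesCreateFuture
+//	}
+
+// RegistriesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.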
+type RegistriesDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *RegistriesDeleteFuture) Result(client RegistriesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.RegistriesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// RegistriesImportImageFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type RegistriesImportImageFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *RegistriesImportImageFuture) Result(client RegistriesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesImportImageFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.RegistriesImportImageFuture") + return + } + ar.Response = future.Response() + return +} + +// RegistriesScheduleRunFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type RegistriesScheduleRunFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *RegistriesScheduleRunFuture) Result(client RegistriesClient) (r Run, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesScheduleRunFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.RegistriesScheduleRunFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if r.Response.Response, err = future.GetResult(sender); err == nil && r.Response.Response.StatusCode != http.StatusNoContent { + r, err = client.ScheduleRunResponder(r.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesScheduleRunFuture", "Result", r.Response.Response, "Failure responding to request") + } + } + return +} + +// RegistriesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type RegistriesUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *RegistriesUpdateFuture) Result(client RegistriesClient) (r Registry, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.RegistriesUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if r.Response.Response, err = future.GetResult(sender); err == nil && r.Response.Response.StatusCode != http.StatusNoContent { + r, err = client.UpdateResponder(r.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesUpdateFuture", "Result", r.Response.Response, "Failure responding to request") + } + } + return +} + +// Registry an object that represents a container registry. +type Registry struct { + autorest.Response `json:"-"` + // Sku - The SKU of the container registry. + Sku *Sku `json:"sku,omitempty"` + // RegistryProperties - The properties of the container registry. + *RegistryProperties `json:"properties,omitempty"` + // ID - READ-ONLY; The resource ID. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. + Type *string `json:"type,omitempty"` + // Location - The location of the resource. This cannot be changed after the resource is created. + Location *string `json:"location,omitempty"` + // Tags - The tags of the resource. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Registry. +func (r Registry) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if r.Sku != nil { + objectMap["sku"] = r.Sku + } + if r.RegistryProperties != nil { + objectMap["properties"] = r.RegistryProperties + } + if r.Location != nil { + objectMap["location"] = r.Location + } + if r.Tags != nil { + objectMap["tags"] = r.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Registry struct. 
+func (r *Registry) UnmarshalJSON(body []byte) error {
+	var m map[string]*json.RawMessage
+	err := json.Unmarshal(body, &m)
+	if err != nil {
+		return err
+	}
+	for k, v := range m {
+		switch k {
+		case "sku":
+			if v != nil {
+				var sku Sku
+				err = json.Unmarshal(*v, &sku)
+				if err != nil {
+					return err
+				}
+				r.Sku = &sku
+			}
+		case "properties":
+			if v != nil {
+				var registryProperties RegistryProperties
+				err = json.Unmarshal(*v, &registryProperties)
+				if err != nil {
+					return err
+				}
+				r.RegistryProperties = &registryProperties
+			}
+		case "id":
+			if v != nil {
+				var ID string
+				err = json.Unmarshal(*v, &ID)
+				if err != nil {
+					return err
+				}
+				r.ID = &ID
+			}
+		case "name":
+			if v != nil {
+				var name string
+				err = json.Unmarshal(*v, &name)
+				if err != nil {
+					return err
+				}
+				r.Name = &name
+			}
+		case "type":
+			if v != nil {
+				var typeVar string
+				err = json.Unmarshal(*v, &typeVar)
+				if err != nil {
+					return err
+				}
+				r.Type = &typeVar
+			}
+		case "location":
+			if v != nil {
+				var location string
+				err = json.Unmarshal(*v, &location)
+				if err != nil {
+					return err
+				}
+				r.Location = &location
+			}
+		case "tags":
+			if v != nil {
+				var tags map[string]*string
+				err = json.Unmarshal(*v, &tags)
+				if err != nil {
+					return err
+				}
+				r.Tags = tags
+			}
+		}
+	}
+
+	return nil
+}
+
+// RegistryListCredentialsResult the response from the ListCredentials operation.
+type RegistryListCredentialsResult struct {
+	autorest.Response `json:"-"`
+	// Username - The username for a container registry.
+	Username *string `json:"username,omitempty"`
+	// Passwords - The list of passwords for a container registry.
+	Passwords *[]RegistryPassword `json:"passwords,omitempty"`
+}
+
+// RegistryListResult the result of a request to list container registries.
+type RegistryListResult struct {
+	autorest.Response `json:"-"`
+	// Value - The list of container registries. Since this list may be incomplete, the nextLink field should be used to request the next list of container registries.
+	Value *[]Registry `json:"value,omitempty"`
+	// NextLink - The URI that can be used to request the next list of container registries.
+	NextLink *string `json:"nextLink,omitempty"`
+}
+
+// RegistryListResultIterator provides access to a complete listing of Registry values.
+type RegistryListResultIterator struct {
+	i    int
+	page RegistryListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *RegistryListResultIterator) NextWithContext(ctx context.Context) (err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/RegistryListResultIterator.NextWithContext")
+		defer func() {
+			sc := -1
+			if iter.Response().Response.Response != nil {
+				sc = iter.Response().Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	iter.i++
+	if iter.i < len(iter.page.Values()) {
+		return nil
+	}
+	err = iter.page.NextWithContext(ctx)
+	if err != nil {
+		iter.i--
+		return err
+	}
+	iter.i = 0
+	return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *RegistryListResultIterator) Next() error {
+	return iter.NextWithContext(context.Background())
+}
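+
+// NOTE: an illustrative sketch, not generated code. The iterator pairs with the page
+// type defined below: NotDone/Value walk the current page and NextWithContext fetches
+// the next one. ListComplete(ctx) is the autorest-style helper assumed to return this
+// iterator; check registries.go for the exact signature. fmt would need importing
+// wherever this runs.
+//
+//	iter, err := client.ListComplete(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	for iter.NotDone() {
+//		fmt.Println(to.String(iter.Value().Name))
+//		if err := iter.NextWithContext(ctx); err != nil {
+//			return err
+//		}
+//	}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.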
+func (iter RegistryListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter RegistryListResultIterator) Response() RegistryListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter RegistryListResultIterator) Value() Registry { + if !iter.page.NotDone() { + return Registry{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the RegistryListResultIterator type. +func NewRegistryListResultIterator(page RegistryListResultPage) RegistryListResultIterator { + return RegistryListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (rlr RegistryListResult) IsEmpty() bool { + return rlr.Value == nil || len(*rlr.Value) == 0 +} + +// registryListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (rlr RegistryListResult) registryListResultPreparer(ctx context.Context) (*http.Request, error) { + if rlr.NextLink == nil || len(to.String(rlr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(rlr.NextLink))) +} + +// RegistryListResultPage contains a page of Registry values. +type RegistryListResultPage struct { + fn func(context.Context, RegistryListResult) (RegistryListResult, error) + rlr RegistryListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *RegistryListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RegistryListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.rlr) + if err != nil { + return err + } + page.rlr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *RegistryListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page RegistryListResultPage) NotDone() bool { + return !page.rlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page RegistryListResultPage) Response() RegistryListResult { + return page.rlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page RegistryListResultPage) Values() []Registry { + if page.rlr.IsEmpty() { + return nil + } + return *page.rlr.Value +} + +// Creates a new instance of the RegistryListResultPage type. +func NewRegistryListResultPage(getNextPage func(context.Context, RegistryListResult) (RegistryListResult, error)) RegistryListResultPage { + return RegistryListResultPage{fn: getNextPage} +} + +// RegistryNameCheckRequest a request to check whether a container registry name is available. 
+type RegistryNameCheckRequest struct { + // Name - The name of the container registry. + Name *string `json:"name,omitempty"` + // Type - The resource type of the container registry. This field must be set to 'Microsoft.ContainerRegistry/registries'. + Type *string `json:"type,omitempty"` +} + +// RegistryNameStatus the result of a request to check the availability of a container registry name. +type RegistryNameStatus struct { + autorest.Response `json:"-"` + // NameAvailable - The value that indicates whether the name is available. + NameAvailable *bool `json:"nameAvailable,omitempty"` + // Reason - If any, the reason that the name is not available. + Reason *string `json:"reason,omitempty"` + // Message - If any, the error message that provides more detail for the reason that the name is not available. + Message *string `json:"message,omitempty"` +} + +// RegistryPassword the login password for the container registry. +type RegistryPassword struct { + // Name - The password name. Possible values include: 'Password', 'Password2' + Name PasswordName `json:"name,omitempty"` + // Value - The password value. + Value *string `json:"value,omitempty"` +} + +// RegistryProperties the properties of a container registry. +type RegistryProperties struct { + // LoginServer - READ-ONLY; The URL that can be used to log into the container registry. + LoginServer *string `json:"loginServer,omitempty"` + // CreationDate - READ-ONLY; The creation date of the container registry in ISO8601 format. + CreationDate *date.Time `json:"creationDate,omitempty"` + // ProvisioningState - READ-ONLY; The provisioning state of the container registry at the time the operation was called. Possible values include: 'Creating', 'Updating', 'Deleting', 'Succeeded', 'Failed', 'Canceled' + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` + // Status - READ-ONLY; The status of the container registry at the time the operation was called. + Status *Status `json:"status,omitempty"` + // AdminUserEnabled - The value that indicates whether the admin user is enabled. + AdminUserEnabled *bool `json:"adminUserEnabled,omitempty"` + // StorageAccount - The properties of the storage account for the container registry. Only applicable to Classic SKU. + StorageAccount *StorageAccountProperties `json:"storageAccount,omitempty"` + // NetworkRuleSet - The network rule set for a container registry. + NetworkRuleSet *NetworkRuleSet `json:"networkRuleSet,omitempty"` + // Policies - The policies for a container registry. + Policies *Policies `json:"policies,omitempty"` +} + +// RegistryPropertiesUpdateParameters the parameters for updating the properties of a container registry. +type RegistryPropertiesUpdateParameters struct { + // AdminUserEnabled - The value that indicates whether the admin user is enabled. + AdminUserEnabled *bool `json:"adminUserEnabled,omitempty"` + // NetworkRuleSet - The network rule set for a container registry. + NetworkRuleSet *NetworkRuleSet `json:"networkRuleSet,omitempty"` + // Policies - The policies for a container registry. + Policies *Policies `json:"policies,omitempty"` +} + +// RegistryUpdateParameters the parameters for updating a container registry. +type RegistryUpdateParameters struct { + // Tags - The tags for the container registry. + Tags map[string]*string `json:"tags"` + // Sku - The SKU of the container registry. + Sku *Sku `json:"sku,omitempty"` + // RegistryPropertiesUpdateParameters - The properties that the container registry will be updated with. 
+	*RegistryPropertiesUpdateParameters `json:"properties,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for RegistryUpdateParameters.
+func (rup RegistryUpdateParameters) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	if rup.Tags != nil {
+		objectMap["tags"] = rup.Tags
+	}
+	if rup.Sku != nil {
+		objectMap["sku"] = rup.Sku
+	}
+	if rup.RegistryPropertiesUpdateParameters != nil {
+		objectMap["properties"] = rup.RegistryPropertiesUpdateParameters
+	}
+	return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for RegistryUpdateParameters struct.
+func (rup *RegistryUpdateParameters) UnmarshalJSON(body []byte) error {
+	var m map[string]*json.RawMessage
+	err := json.Unmarshal(body, &m)
+	if err != nil {
+		return err
+	}
+	for k, v := range m {
+		switch k {
+		case "tags":
+			if v != nil {
+				var tags map[string]*string
+				err = json.Unmarshal(*v, &tags)
+				if err != nil {
+					return err
+				}
+				rup.Tags = tags
+			}
+		case "sku":
+			if v != nil {
+				var sku Sku
+				err = json.Unmarshal(*v, &sku)
+				if err != nil {
+					return err
+				}
+				rup.Sku = &sku
+			}
+		case "properties":
+			if v != nil {
+				var registryPropertiesUpdateParameters RegistryPropertiesUpdateParameters
+				err = json.Unmarshal(*v, &registryPropertiesUpdateParameters)
+				if err != nil {
+					return err
+				}
+				rup.RegistryPropertiesUpdateParameters = &registryPropertiesUpdateParameters
+			}
+		}
+	}
+
+	return nil
+}
+
+// RegistryUsage the quota usage for a container registry.
+type RegistryUsage struct {
+	// Name - The name of the usage.
+	Name *string `json:"name,omitempty"`
+	// Limit - The limit of the usage.
+	Limit *int64 `json:"limit,omitempty"`
+	// CurrentValue - The current value of the usage.
+	CurrentValue *int64 `json:"currentValue,omitempty"`
+	// Unit - The unit of measurement. Possible values include: 'Count', 'Bytes'
+	Unit RegistryUsageUnit `json:"unit,omitempty"`
+}
+
+// RegistryUsageListResult the result of a request to get container registry quota usages.
+type RegistryUsageListResult struct {
+	autorest.Response `json:"-"`
+	// Value - The list of container registry quota usages.
+	Value *[]RegistryUsage `json:"value,omitempty"`
+}
+
+// Replication an object that represents a replication for a container registry.
+type Replication struct {
+	autorest.Response `json:"-"`
+	// ReplicationProperties - The properties of the replication.
+	*ReplicationProperties `json:"properties,omitempty"`
+	// ID - READ-ONLY; The resource ID.
+	ID *string `json:"id,omitempty"`
+	// Name - READ-ONLY; The name of the resource.
+	Name *string `json:"name,omitempty"`
+	// Type - READ-ONLY; The type of the resource.
+	Type *string `json:"type,omitempty"`
+	// Location - The location of the resource. This cannot be changed after the resource is created.
+	Location *string `json:"location,omitempty"`
+	// Tags - The tags of the resource.
+	Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for Replication.
+func (r Replication) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	if r.ReplicationProperties != nil {
+		objectMap["properties"] = r.ReplicationProperties
+	}
+	if r.Location != nil {
+		objectMap["location"] = r.Location
+	}
+	if r.Tags != nil {
+		objectMap["tags"] = r.Tags
+	}
+	return json.Marshal(objectMap)
+}
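+
+// NOTE: an illustrative sketch, not generated code. The custom marshalers above exist
+// to flatten the embedded *Parameters/*Properties structs into a nested "properties"
+// JSON object, which is what the ARM API expects:
+//
+//	enabled := true
+//	params := RegistryUpdateParameters{
+//		RegistryPropertiesUpdateParameters: &RegistryPropertiesUpdateParameters{
+//			AdminUserEnabled: &enabled,
+//		},
+//	}
+//	b, _ := json.Marshal(params)
+//	// string(b) == `{"properties":{"adminUserEnabled":true}}`
+
+// UnmarshalJSON is the custom unmarshaler for Replication struct.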
+func (r *Replication) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var replicationProperties ReplicationProperties + err = json.Unmarshal(*v, &replicationProperties) + if err != nil { + return err + } + r.ReplicationProperties = &replicationProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + r.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + r.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + r.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + r.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + r.Tags = tags + } + } + } + + return nil +} + +// ReplicationListResult the result of a request to list replications for a container registry. +type ReplicationListResult struct { + autorest.Response `json:"-"` + // Value - The list of replications. Since this list may be incomplete, the nextLink field should be used to request the next list of replications. + Value *[]Replication `json:"value,omitempty"` + // NextLink - The URI that can be used to request the next list of replications. + NextLink *string `json:"nextLink,omitempty"` +} + +// ReplicationListResultIterator provides access to a complete listing of Replication values. +type ReplicationListResultIterator struct { + i int + page ReplicationListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ReplicationListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ReplicationListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ReplicationListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ReplicationListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ReplicationListResultIterator) Response() ReplicationListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. 
+func (iter ReplicationListResultIterator) Value() Replication { + if !iter.page.NotDone() { + return Replication{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the ReplicationListResultIterator type. +func NewReplicationListResultIterator(page ReplicationListResultPage) ReplicationListResultIterator { + return ReplicationListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (rlr ReplicationListResult) IsEmpty() bool { + return rlr.Value == nil || len(*rlr.Value) == 0 +} + +// replicationListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (rlr ReplicationListResult) replicationListResultPreparer(ctx context.Context) (*http.Request, error) { + if rlr.NextLink == nil || len(to.String(rlr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(rlr.NextLink))) +} + +// ReplicationListResultPage contains a page of Replication values. +type ReplicationListResultPage struct { + fn func(context.Context, ReplicationListResult) (ReplicationListResult, error) + rlr ReplicationListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *ReplicationListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ReplicationListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.rlr) + if err != nil { + return err + } + page.rlr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *ReplicationListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ReplicationListResultPage) NotDone() bool { + return !page.rlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page ReplicationListResultPage) Response() ReplicationListResult { + return page.rlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ReplicationListResultPage) Values() []Replication { + if page.rlr.IsEmpty() { + return nil + } + return *page.rlr.Value +} + +// Creates a new instance of the ReplicationListResultPage type. +func NewReplicationListResultPage(getNextPage func(context.Context, ReplicationListResult) (ReplicationListResult, error)) ReplicationListResultPage { + return ReplicationListResultPage{fn: getNextPage} +} + +// ReplicationProperties the properties of a replication. +type ReplicationProperties struct { + // ProvisioningState - READ-ONLY; The provisioning state of the replication at the time the operation was called. Possible values include: 'Creating', 'Updating', 'Deleting', 'Succeeded', 'Failed', 'Canceled' + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` + // Status - READ-ONLY; The status of the replication at the time the operation was called. 
+ Status *Status `json:"status,omitempty"` +} + +// ReplicationsCreateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type ReplicationsCreateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *ReplicationsCreateFuture) Result(client ReplicationsClient) (r Replication, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsCreateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.ReplicationsCreateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if r.Response.Response, err = future.GetResult(sender); err == nil && r.Response.Response.StatusCode != http.StatusNoContent { + r, err = client.CreateResponder(r.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsCreateFuture", "Result", r.Response.Response, "Failure responding to request") + } + } + return +} + +// ReplicationsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type ReplicationsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *ReplicationsDeleteFuture) Result(client ReplicationsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.ReplicationsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// ReplicationsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type ReplicationsUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *ReplicationsUpdateFuture) Result(client ReplicationsClient) (r Replication, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.ReplicationsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if r.Response.Response, err = future.GetResult(sender); err == nil && r.Response.Response.StatusCode != http.StatusNoContent { + r, err = client.UpdateResponder(r.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsUpdateFuture", "Result", r.Response.Response, "Failure responding to request") + } + } + return +} + +// ReplicationUpdateParameters the parameters for updating a replication. 
+type ReplicationUpdateParameters struct { + // Tags - The tags for the replication. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for ReplicationUpdateParameters. +func (rup ReplicationUpdateParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if rup.Tags != nil { + objectMap["tags"] = rup.Tags + } + return json.Marshal(objectMap) +} + +// Request the request that generated the event. +type Request struct { + // ID - The ID of the request that initiated the event. + ID *string `json:"id,omitempty"` + // Addr - The IP or hostname and possibly port of the client connection that initiated the event. This is the RemoteAddr from the standard http request. + Addr *string `json:"addr,omitempty"` + // Host - The externally accessible hostname of the registry instance, as specified by the http host header on incoming requests. + Host *string `json:"host,omitempty"` + // Method - The request method that generated the event. + Method *string `json:"method,omitempty"` + // Useragent - The user agent header of the request. + Useragent *string `json:"useragent,omitempty"` +} + +// Resource an Azure resource. +type Resource struct { + // ID - READ-ONLY; The resource ID. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. + Type *string `json:"type,omitempty"` + // Location - The location of the resource. This cannot be changed after the resource is created. + Location *string `json:"location,omitempty"` + // Tags - The tags of the resource. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Resource. +func (r Resource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if r.Location != nil { + objectMap["location"] = r.Location + } + if r.Tags != nil { + objectMap["tags"] = r.Tags + } + return json.Marshal(objectMap) +} + +// RetentionPolicy the retention policy for a container registry. +type RetentionPolicy struct { + // Days - The number of days to retain an untagged manifest after which it gets purged. + Days *int32 `json:"days,omitempty"` + // LastUpdatedTime - READ-ONLY; The timestamp when the policy was last updated. + LastUpdatedTime *date.Time `json:"lastUpdatedTime,omitempty"` + // Status - The value that indicates whether the policy is enabled or not. Possible values include: 'Enabled', 'Disabled' + Status PolicyStatus `json:"status,omitempty"` +} + +// Run run resource properties +type Run struct { + autorest.Response `json:"-"` + // RunProperties - The properties of a run. + *RunProperties `json:"properties,omitempty"` + // ID - READ-ONLY; The resource ID. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for Run. +func (r Run) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if r.RunProperties != nil { + objectMap["properties"] = r.RunProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Run struct. 
+func (r *Run) UnmarshalJSON(body []byte) error {
+	var m map[string]*json.RawMessage
+	err := json.Unmarshal(body, &m)
+	if err != nil {
+		return err
+	}
+	for k, v := range m {
+		switch k {
+		case "properties":
+			if v != nil {
+				var runProperties RunProperties
+				err = json.Unmarshal(*v, &runProperties)
+				if err != nil {
+					return err
+				}
+				r.RunProperties = &runProperties
+			}
+		case "id":
+			if v != nil {
+				var ID string
+				err = json.Unmarshal(*v, &ID)
+				if err != nil {
+					return err
+				}
+				r.ID = &ID
+			}
+		case "name":
+			if v != nil {
+				var name string
+				err = json.Unmarshal(*v, &name)
+				if err != nil {
+					return err
+				}
+				r.Name = &name
+			}
+		case "type":
+			if v != nil {
+				var typeVar string
+				err = json.Unmarshal(*v, &typeVar)
+				if err != nil {
+					return err
+				}
+				r.Type = &typeVar
+			}
+		}
+	}
+
+	return nil
+}
+
+// RunFilter properties that are enabled for Odata querying on runs.
+type RunFilter struct {
+	// RunID - The unique identifier for the run.
+	RunID *string `json:"runId,omitempty"`
+	// RunType - The type of run. Possible values include: 'QuickBuild', 'QuickRun', 'AutoBuild', 'AutoRun'
+	RunType RunType `json:"runType,omitempty"`
+	// Status - The current status of the run. Possible values include: 'RunStatusQueued', 'RunStatusStarted', 'RunStatusRunning', 'RunStatusSucceeded', 'RunStatusFailed', 'RunStatusCanceled', 'RunStatusError', 'RunStatusTimeout'
+	Status RunStatus `json:"status,omitempty"`
+	// CreateTime - The create time for a run.
+	CreateTime *date.Time `json:"createTime,omitempty"`
+	// FinishTime - The time the run finished.
+	FinishTime *date.Time `json:"finishTime,omitempty"`
+	// OutputImageManifests - The list of comma-separated image manifests that were generated from the run. This is applicable if the run is of
+	// build type.
+	OutputImageManifests *string `json:"outputImageManifests,omitempty"`
+	// IsArchiveEnabled - The value that indicates whether archiving is enabled or not.
+	IsArchiveEnabled *bool `json:"isArchiveEnabled,omitempty"`
+	// TaskName - The name of the task that the run corresponds to.
+	TaskName *string `json:"taskName,omitempty"`
+}
+
+// RunGetLogResult the result of the get log link operation.
+type RunGetLogResult struct {
+	autorest.Response `json:"-"`
+	// LogLink - The link to logs for a run on an Azure container registry.
+	LogLink *string `json:"logLink,omitempty"`
+}
+
+// RunListResult collection of runs.
+type RunListResult struct {
+	autorest.Response `json:"-"`
+	// Value - The collection value.
+	Value *[]Run `json:"value,omitempty"`
+	// NextLink - The URI that can be used to request the next set of paged results.
+	NextLink *string `json:"nextLink,omitempty"`
+}
+
+// RunListResultIterator provides access to a complete listing of Run values.
+type RunListResultIterator struct {
+	i    int
+	page RunListResultPage
+}
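+
+// NOTE: an illustrative sketch, not generated code. RunFilter documents the properties
+// the service accepts in an OData filter when listing runs; the filter itself travels
+// as a plain string. The RunsClient.List signature shown (resource group, registry,
+// filter, top) is assumed from the autorest convention; check runs.go before relying
+// on it.
+//
+//	filter := "TaskName eq 'nightly' and Status eq 'Succeeded'"
+//	page, err := client.List(ctx, "myResourceGroup", "myregistry", filter, nil)
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.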
+func (iter *RunListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RunListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *RunListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter RunListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter RunListResultIterator) Response() RunListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter RunListResultIterator) Value() Run { + if !iter.page.NotDone() { + return Run{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the RunListResultIterator type. +func NewRunListResultIterator(page RunListResultPage) RunListResultIterator { + return RunListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (rlr RunListResult) IsEmpty() bool { + return rlr.Value == nil || len(*rlr.Value) == 0 +} + +// runListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (rlr RunListResult) runListResultPreparer(ctx context.Context) (*http.Request, error) { + if rlr.NextLink == nil || len(to.String(rlr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(rlr.NextLink))) +} + +// RunListResultPage contains a page of Run values. +type RunListResultPage struct { + fn func(context.Context, RunListResult) (RunListResult, error) + rlr RunListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *RunListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RunListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.rlr) + if err != nil { + return err + } + page.rlr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *RunListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. 
+func (page RunListResultPage) NotDone() bool {
+	return !page.rlr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page RunListResultPage) Response() RunListResult {
+	return page.rlr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page RunListResultPage) Values() []Run {
+	if page.rlr.IsEmpty() {
+		return nil
+	}
+	return *page.rlr.Value
+}
+
+// Creates a new instance of the RunListResultPage type.
+func NewRunListResultPage(getNextPage func(context.Context, RunListResult) (RunListResult, error)) RunListResultPage {
+	return RunListResultPage{fn: getNextPage}
+}
+
+// RunProperties the properties for a run.
+type RunProperties struct {
+	// RunID - The unique identifier for the run.
+	RunID *string `json:"runId,omitempty"`
+	// Status - The current status of the run. Possible values include: 'RunStatusQueued', 'RunStatusStarted', 'RunStatusRunning', 'RunStatusSucceeded', 'RunStatusFailed', 'RunStatusCanceled', 'RunStatusError', 'RunStatusTimeout'
+	Status RunStatus `json:"status,omitempty"`
+	// LastUpdatedTime - The last updated time for the run.
+	LastUpdatedTime *date.Time `json:"lastUpdatedTime,omitempty"`
+	// RunType - The type of run. Possible values include: 'QuickBuild', 'QuickRun', 'AutoBuild', 'AutoRun'
+	RunType RunType `json:"runType,omitempty"`
+	// CreateTime - The time the run was scheduled.
+	CreateTime *date.Time `json:"createTime,omitempty"`
+	// StartTime - The time the run started.
+	StartTime *date.Time `json:"startTime,omitempty"`
+	// FinishTime - The time the run finished.
+	FinishTime *date.Time `json:"finishTime,omitempty"`
+	// OutputImages - The list of all images that were generated from the run. This is applicable if the run generates base image dependencies.
+	OutputImages *[]ImageDescriptor `json:"outputImages,omitempty"`
+	// Task - The task against which the run was scheduled.
+	Task *string `json:"task,omitempty"`
+	// ImageUpdateTrigger - The image update trigger that caused the run. This is applicable if the task has a base image trigger configured.
+	ImageUpdateTrigger *ImageUpdateTrigger `json:"imageUpdateTrigger,omitempty"`
+	// SourceTrigger - The source trigger that caused the run.
+	SourceTrigger *SourceTriggerDescriptor `json:"sourceTrigger,omitempty"`
+	// Platform - The platform properties against which the run will happen.
+	Platform *PlatformProperties `json:"platform,omitempty"`
+	// AgentConfiguration - The machine configuration of the run agent.
+	AgentConfiguration *AgentProperties `json:"agentConfiguration,omitempty"`
+	// SourceRegistryAuth - The scope of the credentials that were used to log in to the source registry during this run.
+	SourceRegistryAuth *string `json:"sourceRegistryAuth,omitempty"`
+	// CustomRegistries - The list of custom registries that were logged in during this run.
+	CustomRegistries *[]string `json:"customRegistries,omitempty"`
+	// RunErrorMessage - READ-ONLY; The error message received from backend systems after the run is scheduled.
+	RunErrorMessage *string `json:"runErrorMessage,omitempty"`
+	// ProvisioningState - The provisioning state of a run. Possible values include: 'Creating', 'Updating', 'Deleting', 'Succeeded', 'Failed', 'Canceled'
+	ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+	// IsArchiveEnabled - The value that indicates whether archiving is enabled or not.
+ IsArchiveEnabled *bool `json:"isArchiveEnabled,omitempty"` + // TimerTrigger - The timer trigger that caused the run. + TimerTrigger *TimerTriggerDescriptor `json:"timerTrigger,omitempty"` +} + +// BasicRunRequest the request parameters for scheduling a run. +type BasicRunRequest interface { + AsDockerBuildRequest() (*DockerBuildRequest, bool) + AsFileTaskRunRequest() (*FileTaskRunRequest, bool) + AsTaskRunRequest() (*TaskRunRequest, bool) + AsEncodedTaskRunRequest() (*EncodedTaskRunRequest, bool) + AsRunRequest() (*RunRequest, bool) +} + +// RunRequest the request parameters for scheduling a run. +type RunRequest struct { + // IsArchiveEnabled - The value that indicates whether archiving is enabled for the run or not. + IsArchiveEnabled *bool `json:"isArchiveEnabled,omitempty"` + // Type - Possible values include: 'TypeRunRequest', 'TypeDockerBuildRequest', 'TypeFileTaskRunRequest', 'TypeTaskRunRequest', 'TypeEncodedTaskRunRequest' + Type Type `json:"type,omitempty"` +} + +func unmarshalBasicRunRequest(body []byte) (BasicRunRequest, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["type"] { + case string(TypeDockerBuildRequest): + var dbr DockerBuildRequest + err := json.Unmarshal(body, &dbr) + return dbr, err + case string(TypeFileTaskRunRequest): + var ftrr FileTaskRunRequest + err := json.Unmarshal(body, &ftrr) + return ftrr, err + case string(TypeTaskRunRequest): + var trr TaskRunRequest + err := json.Unmarshal(body, &trr) + return trr, err + case string(TypeEncodedTaskRunRequest): + var etrr EncodedTaskRunRequest + err := json.Unmarshal(body, &etrr) + return etrr, err + default: + var rr RunRequest + err := json.Unmarshal(body, &rr) + return rr, err + } +} +func unmarshalBasicRunRequestArray(body []byte) ([]BasicRunRequest, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + rrArray := make([]BasicRunRequest, len(rawMessages)) + + for index, rawMessage := range rawMessages { + rr, err := unmarshalBasicRunRequest(*rawMessage) + if err != nil { + return nil, err + } + rrArray[index] = rr + } + return rrArray, nil +} + +// MarshalJSON is the custom marshaler for RunRequest. +func (rr RunRequest) MarshalJSON() ([]byte, error) { + rr.Type = TypeRunRequest + objectMap := make(map[string]interface{}) + if rr.IsArchiveEnabled != nil { + objectMap["isArchiveEnabled"] = rr.IsArchiveEnabled + } + if rr.Type != "" { + objectMap["type"] = rr.Type + } + return json.Marshal(objectMap) +} + +// AsDockerBuildRequest is the BasicRunRequest implementation for RunRequest. +func (rr RunRequest) AsDockerBuildRequest() (*DockerBuildRequest, bool) { + return nil, false +} + +// AsFileTaskRunRequest is the BasicRunRequest implementation for RunRequest. +func (rr RunRequest) AsFileTaskRunRequest() (*FileTaskRunRequest, bool) { + return nil, false +} + +// AsTaskRunRequest is the BasicRunRequest implementation for RunRequest. +func (rr RunRequest) AsTaskRunRequest() (*TaskRunRequest, bool) { + return nil, false +} + +// AsEncodedTaskRunRequest is the BasicRunRequest implementation for RunRequest. +func (rr RunRequest) AsEncodedTaskRunRequest() (*EncodedTaskRunRequest, bool) { + return nil, false +} + +// AsRunRequest is the BasicRunRequest implementation for RunRequest. +func (rr RunRequest) AsRunRequest() (*RunRequest, bool) { + return &rr, true +} + +// AsBasicRunRequest is the BasicRunRequest implementation for RunRequest. 
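+
+// NOTE: an illustrative sketch, not generated code. "type" is the discriminator that
+// unmarshalBasicRunRequest switches on (the Type constants are assumed to carry the
+// bare discriminator strings, e.g. "DockerBuildRequest"); the As* accessors then
+// recover the concrete request type:
+//
+//	br, err := unmarshalBasicRunRequest([]byte(`{"type":"DockerBuildRequest","isArchiveEnabled":true}`))
+//	if err != nil {
+//		return err
+//	}
+//	if dbr, ok := br.AsDockerBuildRequest(); ok {
+//		_ = dbr // *DockerBuildRequest with its Docker-specific fields populated
+//	}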
+func (rr RunRequest) AsBasicRunRequest() (BasicRunRequest, bool) {
+	return &rr, true
+}
+
+// RunsCancelFuture an abstraction for monitoring and retrieving the results of a long-running operation.
+type RunsCancelFuture struct {
+	azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *RunsCancelFuture) Result(client RunsClient) (ar autorest.Response, err error) {
+	var done bool
+	done, err = future.DoneWithContext(context.Background(), client)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "containerregistry.RunsCancelFuture", "Result", future.Response(), "Polling failure")
+		return
+	}
+	if !done {
+		err = azure.NewAsyncOpIncompleteError("containerregistry.RunsCancelFuture")
+		return
+	}
+	ar.Response = future.Response()
+	return
+}
+
+// RunsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation.
+type RunsUpdateFuture struct {
+	azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *RunsUpdateFuture) Result(client RunsClient) (r Run, err error) {
+	var done bool
+	done, err = future.DoneWithContext(context.Background(), client)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "containerregistry.RunsUpdateFuture", "Result", future.Response(), "Polling failure")
+		return
+	}
+	if !done {
+		err = azure.NewAsyncOpIncompleteError("containerregistry.RunsUpdateFuture")
+		return
+	}
+	sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+	if r.Response.Response, err = future.GetResult(sender); err == nil && r.Response.Response.StatusCode != http.StatusNoContent {
+		r, err = client.UpdateResponder(r.Response.Response)
+		if err != nil {
+			err = autorest.NewErrorWithError(err, "containerregistry.RunsUpdateFuture", "Result", r.Response.Response, "Failure responding to request")
+		}
+	}
+	return
+}
+
+// RunUpdateParameters the set of run properties that can be updated.
+type RunUpdateParameters struct {
+	// IsArchiveEnabled - The value that indicates whether archiving is enabled or not.
+	IsArchiveEnabled *bool `json:"isArchiveEnabled,omitempty"`
+}
+
+// SecretObject describes the properties of a secret object value.
+type SecretObject struct {
+	// Value - The value of the secret. The format of this value will be determined
+	// based on the type of the secret object. If the type is Opaque, the value will be
+	// used as is without any modification.
+	Value *string `json:"value,omitempty"`
+	// Type - The type of the secret object which determines how the value of the secret object has to be
+	// interpreted. Possible values include: 'Opaque', 'Vaultsecret'
+	Type SecretObjectType `json:"type,omitempty"`
+}
+
+// SetValue the properties of an overridable value that can be passed to a task template.
+type SetValue struct {
+	// Name - The name of the overridable value.
+	Name *string `json:"name,omitempty"`
+	// Value - The overridable value.
+	Value *string `json:"value,omitempty"`
+	// IsSecret - Flag to indicate whether the value represents a secret or not.
+	IsSecret *bool `json:"isSecret,omitempty"`
+}
+
+// Sku the SKU of a container registry.
+type Sku struct {
+	// Name - The SKU name of the container registry. Required for registry creation. Possible values include: 'Classic', 'Basic', 'Standard', 'Premium'
Possible values include: 'Classic', 'Basic', 'Standard', 'Premium' + Name SkuName `json:"name,omitempty"` + // Tier - READ-ONLY; The SKU tier based on the SKU name. Possible values include: 'SkuTierClassic', 'SkuTierBasic', 'SkuTierStandard', 'SkuTierPremium' + Tier SkuTier `json:"tier,omitempty"` +} + +// Source the registry node that generated the event. Put differently, while the actor initiates the event, +// the source generates it. +type Source struct { + // Addr - The IP or hostname and the port of the registry node that generated the event. Generally, this will be resolved by os.Hostname() along with the running port. + Addr *string `json:"addr,omitempty"` + // InstanceID - The running instance of an application. Changes after each restart. + InstanceID *string `json:"instanceID,omitempty"` +} + +// SourceProperties the properties of the source code repository. +type SourceProperties struct { + // SourceControlType - The type of source control service. Possible values include: 'Github', 'VisualStudioTeamService' + SourceControlType SourceControlType `json:"sourceControlType,omitempty"` + // RepositoryURL - The full URL to the source code repository + RepositoryURL *string `json:"repositoryUrl,omitempty"` + // Branch - The branch name of the source code. + Branch *string `json:"branch,omitempty"` + // SourceControlAuthProperties - The authorization properties for accessing the source code repository and to set up + // webhooks for notifications. + SourceControlAuthProperties *AuthInfo `json:"sourceControlAuthProperties,omitempty"` +} + +// SourceRegistryCredentials describes the credential parameters for accessing the source registry. +type SourceRegistryCredentials struct { + // LoginMode - The authentication mode which determines the source registry login scope. The credentials for the source registry + // will be generated using the given scope. These credentials will be used to login to + // the source registry during the run. Possible values include: 'SourceRegistryLoginModeNone', 'SourceRegistryLoginModeDefault' + LoginMode SourceRegistryLoginMode `json:"loginMode,omitempty"` +} + +// SourceTrigger the properties of a source based trigger. +type SourceTrigger struct { + // SourceRepository - The properties that describes the source(code) for the task. + SourceRepository *SourceProperties `json:"sourceRepository,omitempty"` + // SourceTriggerEvents - The source event corresponding to the trigger. + SourceTriggerEvents *[]SourceTriggerEvent `json:"sourceTriggerEvents,omitempty"` + // Status - The current status of trigger. Possible values include: 'TriggerStatusDisabled', 'TriggerStatusEnabled' + Status TriggerStatus `json:"status,omitempty"` + // Name - The name of the trigger. + Name *string `json:"name,omitempty"` +} + +// SourceTriggerDescriptor the source trigger that caused a run. +type SourceTriggerDescriptor struct { + // ID - The unique ID of the trigger. + ID *string `json:"id,omitempty"` + // EventType - The event type of the trigger. + EventType *string `json:"eventType,omitempty"` + // CommitID - The unique ID that identifies a commit. + CommitID *string `json:"commitId,omitempty"` + // PullRequestID - The unique ID that identifies pull request. + PullRequestID *string `json:"pullRequestId,omitempty"` + // RepositoryURL - The repository URL. + RepositoryURL *string `json:"repositoryUrl,omitempty"` + // BranchName - The branch name in the repository. + BranchName *string `json:"branchName,omitempty"` + // ProviderType - The source control provider type. 
+ ProviderType *string `json:"providerType,omitempty"` +} + +// SourceTriggerUpdateParameters the properties for updating a source based trigger. +type SourceTriggerUpdateParameters struct { + // SourceRepository - The properties that describes the source(code) for the task. + SourceRepository *SourceUpdateParameters `json:"sourceRepository,omitempty"` + // SourceTriggerEvents - The source event corresponding to the trigger. + SourceTriggerEvents *[]SourceTriggerEvent `json:"sourceTriggerEvents,omitempty"` + // Status - The current status of trigger. Possible values include: 'TriggerStatusDisabled', 'TriggerStatusEnabled' + Status TriggerStatus `json:"status,omitempty"` + // Name - The name of the trigger. + Name *string `json:"name,omitempty"` +} + +// SourceUpdateParameters the properties for updating the source code repository. +type SourceUpdateParameters struct { + // SourceControlType - The type of source control service. Possible values include: 'Github', 'VisualStudioTeamService' + SourceControlType SourceControlType `json:"sourceControlType,omitempty"` + // RepositoryURL - The full URL to the source code repository + RepositoryURL *string `json:"repositoryUrl,omitempty"` + // Branch - The branch name of the source code. + Branch *string `json:"branch,omitempty"` + // SourceControlAuthProperties - The authorization properties for accessing the source code repository and to set up + // webhooks for notifications. + SourceControlAuthProperties *AuthInfoUpdateParameters `json:"sourceControlAuthProperties,omitempty"` +} + +// SourceUploadDefinition the properties of a response to source upload request. +type SourceUploadDefinition struct { + autorest.Response `json:"-"` + // UploadURL - The URL where the client can upload the source. + UploadURL *string `json:"uploadUrl,omitempty"` + // RelativePath - The relative path to the source. This is used to submit the subsequent queue build request. + RelativePath *string `json:"relativePath,omitempty"` +} + +// Status the status of an Azure resource at the time the operation was called. +type Status struct { + // DisplayStatus - READ-ONLY; The short label for the status. + DisplayStatus *string `json:"displayStatus,omitempty"` + // Message - READ-ONLY; The detailed message for the status, including alerts and error messages. + Message *string `json:"message,omitempty"` + // Timestamp - READ-ONLY; The timestamp when the status was changed to the current value. + Timestamp *date.Time `json:"timestamp,omitempty"` +} + +// StorageAccountProperties the properties of a storage account for a container registry. Only applicable +// to Classic SKU. +type StorageAccountProperties struct { + // ID - The resource ID of the storage account. + ID *string `json:"id,omitempty"` +} + +// Target the target of the event. +type Target struct { + // MediaType - The MIME type of the referenced object. + MediaType *string `json:"mediaType,omitempty"` + // Size - The number of bytes of the content. Same as Length field. + Size *int64 `json:"size,omitempty"` + // Digest - The digest of the content, as defined by the Registry V2 HTTP API Specification. + Digest *string `json:"digest,omitempty"` + // Length - The number of bytes of the content. Same as Size field. + Length *int64 `json:"length,omitempty"` + // Repository - The repository name. + Repository *string `json:"repository,omitempty"` + // URL - The direct URL to the content. + URL *string `json:"url,omitempty"` + // Tag - The tag name. + Tag *string `json:"tag,omitempty"` + // Name - The name of the artifact. 
+ Name *string `json:"name,omitempty"` + // Version - The version of the artifact. + Version *string `json:"version,omitempty"` +} + +// Task the task that has the ARM resource and task properties. +// The task will have all information to schedule a run against it. +type Task struct { + autorest.Response `json:"-"` + // Identity - Identity for the resource. + Identity *IdentityProperties `json:"identity,omitempty"` + // TaskProperties - The properties of a task. + *TaskProperties `json:"properties,omitempty"` + // ID - READ-ONLY; The resource ID. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. + Type *string `json:"type,omitempty"` + // Location - The location of the resource. This cannot be changed after the resource is created. + Location *string `json:"location,omitempty"` + // Tags - The tags of the resource. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Task. +func (t Task) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if t.Identity != nil { + objectMap["identity"] = t.Identity + } + if t.TaskProperties != nil { + objectMap["properties"] = t.TaskProperties + } + if t.Location != nil { + objectMap["location"] = t.Location + } + if t.Tags != nil { + objectMap["tags"] = t.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Task struct. +func (t *Task) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "identity": + if v != nil { + var identity IdentityProperties + err = json.Unmarshal(*v, &identity) + if err != nil { + return err + } + t.Identity = &identity + } + case "properties": + if v != nil { + var taskProperties TaskProperties + err = json.Unmarshal(*v, &taskProperties) + if err != nil { + return err + } + t.TaskProperties = &taskProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + t.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + t.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + t.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + t.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + t.Tags = tags + } + } + } + + return nil +} + +// TaskListResult the collection of tasks. +type TaskListResult struct { + autorest.Response `json:"-"` + // Value - The collection value. + Value *[]Task `json:"value,omitempty"` + // NextLink - The URI that can be used to request the next set of paged results. + NextLink *string `json:"nextLink,omitempty"` +} + +// TaskListResultIterator provides access to a complete listing of Task values. +type TaskListResultIterator struct { + i int + page TaskListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. 
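A note on the custom Task marshaler above: it exists to drop the READ-ONLY ID, Name, and Type fields from request payloads, so they are never sent back to the service. A small sketch of the effect (the resource ID is illustrative):

// Sketch: READ-ONLY fields fall out of the serialized payload.
func exampleTaskMarshal() {
	t := Task{
		ID:       to.StringPtr("/subscriptions/example/tasks/nightly"), // READ-ONLY; dropped by MarshalJSON
		Location: to.StringPtr("eastus"),
	}
	body, _ := json.Marshal(t)
	fmt.Println(string(body)) // prints {"location":"eastus"}; no "id" key
}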
+func (iter *TaskListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TaskListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *TaskListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter TaskListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter TaskListResultIterator) Response() TaskListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter TaskListResultIterator) Value() Task { + if !iter.page.NotDone() { + return Task{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the TaskListResultIterator type. +func NewTaskListResultIterator(page TaskListResultPage) TaskListResultIterator { + return TaskListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (tlr TaskListResult) IsEmpty() bool { + return tlr.Value == nil || len(*tlr.Value) == 0 +} + +// taskListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (tlr TaskListResult) taskListResultPreparer(ctx context.Context) (*http.Request, error) { + if tlr.NextLink == nil || len(to.String(tlr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(tlr.NextLink))) +} + +// TaskListResultPage contains a page of Task values. +type TaskListResultPage struct { + fn func(context.Context, TaskListResult) (TaskListResult, error) + tlr TaskListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *TaskListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TaskListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.tlr) + if err != nil { + return err + } + page.tlr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *TaskListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. 
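TaskListResultIterator walks items one at a time, advancing within the current page and transparently fetching the next page through NextWithContext. A rough usage sketch, assuming the generated TasksClient.ListComplete helper from tasks.go (not shown in this diff):

func listTaskNames(ctx context.Context, client TasksClient, resourceGroup, registry string) ([]string, error) {
	iter, err := client.ListComplete(ctx, resourceGroup, registry) // assumed generated helper
	if err != nil {
		return nil, err
	}
	var names []string
	for iter.NotDone() {
		if t := iter.Value(); t.Name != nil {
			names = append(names, *t.Name)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			return nil, err
		}
	}
	return names, nil
}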
+func (page TaskListResultPage) NotDone() bool { + return !page.tlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page TaskListResultPage) Response() TaskListResult { + return page.tlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page TaskListResultPage) Values() []Task { + if page.tlr.IsEmpty() { + return nil + } + return *page.tlr.Value +} + +// Creates a new instance of the TaskListResultPage type. +func NewTaskListResultPage(getNextPage func(context.Context, TaskListResult) (TaskListResult, error)) TaskListResultPage { + return TaskListResultPage{fn: getNextPage} +} + +// TaskProperties the properties of a task. +type TaskProperties struct { + // ProvisioningState - READ-ONLY; The provisioning state of the task. Possible values include: 'Creating', 'Updating', 'Deleting', 'Succeeded', 'Failed', 'Canceled' + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` + // CreationDate - READ-ONLY; The creation date of task. + CreationDate *date.Time `json:"creationDate,omitempty"` + // Status - The current status of task. Possible values include: 'TaskStatusDisabled', 'TaskStatusEnabled' + Status TaskStatus `json:"status,omitempty"` + // Platform - The platform properties against which the run has to happen. + Platform *PlatformProperties `json:"platform,omitempty"` + // AgentConfiguration - The machine configuration of the run agent. + AgentConfiguration *AgentProperties `json:"agentConfiguration,omitempty"` + // Timeout - Run timeout in seconds. + Timeout *int32 `json:"timeout,omitempty"` + // Step - The properties of a task step. + Step BasicTaskStepProperties `json:"step,omitempty"` + // Trigger - The properties that describe all triggers for the task. + Trigger *TriggerProperties `json:"trigger,omitempty"` + // Credentials - The properties that describes a set of credentials that will be used when this run is invoked. + Credentials *Credentials `json:"credentials,omitempty"` +} + +// UnmarshalJSON is the custom unmarshaler for TaskProperties struct. 
+func (tp *TaskProperties) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "provisioningState": + if v != nil { + var provisioningState ProvisioningState + err = json.Unmarshal(*v, &provisioningState) + if err != nil { + return err + } + tp.ProvisioningState = provisioningState + } + case "creationDate": + if v != nil { + var creationDate date.Time + err = json.Unmarshal(*v, &creationDate) + if err != nil { + return err + } + tp.CreationDate = &creationDate + } + case "status": + if v != nil { + var status TaskStatus + err = json.Unmarshal(*v, &status) + if err != nil { + return err + } + tp.Status = status + } + case "platform": + if v != nil { + var platform PlatformProperties + err = json.Unmarshal(*v, &platform) + if err != nil { + return err + } + tp.Platform = &platform + } + case "agentConfiguration": + if v != nil { + var agentConfiguration AgentProperties + err = json.Unmarshal(*v, &agentConfiguration) + if err != nil { + return err + } + tp.AgentConfiguration = &agentConfiguration + } + case "timeout": + if v != nil { + var timeout int32 + err = json.Unmarshal(*v, &timeout) + if err != nil { + return err + } + tp.Timeout = &timeout + } + case "step": + if v != nil { + step, err := unmarshalBasicTaskStepProperties(*v) + if err != nil { + return err + } + tp.Step = step + } + case "trigger": + if v != nil { + var trigger TriggerProperties + err = json.Unmarshal(*v, &trigger) + if err != nil { + return err + } + tp.Trigger = &trigger + } + case "credentials": + if v != nil { + var credentials Credentials + err = json.Unmarshal(*v, &credentials) + if err != nil { + return err + } + tp.Credentials = &credentials + } + } + } + + return nil +} + +// TaskPropertiesUpdateParameters the properties for updating a task. +type TaskPropertiesUpdateParameters struct { + // Status - The current status of task. Possible values include: 'TaskStatusDisabled', 'TaskStatusEnabled' + Status TaskStatus `json:"status,omitempty"` + // Platform - The platform properties against which the run has to happen. + Platform *PlatformUpdateParameters `json:"platform,omitempty"` + // AgentConfiguration - The machine configuration of the run agent. + AgentConfiguration *AgentProperties `json:"agentConfiguration,omitempty"` + // Timeout - Run timeout in seconds. + Timeout *int32 `json:"timeout,omitempty"` + // Step - The properties for updating a task step. + Step BasicTaskStepUpdateParameters `json:"step,omitempty"` + // Trigger - The properties for updating trigger properties. + Trigger *TriggerUpdateParameters `json:"trigger,omitempty"` + // Credentials - The parameters that describes a set of credentials that will be used when this run is invoked. + Credentials *Credentials `json:"credentials,omitempty"` +} + +// UnmarshalJSON is the custom unmarshaler for TaskPropertiesUpdateParameters struct. 
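TaskProperties needs this hand-written unmarshaler only because Step is an interface (BasicTaskStepProperties): default decoding cannot pick a concrete type, so the "step" value is routed through unmarshalBasicTaskStepProperties. A sketch of the dispatch, assuming the "Docker" discriminator value and the dockerFilePath field of the generated DockerBuildStep model (defined elsewhere in this file, not in this hunk):

func exampleStepDecode() error {
	payload := []byte(`{"status":"Enabled","step":{"type":"Docker","dockerFilePath":"Dockerfile"}}`)
	var tp TaskProperties
	if err := json.Unmarshal(payload, &tp); err != nil { // runs the custom unmarshaler above
		return err
	}
	// The "step" branch dispatched on type "Docker", so a DockerBuildStep comes back.
	if dbs, ok := tp.Step.(DockerBuildStep); ok && dbs.DockerFilePath != nil {
		fmt.Println(*dbs.DockerFilePath)
	}
	return nil
}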
+func (tpup *TaskPropertiesUpdateParameters) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "status": + if v != nil { + var status TaskStatus + err = json.Unmarshal(*v, &status) + if err != nil { + return err + } + tpup.Status = status + } + case "platform": + if v != nil { + var platform PlatformUpdateParameters + err = json.Unmarshal(*v, &platform) + if err != nil { + return err + } + tpup.Platform = &platform + } + case "agentConfiguration": + if v != nil { + var agentConfiguration AgentProperties + err = json.Unmarshal(*v, &agentConfiguration) + if err != nil { + return err + } + tpup.AgentConfiguration = &agentConfiguration + } + case "timeout": + if v != nil { + var timeout int32 + err = json.Unmarshal(*v, &timeout) + if err != nil { + return err + } + tpup.Timeout = &timeout + } + case "step": + if v != nil { + step, err := unmarshalBasicTaskStepUpdateParameters(*v) + if err != nil { + return err + } + tpup.Step = step + } + case "trigger": + if v != nil { + var trigger TriggerUpdateParameters + err = json.Unmarshal(*v, &trigger) + if err != nil { + return err + } + tpup.Trigger = &trigger + } + case "credentials": + if v != nil { + var credentials Credentials + err = json.Unmarshal(*v, &credentials) + if err != nil { + return err + } + tpup.Credentials = &credentials + } + } + } + + return nil +} + +// TaskRunRequest the parameters for a task run request. +type TaskRunRequest struct { + // TaskName - The name of task against which run has to be queued. + TaskName *string `json:"taskName,omitempty"` + // Values - The collection of overridable values that can be passed when running a task. + Values *[]SetValue `json:"values,omitempty"` + // IsArchiveEnabled - The value that indicates whether archiving is enabled for the run or not. + IsArchiveEnabled *bool `json:"isArchiveEnabled,omitempty"` + // Type - Possible values include: 'TypeRunRequest', 'TypeDockerBuildRequest', 'TypeFileTaskRunRequest', 'TypeTaskRunRequest', 'TypeEncodedTaskRunRequest' + Type Type `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for TaskRunRequest. +func (trr TaskRunRequest) MarshalJSON() ([]byte, error) { + trr.Type = TypeTaskRunRequest + objectMap := make(map[string]interface{}) + if trr.TaskName != nil { + objectMap["taskName"] = trr.TaskName + } + if trr.Values != nil { + objectMap["values"] = trr.Values + } + if trr.IsArchiveEnabled != nil { + objectMap["isArchiveEnabled"] = trr.IsArchiveEnabled + } + if trr.Type != "" { + objectMap["type"] = trr.Type + } + return json.Marshal(objectMap) +} + +// AsDockerBuildRequest is the BasicRunRequest implementation for TaskRunRequest. +func (trr TaskRunRequest) AsDockerBuildRequest() (*DockerBuildRequest, bool) { + return nil, false +} + +// AsFileTaskRunRequest is the BasicRunRequest implementation for TaskRunRequest. +func (trr TaskRunRequest) AsFileTaskRunRequest() (*FileTaskRunRequest, bool) { + return nil, false +} + +// AsTaskRunRequest is the BasicRunRequest implementation for TaskRunRequest. +func (trr TaskRunRequest) AsTaskRunRequest() (*TaskRunRequest, bool) { + return &trr, true +} + +// AsEncodedTaskRunRequest is the BasicRunRequest implementation for TaskRunRequest. +func (trr TaskRunRequest) AsEncodedTaskRunRequest() (*EncodedTaskRunRequest, bool) { + return nil, false +} + +// AsRunRequest is the BasicRunRequest implementation for TaskRunRequest. 
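TaskRunRequest is the payload for queuing a run of an existing task. A hedged sketch of scheduling one, assuming the generated RegistriesClient.ScheduleRun long-running operation from registries.go (the file added near the end of this diff); all parameter values are illustrative:

func scheduleRunForTask(ctx context.Context, client RegistriesClient, resourceGroup, registry, taskName string) (Run, error) {
	req := TaskRunRequest{
		TaskName: to.StringPtr(taskName),
		Values:   &[]SetValue{{Name: to.StringPtr("tag"), Value: to.StringPtr("v1")}}, // illustrative override
	}
	future, err := client.ScheduleRun(ctx, resourceGroup, registry, req) // assumed LRO method
	if err != nil {
		return Run{}, err
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return Run{}, err
	}
	return future.Result(client)
}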
+func (trr TaskRunRequest) AsRunRequest() (*RunRequest, bool) { + return nil, false +} + +// AsBasicRunRequest is the BasicRunRequest implementation for TaskRunRequest. +func (trr TaskRunRequest) AsBasicRunRequest() (BasicRunRequest, bool) { + return &trr, true +} + +// TasksCreateFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type TasksCreateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *TasksCreateFuture) Result(client TasksClient) (t Task, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TasksCreateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.TasksCreateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if t.Response.Response, err = future.GetResult(sender); err == nil && t.Response.Response.StatusCode != http.StatusNoContent { + t, err = client.CreateResponder(t.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TasksCreateFuture", "Result", t.Response.Response, "Failure responding to request") + } + } + return +} + +// TasksDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type TasksDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *TasksDeleteFuture) Result(client TasksClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TasksDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.TasksDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// BasicTaskStepProperties base properties for any task step. +type BasicTaskStepProperties interface { + AsDockerBuildStep() (*DockerBuildStep, bool) + AsFileTaskStep() (*FileTaskStep, bool) + AsEncodedTaskStep() (*EncodedTaskStep, bool) + AsTaskStepProperties() (*TaskStepProperties, bool) +} + +// TaskStepProperties base properties for any task step. +type TaskStepProperties struct { + // BaseImageDependencies - READ-ONLY; List of base image dependencies for a step. + BaseImageDependencies *[]BaseImageDependency `json:"baseImageDependencies,omitempty"` + // ContextPath - The URL(absolute or relative) of the source context for the task step. + ContextPath *string `json:"contextPath,omitempty"` + // ContextAccessToken - The token (git PAT or SAS token of storage account blob) associated with the context for a step. 
+ ContextAccessToken *string `json:"contextAccessToken,omitempty"` + // Type - Possible values include: 'TypeTaskStepProperties', 'TypeDocker', 'TypeFileTask', 'TypeEncodedTask' + Type TypeBasicTaskStepProperties `json:"type,omitempty"` +} + +func unmarshalBasicTaskStepProperties(body []byte) (BasicTaskStepProperties, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["type"] { + case string(TypeDocker): + var dbs DockerBuildStep + err := json.Unmarshal(body, &dbs) + return dbs, err + case string(TypeFileTask): + var fts FileTaskStep + err := json.Unmarshal(body, &fts) + return fts, err + case string(TypeEncodedTask): + var ets EncodedTaskStep + err := json.Unmarshal(body, &ets) + return ets, err + default: + var tsp TaskStepProperties + err := json.Unmarshal(body, &tsp) + return tsp, err + } +} +func unmarshalBasicTaskStepPropertiesArray(body []byte) ([]BasicTaskStepProperties, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + tspArray := make([]BasicTaskStepProperties, len(rawMessages)) + + for index, rawMessage := range rawMessages { + tsp, err := unmarshalBasicTaskStepProperties(*rawMessage) + if err != nil { + return nil, err + } + tspArray[index] = tsp + } + return tspArray, nil +} + +// MarshalJSON is the custom marshaler for TaskStepProperties. +func (tsp TaskStepProperties) MarshalJSON() ([]byte, error) { + tsp.Type = TypeTaskStepProperties + objectMap := make(map[string]interface{}) + if tsp.ContextPath != nil { + objectMap["contextPath"] = tsp.ContextPath + } + if tsp.ContextAccessToken != nil { + objectMap["contextAccessToken"] = tsp.ContextAccessToken + } + if tsp.Type != "" { + objectMap["type"] = tsp.Type + } + return json.Marshal(objectMap) +} + +// AsDockerBuildStep is the BasicTaskStepProperties implementation for TaskStepProperties. +func (tsp TaskStepProperties) AsDockerBuildStep() (*DockerBuildStep, bool) { + return nil, false +} + +// AsFileTaskStep is the BasicTaskStepProperties implementation for TaskStepProperties. +func (tsp TaskStepProperties) AsFileTaskStep() (*FileTaskStep, bool) { + return nil, false +} + +// AsEncodedTaskStep is the BasicTaskStepProperties implementation for TaskStepProperties. +func (tsp TaskStepProperties) AsEncodedTaskStep() (*EncodedTaskStep, bool) { + return nil, false +} + +// AsTaskStepProperties is the BasicTaskStepProperties implementation for TaskStepProperties. +func (tsp TaskStepProperties) AsTaskStepProperties() (*TaskStepProperties, bool) { + return &tsp, true +} + +// AsBasicTaskStepProperties is the BasicTaskStepProperties implementation for TaskStepProperties. +func (tsp TaskStepProperties) AsBasicTaskStepProperties() (BasicTaskStepProperties, bool) { + return &tsp, true +} + +// BasicTaskStepUpdateParameters base properties for updating any task step. +type BasicTaskStepUpdateParameters interface { + AsDockerBuildStepUpdateParameters() (*DockerBuildStepUpdateParameters, bool) + AsFileTaskStepUpdateParameters() (*FileTaskStepUpdateParameters, bool) + AsEncodedTaskStepUpdateParameters() (*EncodedTaskStepUpdateParameters, bool) + AsTaskStepUpdateParameters() (*TaskStepUpdateParameters, bool) +} + +// TaskStepUpdateParameters base properties for updating any task step. +type TaskStepUpdateParameters struct { + // ContextPath - The URL(absolute or relative) of the source context for the task step. 
+ ContextPath *string `json:"contextPath,omitempty"` + // ContextAccessToken - The token (git PAT or SAS token of storage account blob) associated with the context for a step. + ContextAccessToken *string `json:"contextAccessToken,omitempty"` + // Type - Possible values include: 'TypeBasicTaskStepUpdateParametersTypeTaskStepUpdateParameters', 'TypeBasicTaskStepUpdateParametersTypeDocker', 'TypeBasicTaskStepUpdateParametersTypeFileTask', 'TypeBasicTaskStepUpdateParametersTypeEncodedTask' + Type TypeBasicTaskStepUpdateParameters `json:"type,omitempty"` +} + +func unmarshalBasicTaskStepUpdateParameters(body []byte) (BasicTaskStepUpdateParameters, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["type"] { + case string(TypeBasicTaskStepUpdateParametersTypeDocker): + var dbsup DockerBuildStepUpdateParameters + err := json.Unmarshal(body, &dbsup) + return dbsup, err + case string(TypeBasicTaskStepUpdateParametersTypeFileTask): + var ftsup FileTaskStepUpdateParameters + err := json.Unmarshal(body, &ftsup) + return ftsup, err + case string(TypeBasicTaskStepUpdateParametersTypeEncodedTask): + var etsup EncodedTaskStepUpdateParameters + err := json.Unmarshal(body, &etsup) + return etsup, err + default: + var tsup TaskStepUpdateParameters + err := json.Unmarshal(body, &tsup) + return tsup, err + } +} +func unmarshalBasicTaskStepUpdateParametersArray(body []byte) ([]BasicTaskStepUpdateParameters, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + tsupArray := make([]BasicTaskStepUpdateParameters, len(rawMessages)) + + for index, rawMessage := range rawMessages { + tsup, err := unmarshalBasicTaskStepUpdateParameters(*rawMessage) + if err != nil { + return nil, err + } + tsupArray[index] = tsup + } + return tsupArray, nil +} + +// MarshalJSON is the custom marshaler for TaskStepUpdateParameters. +func (tsup TaskStepUpdateParameters) MarshalJSON() ([]byte, error) { + tsup.Type = TypeBasicTaskStepUpdateParametersTypeTaskStepUpdateParameters + objectMap := make(map[string]interface{}) + if tsup.ContextPath != nil { + objectMap["contextPath"] = tsup.ContextPath + } + if tsup.ContextAccessToken != nil { + objectMap["contextAccessToken"] = tsup.ContextAccessToken + } + if tsup.Type != "" { + objectMap["type"] = tsup.Type + } + return json.Marshal(objectMap) +} + +// AsDockerBuildStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for TaskStepUpdateParameters. +func (tsup TaskStepUpdateParameters) AsDockerBuildStepUpdateParameters() (*DockerBuildStepUpdateParameters, bool) { + return nil, false +} + +// AsFileTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for TaskStepUpdateParameters. +func (tsup TaskStepUpdateParameters) AsFileTaskStepUpdateParameters() (*FileTaskStepUpdateParameters, bool) { + return nil, false +} + +// AsEncodedTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for TaskStepUpdateParameters. +func (tsup TaskStepUpdateParameters) AsEncodedTaskStepUpdateParameters() (*EncodedTaskStepUpdateParameters, bool) { + return nil, false +} + +// AsTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for TaskStepUpdateParameters. 
+func (tsup TaskStepUpdateParameters) AsTaskStepUpdateParameters() (*TaskStepUpdateParameters, bool) { + return &tsup, true +} + +// AsBasicTaskStepUpdateParameters is the BasicTaskStepUpdateParameters implementation for TaskStepUpdateParameters. +func (tsup TaskStepUpdateParameters) AsBasicTaskStepUpdateParameters() (BasicTaskStepUpdateParameters, bool) { + return &tsup, true +} + +// TasksUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type TasksUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *TasksUpdateFuture) Result(client TasksClient) (t Task, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TasksUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.TasksUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if t.Response.Response, err = future.GetResult(sender); err == nil && t.Response.Response.StatusCode != http.StatusNoContent { + t, err = client.UpdateResponder(t.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TasksUpdateFuture", "Result", t.Response.Response, "Failure responding to request") + } + } + return +} + +// TaskUpdateParameters the parameters for updating a task. +type TaskUpdateParameters struct { + // Identity - Identity for the resource. + Identity *IdentityProperties `json:"identity,omitempty"` + // TaskPropertiesUpdateParameters - The properties for updating a task. + *TaskPropertiesUpdateParameters `json:"properties,omitempty"` + // Tags - The ARM resource tags. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for TaskUpdateParameters. +func (tup TaskUpdateParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if tup.Identity != nil { + objectMap["identity"] = tup.Identity + } + if tup.TaskPropertiesUpdateParameters != nil { + objectMap["properties"] = tup.TaskPropertiesUpdateParameters + } + if tup.Tags != nil { + objectMap["tags"] = tup.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for TaskUpdateParameters struct. +func (tup *TaskUpdateParameters) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "identity": + if v != nil { + var identity IdentityProperties + err = json.Unmarshal(*v, &identity) + if err != nil { + return err + } + tup.Identity = &identity + } + case "properties": + if v != nil { + var taskPropertiesUpdateParameters TaskPropertiesUpdateParameters + err = json.Unmarshal(*v, &taskPropertiesUpdateParameters) + if err != nil { + return err + } + tup.TaskPropertiesUpdateParameters = &taskPropertiesUpdateParameters + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + tup.Tags = tags + } + } + } + + return nil +} + +// TimerTrigger the properties of a timer trigger. 
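TaskUpdateParameters embeds TaskPropertiesUpdateParameters, so update fields flatten under the ARM "properties" envelope. A sketch of a partial update, assuming the generated TasksClient.Update method from tasks.go (not shown here); the TasksUpdateFuture above completes it:

func disableTask(ctx context.Context, client TasksClient, resourceGroup, registry, taskName string) (Task, error) {
	params := TaskUpdateParameters{
		TaskPropertiesUpdateParameters: &TaskPropertiesUpdateParameters{
			Status: TaskStatusDisabled, // only the fields being changed need to be set
		},
	}
	future, err := client.Update(ctx, resourceGroup, registry, taskName, params) // assumed generated method
	if err != nil {
		return Task{}, err
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return Task{}, err
	}
	return future.Result(client)
}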
+type TimerTrigger struct { + // Schedule - The CRON expression for the task schedule + Schedule *string `json:"schedule,omitempty"` + // Status - The current status of trigger. Possible values include: 'TriggerStatusDisabled', 'TriggerStatusEnabled' + Status TriggerStatus `json:"status,omitempty"` + // Name - The name of the trigger. + Name *string `json:"name,omitempty"` +} + +// TimerTriggerDescriptor ... +type TimerTriggerDescriptor struct { + // TimerTriggerName - The timer trigger name that caused the run. + TimerTriggerName *string `json:"timerTriggerName,omitempty"` + // ScheduleOccurrence - The occurrence that triggered the run. + ScheduleOccurrence *string `json:"scheduleOccurrence,omitempty"` +} + +// TimerTriggerUpdateParameters the properties for updating a timer trigger. +type TimerTriggerUpdateParameters struct { + // Schedule - The CRON expression for the task schedule + Schedule *string `json:"schedule,omitempty"` + // Status - The current status of trigger. Possible values include: 'TriggerStatusDisabled', 'TriggerStatusEnabled' + Status TriggerStatus `json:"status,omitempty"` + // Name - The name of the trigger. + Name *string `json:"name,omitempty"` +} + +// TriggerProperties the properties of a trigger. +type TriggerProperties struct { + // TimerTriggers - The collection of timer triggers. + TimerTriggers *[]TimerTrigger `json:"timerTriggers,omitempty"` + // SourceTriggers - The collection of triggers based on source code repository. + SourceTriggers *[]SourceTrigger `json:"sourceTriggers,omitempty"` + // BaseImageTrigger - The trigger based on base image dependencies. + BaseImageTrigger *BaseImageTrigger `json:"baseImageTrigger,omitempty"` +} + +// TriggerUpdateParameters the properties for updating triggers. +type TriggerUpdateParameters struct { + // TimerTriggers - The collection of timer triggers. + TimerTriggers *[]TimerTriggerUpdateParameters `json:"timerTriggers,omitempty"` + // SourceTriggers - The collection of triggers based on source code repository. + SourceTriggers *[]SourceTriggerUpdateParameters `json:"sourceTriggers,omitempty"` + // BaseImageTrigger - The trigger based on base image dependencies. + BaseImageTrigger *BaseImageTriggerUpdateParameters `json:"baseImageTrigger,omitempty"` +} + +// TrustPolicy the content trust policy for a container registry. +type TrustPolicy struct { + // Type - The type of trust policy. Possible values include: 'Notary' + Type TrustPolicyType `json:"type,omitempty"` + // Status - The value that indicates whether the policy is enabled or not. Possible values include: 'Enabled', 'Disabled' + Status PolicyStatus `json:"status,omitempty"` +} + +// UserIdentityProperties ... +type UserIdentityProperties struct { + // PrincipalID - The principal id of user assigned identity. + PrincipalID *string `json:"principalId,omitempty"` + // ClientID - The client id of user assigned identity. + ClientID *string `json:"clientId,omitempty"` +} + +// VirtualNetworkRule virtual network rule. +type VirtualNetworkRule struct { + // Action - The action of virtual network rule. Possible values include: 'Allow' + Action Action `json:"action,omitempty"` + // VirtualNetworkResourceID - Resource ID of a subnet, for example: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}. + VirtualNetworkResourceID *string `json:"id,omitempty"` +} + +// Webhook an object that represents a webhook for a container registry. 
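TriggerProperties groups the three trigger kinds a task can carry: timer, source, and base-image. A minimal sketch of a timer-only configuration suitable for TaskProperties.Trigger (the trigger name and CRON schedule are illustrative):

func nightlyTrigger() *TriggerProperties {
	return &TriggerProperties{
		TimerTriggers: &[]TimerTrigger{{
			Name:     to.StringPtr("nightly"),
			Schedule: to.StringPtr("0 2 * * *"), // 02:00 every day
			Status:   TriggerStatusEnabled,
		}},
	}
}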
+type Webhook struct { + autorest.Response `json:"-"` + // WebhookProperties - The properties of the webhook. + *WebhookProperties `json:"properties,omitempty"` + // ID - READ-ONLY; The resource ID. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. + Type *string `json:"type,omitempty"` + // Location - The location of the resource. This cannot be changed after the resource is created. + Location *string `json:"location,omitempty"` + // Tags - The tags of the resource. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Webhook. +func (w Webhook) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if w.WebhookProperties != nil { + objectMap["properties"] = w.WebhookProperties + } + if w.Location != nil { + objectMap["location"] = w.Location + } + if w.Tags != nil { + objectMap["tags"] = w.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Webhook struct. +func (w *Webhook) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var webhookProperties WebhookProperties + err = json.Unmarshal(*v, &webhookProperties) + if err != nil { + return err + } + w.WebhookProperties = &webhookProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + w.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + w.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + w.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + w.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + w.Tags = tags + } + } + } + + return nil +} + +// WebhookCreateParameters the parameters for creating a webhook. +type WebhookCreateParameters struct { + // Tags - The tags for the webhook. + Tags map[string]*string `json:"tags"` + // Location - The location of the webhook. This cannot be changed after the resource is created. + Location *string `json:"location,omitempty"` + // WebhookPropertiesCreateParameters - The properties that the webhook will be created with. + *WebhookPropertiesCreateParameters `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for WebhookCreateParameters. +func (wcp WebhookCreateParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if wcp.Tags != nil { + objectMap["tags"] = wcp.Tags + } + if wcp.Location != nil { + objectMap["location"] = wcp.Location + } + if wcp.WebhookPropertiesCreateParameters != nil { + objectMap["properties"] = wcp.WebhookPropertiesCreateParameters + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for WebhookCreateParameters struct. 
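WebhookCreateParameters splits the ARM envelope (location, tags) from the webhook-specific WebhookPropertiesCreateParameters defined further down. A sketch of creating a push webhook, assuming the generated WebhooksClient.Create long-running operation from webhooks.go (not shown in this hunk); the endpoint, region, and names are illustrative:

func createPushWebhook(ctx context.Context, client WebhooksClient, resourceGroup, registry string) (Webhook, error) {
	params := WebhookCreateParameters{
		Location: to.StringPtr("eastus"), // illustrative region
		WebhookPropertiesCreateParameters: &WebhookPropertiesCreateParameters{
			ServiceURI: to.StringPtr("https://example.com/acr-hook"), // illustrative endpoint
			Scope:      to.StringPtr("myrepo:*"),                     // events for all tags under myrepo
			Actions:    &[]WebhookAction{Push},
			Status:     WebhookStatusEnabled,
		},
	}
	future, err := client.Create(ctx, resourceGroup, registry, "pushHook", params) // assumed generated method
	if err != nil {
		return Webhook{}, err
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return Webhook{}, err
	}
	return future.Result(client) // WebhooksCreateFuture.Result, defined further down in this file
}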
+func (wcp *WebhookCreateParameters) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + wcp.Tags = tags + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + wcp.Location = &location + } + case "properties": + if v != nil { + var webhookPropertiesCreateParameters WebhookPropertiesCreateParameters + err = json.Unmarshal(*v, &webhookPropertiesCreateParameters) + if err != nil { + return err + } + wcp.WebhookPropertiesCreateParameters = &webhookPropertiesCreateParameters + } + } + } + + return nil +} + +// WebhookListResult the result of a request to list webhooks for a container registry. +type WebhookListResult struct { + autorest.Response `json:"-"` + // Value - The list of webhooks. Since this list may be incomplete, the nextLink field should be used to request the next list of webhooks. + Value *[]Webhook `json:"value,omitempty"` + // NextLink - The URI that can be used to request the next list of webhooks. + NextLink *string `json:"nextLink,omitempty"` +} + +// WebhookListResultIterator provides access to a complete listing of Webhook values. +type WebhookListResultIterator struct { + i int + page WebhookListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *WebhookListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/WebhookListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *WebhookListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter WebhookListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter WebhookListResultIterator) Response() WebhookListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter WebhookListResultIterator) Value() Webhook { + if !iter.page.NotDone() { + return Webhook{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the WebhookListResultIterator type. +func NewWebhookListResultIterator(page WebhookListResultPage) WebhookListResultIterator { + return WebhookListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. 
+func (wlr WebhookListResult) IsEmpty() bool { + return wlr.Value == nil || len(*wlr.Value) == 0 +} + +// webhookListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (wlr WebhookListResult) webhookListResultPreparer(ctx context.Context) (*http.Request, error) { + if wlr.NextLink == nil || len(to.String(wlr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(wlr.NextLink))) +} + +// WebhookListResultPage contains a page of Webhook values. +type WebhookListResultPage struct { + fn func(context.Context, WebhookListResult) (WebhookListResult, error) + wlr WebhookListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *WebhookListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/WebhookListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.wlr) + if err != nil { + return err + } + page.wlr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *WebhookListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page WebhookListResultPage) NotDone() bool { + return !page.wlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page WebhookListResultPage) Response() WebhookListResult { + return page.wlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page WebhookListResultPage) Values() []Webhook { + if page.wlr.IsEmpty() { + return nil + } + return *page.wlr.Value +} + +// Creates a new instance of the WebhookListResultPage type. +func NewWebhookListResultPage(getNextPage func(context.Context, WebhookListResult) (WebhookListResult, error)) WebhookListResultPage { + return WebhookListResultPage{fn: getNextPage} +} + +// WebhookProperties the properties of a webhook. +type WebhookProperties struct { + // Status - The status of the webhook at the time the operation was called. Possible values include: 'WebhookStatusEnabled', 'WebhookStatusDisabled' + Status WebhookStatus `json:"status,omitempty"` + // Scope - The scope of repositories where the event can be triggered. For example, 'foo:*' means events for all tags under repository 'foo'. 'foo:bar' means events for 'foo:bar' only. 'foo' is equivalent to 'foo:latest'. Empty means all events. + Scope *string `json:"scope,omitempty"` + // Actions - The list of actions that trigger the webhook to post notifications. + Actions *[]WebhookAction `json:"actions,omitempty"` + // ProvisioningState - READ-ONLY; The provisioning state of the webhook at the time the operation was called. 
Possible values include: 'Creating', 'Updating', 'Deleting', 'Succeeded', 'Failed', 'Canceled' + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` +} + +// WebhookPropertiesCreateParameters the parameters for creating the properties of a webhook. +type WebhookPropertiesCreateParameters struct { + // ServiceURI - The service URI for the webhook to post notifications. + ServiceURI *string `json:"serviceUri,omitempty"` + // CustomHeaders - Custom headers that will be added to the webhook notifications. + CustomHeaders map[string]*string `json:"customHeaders"` + // Status - The status of the webhook at the time the operation was called. Possible values include: 'WebhookStatusEnabled', 'WebhookStatusDisabled' + Status WebhookStatus `json:"status,omitempty"` + // Scope - The scope of repositories where the event can be triggered. For example, 'foo:*' means events for all tags under repository 'foo'. 'foo:bar' means events for 'foo:bar' only. 'foo' is equivalent to 'foo:latest'. Empty means all events. + Scope *string `json:"scope,omitempty"` + // Actions - The list of actions that trigger the webhook to post notifications. + Actions *[]WebhookAction `json:"actions,omitempty"` +} + +// MarshalJSON is the custom marshaler for WebhookPropertiesCreateParameters. +func (wpcp WebhookPropertiesCreateParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if wpcp.ServiceURI != nil { + objectMap["serviceUri"] = wpcp.ServiceURI + } + if wpcp.CustomHeaders != nil { + objectMap["customHeaders"] = wpcp.CustomHeaders + } + if wpcp.Status != "" { + objectMap["status"] = wpcp.Status + } + if wpcp.Scope != nil { + objectMap["scope"] = wpcp.Scope + } + if wpcp.Actions != nil { + objectMap["actions"] = wpcp.Actions + } + return json.Marshal(objectMap) +} + +// WebhookPropertiesUpdateParameters the parameters for updating the properties of a webhook. +type WebhookPropertiesUpdateParameters struct { + // ServiceURI - The service URI for the webhook to post notifications. + ServiceURI *string `json:"serviceUri,omitempty"` + // CustomHeaders - Custom headers that will be added to the webhook notifications. + CustomHeaders map[string]*string `json:"customHeaders"` + // Status - The status of the webhook at the time the operation was called. Possible values include: 'WebhookStatusEnabled', 'WebhookStatusDisabled' + Status WebhookStatus `json:"status,omitempty"` + // Scope - The scope of repositories where the event can be triggered. For example, 'foo:*' means events for all tags under repository 'foo'. 'foo:bar' means events for 'foo:bar' only. 'foo' is equivalent to 'foo:latest'. Empty means all events. + Scope *string `json:"scope,omitempty"` + // Actions - The list of actions that trigger the webhook to post notifications. + Actions *[]WebhookAction `json:"actions,omitempty"` +} + +// MarshalJSON is the custom marshaler for WebhookPropertiesUpdateParameters. 
+func (wpup WebhookPropertiesUpdateParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if wpup.ServiceURI != nil { + objectMap["serviceUri"] = wpup.ServiceURI + } + if wpup.CustomHeaders != nil { + objectMap["customHeaders"] = wpup.CustomHeaders + } + if wpup.Status != "" { + objectMap["status"] = wpup.Status + } + if wpup.Scope != nil { + objectMap["scope"] = wpup.Scope + } + if wpup.Actions != nil { + objectMap["actions"] = wpup.Actions + } + return json.Marshal(objectMap) +} + +// WebhooksCreateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type WebhooksCreateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *WebhooksCreateFuture) Result(client WebhooksClient) (w Webhook, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksCreateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.WebhooksCreateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if w.Response.Response, err = future.GetResult(sender); err == nil && w.Response.Response.StatusCode != http.StatusNoContent { + w, err = client.CreateResponder(w.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksCreateFuture", "Result", w.Response.Response, "Failure responding to request") + } + } + return +} + +// WebhooksDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type WebhooksDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *WebhooksDeleteFuture) Result(client WebhooksClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.WebhooksDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// WebhooksUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type WebhooksUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *WebhooksUpdateFuture) Result(client WebhooksClient) (w Webhook, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.WebhooksUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if w.Response.Response, err = future.GetResult(sender); err == nil && w.Response.Response.StatusCode != http.StatusNoContent { + w, err = client.UpdateResponder(w.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksUpdateFuture", "Result", w.Response.Response, "Failure responding to request") + } + } + return +} + +// WebhookUpdateParameters the parameters for updating a webhook. +type WebhookUpdateParameters struct { + // Tags - The tags for the webhook. + Tags map[string]*string `json:"tags"` + // WebhookPropertiesUpdateParameters - The properties that the webhook will be updated with. + *WebhookPropertiesUpdateParameters `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for WebhookUpdateParameters. +func (wup WebhookUpdateParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if wup.Tags != nil { + objectMap["tags"] = wup.Tags + } + if wup.WebhookPropertiesUpdateParameters != nil { + objectMap["properties"] = wup.WebhookPropertiesUpdateParameters + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for WebhookUpdateParameters struct. +func (wup *WebhookUpdateParameters) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + wup.Tags = tags + } + case "properties": + if v != nil { + var webhookPropertiesUpdateParameters WebhookPropertiesUpdateParameters + err = json.Unmarshal(*v, &webhookPropertiesUpdateParameters) + if err != nil { + return err + } + wup.WebhookPropertiesUpdateParameters = &webhookPropertiesUpdateParameters + } + } + } + + return nil +} diff --git a/extender/code/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/operations.go b/extender/code/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/operations.go new file mode 100644 index 000000000..901490055 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/operations.go @@ -0,0 +1,147 @@ +package containerregistry + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// OperationsClient is the client for the Operations methods of the Containerregistry service. +type OperationsClient struct { + BaseClient +} + +// NewOperationsClient creates an instance of the OperationsClient client. +func NewOperationsClient(subscriptionID string) OperationsClient { + return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client using a custom endpoint. Use this +// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient { + return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List lists all of the available Azure Container Registry REST API operations. +func (client OperationsClient) List(ctx context.Context) (result OperationListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List") + defer func() { + sc := -1 + if result.olr.Response.Response != nil { + sc = result.olr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.OperationsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.olr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.OperationsClient", "List", resp, "Failure sending request") + return + } + + result.olr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.OperationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) { + const APIVersion = "2019-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/providers/Microsoft.ContainerRegistry/operations"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
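+// Editor's note (illustrative sketch, not generated code): callers normally
+// consume the paged List results through ListComplete rather than invoking
+// the Responder directly, e.g.:
+//
+//	iter, err := client.ListComplete(ctx)
+//	for err == nil && iter.NotDone() {
+//		op := iter.Value() // containerregistry.Operation
+//		_ = op
+//		err = iter.NextWithContext(ctx)
+//	}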
+func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client OperationsClient) listNextResults(ctx context.Context, lastResults OperationListResult) (result OperationListResult, err error) { + req, err := lastResults.operationListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "containerregistry.OperationsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerregistry.OperationsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.OperationsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client OperationsClient) ListComplete(ctx context.Context) (result OperationListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx) + return +} diff --git a/extender/code/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/registries.go b/extender/code/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/registries.go new file mode 100644 index 000000000..4ba0f940e --- /dev/null +++ b/extender/code/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/registries.go @@ -0,0 +1,1243 @@ +package containerregistry + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// RegistriesClient is the client for the Registries methods of the Containerregistry service. 
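+// Example (editor's illustrative sketch; the environment-based authorizer is
+// an assumption — any autorest.Authorizer works, and the subscription ID is a
+// placeholder):
+//
+//	client := containerregistry.NewRegistriesClient("<subscription-id>")
+//	authorizer, err := auth.NewAuthorizerFromEnvironment()
+//	if err == nil {
+//		client.Authorizer = authorizer
+//	}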
+type RegistriesClient struct { + BaseClient +} + +// NewRegistriesClient creates an instance of the RegistriesClient client. +func NewRegistriesClient(subscriptionID string) RegistriesClient { + return NewRegistriesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewRegistriesClientWithBaseURI creates an instance of the RegistriesClient client using a custom endpoint. Use this +// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewRegistriesClientWithBaseURI(baseURI string, subscriptionID string) RegistriesClient { + return RegistriesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CheckNameAvailability checks whether the container registry name is available for use. The name must contain only +// alphanumeric characters, be globally unique, and between 5 and 50 characters in length. +// Parameters: +// registryNameCheckRequest - the object containing information for the availability request. +func (client RegistriesClient) CheckNameAvailability(ctx context.Context, registryNameCheckRequest RegistryNameCheckRequest) (result RegistryNameStatus, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RegistriesClient.CheckNameAvailability") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: registryNameCheckRequest, + Constraints: []validation.Constraint{{Target: "registryNameCheckRequest.Name", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "registryNameCheckRequest.Name", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryNameCheckRequest.Name", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryNameCheckRequest.Name", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}, + }}, + {Target: "registryNameCheckRequest.Type", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.RegistriesClient", "CheckNameAvailability", err.Error()) + } + + req, err := client.CheckNameAvailabilityPreparer(ctx, registryNameCheckRequest) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "CheckNameAvailability", nil, "Failure preparing request") + return + } + + resp, err := client.CheckNameAvailabilitySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "CheckNameAvailability", resp, "Failure sending request") + return + } + + result, err = client.CheckNameAvailabilityResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "CheckNameAvailability", resp, "Failure responding to request") + } + + return +} + +// CheckNameAvailabilityPreparer prepares the CheckNameAvailability request. 
+func (client RegistriesClient) CheckNameAvailabilityPreparer(ctx context.Context, registryNameCheckRequest RegistryNameCheckRequest) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ContainerRegistry/checkNameAvailability", pathParameters), + autorest.WithJSON(registryNameCheckRequest), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the +// http.Response Body if it receives an error. +func (client RegistriesClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always +// closes the http.Response Body. +func (client RegistriesClient) CheckNameAvailabilityResponder(resp *http.Response) (result RegistryNameStatus, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Create creates a container registry with the specified parameters. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +// registry - the parameters for creating a container registry. 
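+// Example (editor's illustrative sketch; names are placeholders, and
+// to.StringPtr is from github.com/Azure/go-autorest/autorest/to). Create is a
+// long-running operation, so the caller typically blocks on the future:
+//
+//	registry := containerregistry.Registry{
+//		Location: to.StringPtr("westus"),
+//		Sku:      &containerregistry.Sku{Name: containerregistry.Basic},
+//	}
+//	future, err := client.Create(ctx, "myResourceGroup", "myregistry", registry)
+//	if err == nil {
+//		err = future.WaitForCompletionRef(ctx, client.Client)
+//	}
+//	reg, err := future.Result(client)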
+func (client RegistriesClient) Create(ctx context.Context, resourceGroupName string, registryName string, registry Registry) (result RegistriesCreateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RegistriesClient.Create") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: registry, + Constraints: []validation.Constraint{{Target: "registry.Sku", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "registry.RegistryProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "registry.RegistryProperties.StorageAccount", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "registry.RegistryProperties.StorageAccount.ID", Name: validation.Null, Rule: true, Chain: nil}}}, + }}}}}); err != nil { + return result, validation.NewError("containerregistry.RegistriesClient", "Create", err.Error()) + } + + req, err := client.CreatePreparer(ctx, resourceGroupName, registryName, registry) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Create", nil, "Failure preparing request") + return + } + + result, err = client.CreateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Create", result.Response(), "Failure sending request") + return + } + + return +} + +// CreatePreparer prepares the Create request. +func (client RegistriesClient) CreatePreparer(ctx context.Context, resourceGroupName string, registryName string, registry Registry) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}", pathParameters), + autorest.WithJSON(registry), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client RegistriesClient) CreateSender(req *http.Request) (future RegistriesCreateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateResponder handles the response to the Create request. 
The method always +// closes the http.Response Body. +func (client RegistriesClient) CreateResponder(resp *http.Response) (result Registry, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a container registry. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +func (client RegistriesClient) Delete(ctx context.Context, resourceGroupName string, registryName string) (result RegistriesDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RegistriesClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.RegistriesClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, registryName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client RegistriesClient) DeletePreparer(ctx context.Context, resourceGroupName string, registryName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client RegistriesClient) DeleteSender(req *http.Request) (future RegistriesDeleteFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. 
The method always +// closes the http.Response Body. +func (client RegistriesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the properties of the specified container registry. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +func (client RegistriesClient) Get(ctx context.Context, resourceGroupName string, registryName string) (result Registry, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RegistriesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.RegistriesClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, registryName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client RegistriesClient) GetPreparer(ctx context.Context, resourceGroupName string, registryName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client RegistriesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client RegistriesClient) GetResponder(resp *http.Response) (result Registry, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetBuildSourceUploadURL gets the upload location for the user to be able to upload the source. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +func (client RegistriesClient) GetBuildSourceUploadURL(ctx context.Context, resourceGroupName string, registryName string) (result SourceUploadDefinition, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RegistriesClient.GetBuildSourceUploadURL") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.RegistriesClient", "GetBuildSourceUploadURL", err.Error()) + } + + req, err := client.GetBuildSourceUploadURLPreparer(ctx, resourceGroupName, registryName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "GetBuildSourceUploadURL", nil, "Failure preparing request") + return + } + + resp, err := client.GetBuildSourceUploadURLSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "GetBuildSourceUploadURL", resp, "Failure sending request") + return + } + + result, err = client.GetBuildSourceUploadURLResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "GetBuildSourceUploadURL", resp, "Failure responding to request") + } + + return +} + +// GetBuildSourceUploadURLPreparer prepares the GetBuildSourceUploadURL request.

+func (client RegistriesClient) GetBuildSourceUploadURLPreparer(ctx context.Context, resourceGroupName string, registryName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/listBuildSourceUploadUrl", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetBuildSourceUploadURLSender sends the GetBuildSourceUploadURL request. The method will close the +// http.Response Body if it receives an error. +func (client RegistriesClient) GetBuildSourceUploadURLSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetBuildSourceUploadURLResponder handles the response to the GetBuildSourceUploadURL request. The method always +// closes the http.Response Body. +func (client RegistriesClient) GetBuildSourceUploadURLResponder(resp *http.Response) (result SourceUploadDefinition, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ImportImage copies an image to this container registry from the specified container registry. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +// parameters - the parameters specifying the image to copy and the source container registry. 
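+// Example (editor's illustrative sketch; the resource ID and image names are
+// placeholders, and to.StringPtr is from github.com/Azure/go-autorest/autorest/to).
+// Per the validation below, Source and Source.SourceImage are required:
+//
+//	params := containerregistry.ImportImageParameters{
+//		Source: &containerregistry.ImportSource{
+//			ResourceID:  to.StringPtr("<source-registry-resource-id>"),
+//			SourceImage: to.StringPtr("hello-world:latest"),
+//		},
+//		TargetTags: &[]string{"hello-world:latest"},
+//	}
+//	future, err := client.ImportImage(ctx, "myResourceGroup", "myregistry", params)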
+func (client RegistriesClient) ImportImage(ctx context.Context, resourceGroupName string, registryName string, parameters ImportImageParameters) (result RegistriesImportImageFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RegistriesClient.ImportImage") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.Source", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.Source.Credentials", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.Source.Credentials.Password", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "parameters.Source.SourceImage", Name: validation.Null, Rule: true, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("containerregistry.RegistriesClient", "ImportImage", err.Error()) + } + + req, err := client.ImportImagePreparer(ctx, resourceGroupName, registryName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ImportImage", nil, "Failure preparing request") + return + } + + result, err = client.ImportImageSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ImportImage", result.Response(), "Failure sending request") + return + } + + return +} + +// ImportImagePreparer prepares the ImportImage request. +func (client RegistriesClient) ImportImagePreparer(ctx context.Context, resourceGroupName string, registryName string, parameters ImportImageParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/importImage", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ImportImageSender sends the ImportImage request. The method will close the +// http.Response Body if it receives an error. 
+func (client RegistriesClient) ImportImageSender(req *http.Request) (future RegistriesImportImageFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// ImportImageResponder handles the response to the ImportImage request. The method always +// closes the http.Response Body. +func (client RegistriesClient) ImportImageResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// List lists all the container registries under the specified subscription. +func (client RegistriesClient) List(ctx context.Context) (result RegistryListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RegistriesClient.List") + defer func() { + sc := -1 + if result.rlr.Response.Response != nil { + sc = result.rlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.rlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "List", resp, "Failure sending request") + return + } + + result.rlr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client RegistriesClient) ListPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ContainerRegistry/registries", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client RegistriesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client RegistriesClient) ListResponder(resp *http.Response) (result RegistryListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client RegistriesClient) listNextResults(ctx context.Context, lastResults RegistryListResult) (result RegistryListResult, err error) { + req, err := lastResults.registryListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client RegistriesClient) ListComplete(ctx context.Context) (result RegistryListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RegistriesClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx) + return +} + +// ListByResourceGroup lists all the container registries under the specified resource group. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +func (client RegistriesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result RegistryListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RegistriesClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.rlr.Response.Response != nil { + sc = result.rlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.RegistriesClient", "ListByResourceGroup", err.Error()) + } + + result.fn = client.listByResourceGroupNextResults + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.rlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result.rlr, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
+func (client RegistriesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client RegistriesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client RegistriesClient) ListByResourceGroupResponder(resp *http.Response) (result RegistryListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByResourceGroupNextResults retrieves the next set of results, if any. +func (client RegistriesClient) listByResourceGroupNextResults(ctx context.Context, lastResults RegistryListResult) (result RegistryListResult, err error) { + req, err := lastResults.registryListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. +func (client RegistriesClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result RegistryListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RegistriesClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByResourceGroup(ctx, resourceGroupName) + return +} + +// ListCredentials lists the login credentials for the specified container registry. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. 
+// registryName - the name of the container registry. +func (client RegistriesClient) ListCredentials(ctx context.Context, resourceGroupName string, registryName string) (result RegistryListCredentialsResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RegistriesClient.ListCredentials") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.RegistriesClient", "ListCredentials", err.Error()) + } + + req, err := client.ListCredentialsPreparer(ctx, resourceGroupName, registryName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ListCredentials", nil, "Failure preparing request") + return + } + + resp, err := client.ListCredentialsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ListCredentials", resp, "Failure sending request") + return + } + + result, err = client.ListCredentialsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ListCredentials", resp, "Failure responding to request") + } + + return +} + +// ListCredentialsPreparer prepares the ListCredentials request. +func (client RegistriesClient) ListCredentialsPreparer(ctx context.Context, resourceGroupName string, registryName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/listCredentials", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListCredentialsSender sends the ListCredentials request. The method will close the +// http.Response Body if it receives an error. +func (client RegistriesClient) ListCredentialsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListCredentialsResponder handles the response to the ListCredentials request. The method always +// closes the http.Response Body. 
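+// Example (editor's illustrative sketch, not generated code): fetching the
+// admin credentials, assuming the registry has the admin user enabled:
+//
+//	creds, err := client.ListCredentials(ctx, "myResourceGroup", "myregistry")
+//	if err == nil && creds.Username != nil {
+//		fmt.Println(*creds.Username)
+//	}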
+func (client RegistriesClient) ListCredentialsResponder(resp *http.Response) (result RegistryListCredentialsResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListUsages gets the quota usages for the specified container registry. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +func (client RegistriesClient) ListUsages(ctx context.Context, resourceGroupName string, registryName string) (result RegistryUsageListResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RegistriesClient.ListUsages") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.RegistriesClient", "ListUsages", err.Error()) + } + + req, err := client.ListUsagesPreparer(ctx, resourceGroupName, registryName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ListUsages", nil, "Failure preparing request") + return + } + + resp, err := client.ListUsagesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ListUsages", resp, "Failure sending request") + return + } + + result, err = client.ListUsagesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ListUsages", resp, "Failure responding to request") + } + + return +} + +// ListUsagesPreparer prepares the ListUsages request. +func (client RegistriesClient) ListUsagesPreparer(ctx context.Context, resourceGroupName string, registryName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/listUsages", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListUsagesSender sends the ListUsages request. The method will close the +// http.Response Body if it receives an error. 
+func (client RegistriesClient) ListUsagesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListUsagesResponder handles the response to the ListUsages request. The method always +// closes the http.Response Body. +func (client RegistriesClient) ListUsagesResponder(resp *http.Response) (result RegistryUsageListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// RegenerateCredential regenerates one of the login credentials for the specified container registry. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +// regenerateCredentialParameters - specifies name of the password which should be regenerated -- password or +// password2. +func (client RegistriesClient) RegenerateCredential(ctx context.Context, resourceGroupName string, registryName string, regenerateCredentialParameters RegenerateCredentialParameters) (result RegistryListCredentialsResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RegistriesClient.RegenerateCredential") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.RegistriesClient", "RegenerateCredential", err.Error()) + } + + req, err := client.RegenerateCredentialPreparer(ctx, resourceGroupName, registryName, regenerateCredentialParameters) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "RegenerateCredential", nil, "Failure preparing request") + return + } + + resp, err := client.RegenerateCredentialSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "RegenerateCredential", resp, "Failure sending request") + return + } + + result, err = client.RegenerateCredentialResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "RegenerateCredential", resp, "Failure responding to request") + } + + return +} + +// RegenerateCredentialPreparer prepares the RegenerateCredential request. 
+func (client RegistriesClient) RegenerateCredentialPreparer(ctx context.Context, resourceGroupName string, registryName string, regenerateCredentialParameters RegenerateCredentialParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/regenerateCredential", pathParameters), + autorest.WithJSON(regenerateCredentialParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RegenerateCredentialSender sends the RegenerateCredential request. The method will close the +// http.Response Body if it receives an error. +func (client RegistriesClient) RegenerateCredentialSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// RegenerateCredentialResponder handles the response to the RegenerateCredential request. The method always +// closes the http.Response Body. +func (client RegistriesClient) RegenerateCredentialResponder(resp *http.Response) (result RegistryListCredentialsResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ScheduleRun schedules a new run based on the request parameters and adds it to the run queue. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +// runRequest - the parameters of a run that needs to be scheduled.
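+// Example (editor's illustrative sketch; runReq stands for one of this
+// package's concrete run request types, e.g. DockerBuildRequest). ScheduleRun
+// is long-running, so the caller typically blocks on the returned future:
+//
+//	future, err := client.ScheduleRun(ctx, "myResourceGroup", "myregistry", runReq)
+//	if err == nil {
+//		err = future.WaitForCompletionRef(ctx, client.Client)
+//	}
+//	run, err := future.Result(client)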
+func (client RegistriesClient) ScheduleRun(ctx context.Context, resourceGroupName string, registryName string, runRequest BasicRunRequest) (result RegistriesScheduleRunFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RegistriesClient.ScheduleRun") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.RegistriesClient", "ScheduleRun", err.Error()) + } + + req, err := client.ScheduleRunPreparer(ctx, resourceGroupName, registryName, runRequest) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ScheduleRun", nil, "Failure preparing request") + return + } + + result, err = client.ScheduleRunSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ScheduleRun", result.Response(), "Failure sending request") + return + } + + return +} + +// ScheduleRunPreparer prepares the ScheduleRun request. +func (client RegistriesClient) ScheduleRunPreparer(ctx context.Context, resourceGroupName string, registryName string, runRequest BasicRunRequest) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/scheduleRun", pathParameters), + autorest.WithJSON(runRequest), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ScheduleRunSender sends the ScheduleRun request. The method will close the +// http.Response Body if it receives an error. +func (client RegistriesClient) ScheduleRunSender(req *http.Request) (future RegistriesScheduleRunFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// ScheduleRunResponder handles the response to the ScheduleRun request. The method always +// closes the http.Response Body. 
+func (client RegistriesClient) ScheduleRunResponder(resp *http.Response) (result Run, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update updates a container registry with the specified parameters. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +// registryUpdateParameters - the parameters for updating a container registry. +func (client RegistriesClient) Update(ctx context.Context, resourceGroupName string, registryName string, registryUpdateParameters RegistryUpdateParameters) (result RegistriesUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RegistriesClient.Update") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.RegistriesClient", "Update", err.Error()) + } + + req, err := client.UpdatePreparer(ctx, resourceGroupName, registryName, registryUpdateParameters) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Update", nil, "Failure preparing request") + return + } + + result, err = client.UpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Update", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client RegistriesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, registryName string, registryUpdateParameters RegistryUpdateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}", pathParameters), + autorest.WithJSON(registryUpdateParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. 
+func (client RegistriesClient) UpdateSender(req *http.Request) (future RegistriesUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client RegistriesClient) UpdateResponder(resp *http.Response) (result Registry, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/extender/code/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/replications.go b/extender/code/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/replications.go new file mode 100644 index 000000000..045c66b79 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/replications.go @@ -0,0 +1,540 @@ +package containerregistry + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// ReplicationsClient is the client for the Replications methods of the Containerregistry service. +type ReplicationsClient struct { + BaseClient +} + +// NewReplicationsClient creates an instance of the ReplicationsClient client. +func NewReplicationsClient(subscriptionID string) ReplicationsClient { + return NewReplicationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewReplicationsClientWithBaseURI creates an instance of the ReplicationsClient client using a custom endpoint. Use +// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewReplicationsClientWithBaseURI(baseURI string, subscriptionID string) ReplicationsClient { + return ReplicationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Create creates a replication for a container registry with the specified parameters. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +// replicationName - the name of the replication. +// replication - the parameters for creating a replication. 
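Each operation in these generated clients is split into a Preparer/Sender/Responder triad, with the top-level method wiring them together behind tracing and client-side validation. The Create call documented above (and implemented below) follows the same future-based pattern; a hedged sketch of consuming it, with region and resource names as placeholders and the client assumed to carry an Authorizer as in the earlier ScheduleRun sketch:

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry"
	"github.com/Azure/go-autorest/autorest/to"
)

// createReplication geo-replicates a registry into a second region and blocks
// until provisioning completes. By convention the replication name matches
// the target region.
func createReplication(ctx context.Context, client containerregistry.ReplicationsClient) (containerregistry.Replication, error) {
	future, err := client.Create(ctx, "my-rg", "myregistry", "westus2",
		containerregistry.Replication{Location: to.StringPtr("westus2")})
	if err != nil {
		return containerregistry.Replication{}, err
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return containerregistry.Replication{}, err
	}
	// CreateResponder decodes the terminal 200/201 payload into a Replication.
	return client.CreateResponder(future.Response())
}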
+func (client ReplicationsClient) Create(ctx context.Context, resourceGroupName string, registryName string, replicationName string, replication Replication) (result ReplicationsCreateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ReplicationsClient.Create") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: replicationName, + Constraints: []validation.Constraint{{Target: "replicationName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "replicationName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "replicationName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.ReplicationsClient", "Create", err.Error()) + } + + req, err := client.CreatePreparer(ctx, resourceGroupName, registryName, replicationName, replication) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "Create", nil, "Failure preparing request") + return + } + + result, err = client.CreateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "Create", result.Response(), "Failure sending request") + return + } + + return +} + +// CreatePreparer prepares the Create request. +func (client ReplicationsClient) CreatePreparer(ctx context.Context, resourceGroupName string, registryName string, replicationName string, replication Replication) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "replicationName": autorest.Encode("path", replicationName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}", pathParameters), + autorest.WithJSON(replication), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. 
+func (client ReplicationsClient) CreateSender(req *http.Request) (future ReplicationsCreateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client ReplicationsClient) CreateResponder(resp *http.Response) (result Replication, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a replication from a container registry. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +// replicationName - the name of the replication. +func (client ReplicationsClient) Delete(ctx context.Context, resourceGroupName string, registryName string, replicationName string) (result ReplicationsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ReplicationsClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: replicationName, + Constraints: []validation.Constraint{{Target: "replicationName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "replicationName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "replicationName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.ReplicationsClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, registryName, replicationName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client ReplicationsClient) DeletePreparer(ctx context.Context, resourceGroupName string, registryName string, replicationName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "replicationName": autorest.Encode("path", replicationName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ReplicationsClient) DeleteSender(req *http.Request) (future ReplicationsDeleteFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ReplicationsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the properties of the specified replication. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +// replicationName - the name of the replication. 
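Worth noting: every method in these clients validates its arguments before a request is ever prepared, so violations of the MinLength/MaxLength/Pattern rules above surface immediately as local errors rather than HTTP 400s. A small illustration against Delete; the names are placeholders:

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry"
)

func deleteReplication(ctx context.Context, client containerregistry.ReplicationsClient) {
	// "abc" violates the MinLength(5) rule on replicationName, so Delete
	// returns a validation error naming the client and method without
	// sending any request.
	if _, err := client.Delete(ctx, "my-rg", "myregistry", "abc"); err != nil {
		log.Println(err)
	}
}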
+func (client ReplicationsClient) Get(ctx context.Context, resourceGroupName string, registryName string, replicationName string) (result Replication, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ReplicationsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: replicationName, + Constraints: []validation.Constraint{{Target: "replicationName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "replicationName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "replicationName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.ReplicationsClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, registryName, replicationName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ReplicationsClient) GetPreparer(ctx context.Context, resourceGroupName string, registryName string, replicationName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "replicationName": autorest.Encode("path", replicationName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ReplicationsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client ReplicationsClient) GetResponder(resp *http.Response) (result Replication, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all the replications for the specified container registry. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +func (client ReplicationsClient) List(ctx context.Context, resourceGroupName string, registryName string) (result ReplicationListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ReplicationsClient.List") + defer func() { + sc := -1 + if result.rlr.Response.Response != nil { + sc = result.rlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.ReplicationsClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, registryName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.rlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "List", resp, "Failure sending request") + return + } + + result.rlr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ReplicationsClient) ListPreparer(ctx context.Context, resourceGroupName string, registryName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. 
+func (client ReplicationsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ReplicationsClient) ListResponder(resp *http.Response) (result ReplicationListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client ReplicationsClient) listNextResults(ctx context.Context, lastResults ReplicationListResult) (result ReplicationListResult, err error) { + req, err := lastResults.replicationListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client ReplicationsClient) ListComplete(ctx context.Context, resourceGroupName string, registryName string) (result ReplicationListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ReplicationsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, registryName) + return +} + +// Update updates a replication for a container registry with the specified parameters. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +// replicationName - the name of the replication. +// replicationUpdateParameters - the parameters for updating a replication. 
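List returns one page at a time (a ReplicationListResultPage wired to listNextResults), while ListComplete wraps it in an iterator that crosses page boundaries on demand. A sketch of draining that iterator:

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry"
)

// listAllReplications walks every result page; NextWithContext triggers the
// next listNextResults call only once the current page is exhausted.
func listAllReplications(ctx context.Context, client containerregistry.ReplicationsClient) {
	it, err := client.ListComplete(ctx, "my-rg", "myregistry")
	if err != nil {
		log.Fatal(err)
	}
	for it.NotDone() {
		if r := it.Value(); r.Name != nil {
			fmt.Println(*r.Name)
		}
		if err := it.NextWithContext(ctx); err != nil {
			log.Fatal(err)
		}
	}
}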
+func (client ReplicationsClient) Update(ctx context.Context, resourceGroupName string, registryName string, replicationName string, replicationUpdateParameters ReplicationUpdateParameters) (result ReplicationsUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ReplicationsClient.Update") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: replicationName, + Constraints: []validation.Constraint{{Target: "replicationName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "replicationName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "replicationName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.ReplicationsClient", "Update", err.Error()) + } + + req, err := client.UpdatePreparer(ctx, resourceGroupName, registryName, replicationName, replicationUpdateParameters) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "Update", nil, "Failure preparing request") + return + } + + result, err = client.UpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "Update", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client ReplicationsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, registryName string, replicationName string, replicationUpdateParameters ReplicationUpdateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "replicationName": autorest.Encode("path", replicationName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}", pathParameters), + autorest.WithJSON(replicationUpdateParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. 
+func (client ReplicationsClient) UpdateSender(req *http.Request) (future ReplicationsUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client ReplicationsClient) UpdateResponder(resp *http.Response) (result Replication, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/extender/code/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/runs.go b/extender/code/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/runs.go new file mode 100644 index 000000000..91c393f0c --- /dev/null +++ b/extender/code/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/runs.go @@ -0,0 +1,530 @@ +package containerregistry + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// RunsClient is the client for the Runs methods of the Containerregistry service. +type RunsClient struct { + BaseClient +} + +// NewRunsClient creates an instance of the RunsClient client. +func NewRunsClient(subscriptionID string) RunsClient { + return NewRunsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewRunsClientWithBaseURI creates an instance of the RunsClient client using a custom endpoint. Use this when +// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewRunsClientWithBaseURI(baseURI string, subscriptionID string) RunsClient { + return RunsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Cancel cancel an existing run. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +// runID - the run ID. 
+func (client RunsClient) Cancel(ctx context.Context, resourceGroupName string, registryName string, runID string) (result RunsCancelFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RunsClient.Cancel") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.RunsClient", "Cancel", err.Error()) + } + + req, err := client.CancelPreparer(ctx, resourceGroupName, registryName, runID) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RunsClient", "Cancel", nil, "Failure preparing request") + return + } + + result, err = client.CancelSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RunsClient", "Cancel", result.Response(), "Failure sending request") + return + } + + return +} + +// CancelPreparer prepares the Cancel request. +func (client RunsClient) CancelPreparer(ctx context.Context, resourceGroupName string, registryName string, runID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "runId": autorest.Encode("path", runID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/runs/{runId}/cancel", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CancelSender sends the Cancel request. The method will close the +// http.Response Body if it receives an error. +func (client RunsClient) CancelSender(req *http.Request) (future RunsCancelFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CancelResponder handles the response to the Cancel request. The method always +// closes the http.Response Body. +func (client RunsClient) CancelResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the detailed information for a given run. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. 
+// runID - the run ID. +func (client RunsClient) Get(ctx context.Context, resourceGroupName string, registryName string, runID string) (result Run, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RunsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.RunsClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, registryName, runID) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RunsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.RunsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RunsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client RunsClient) GetPreparer(ctx context.Context, resourceGroupName string, registryName string, runID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "runId": autorest.Encode("path", runID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/runs/{runId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client RunsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client RunsClient) GetResponder(resp *http.Response) (result Run, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetLogSasURL gets a link to download the run logs. 
+// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +// runID - the run ID. +func (client RunsClient) GetLogSasURL(ctx context.Context, resourceGroupName string, registryName string, runID string) (result RunGetLogResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RunsClient.GetLogSasURL") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.RunsClient", "GetLogSasURL", err.Error()) + } + + req, err := client.GetLogSasURLPreparer(ctx, resourceGroupName, registryName, runID) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RunsClient", "GetLogSasURL", nil, "Failure preparing request") + return + } + + resp, err := client.GetLogSasURLSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.RunsClient", "GetLogSasURL", resp, "Failure sending request") + return + } + + result, err = client.GetLogSasURLResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RunsClient", "GetLogSasURL", resp, "Failure responding to request") + } + + return +} + +// GetLogSasURLPreparer prepares the GetLogSasURL request. +func (client RunsClient) GetLogSasURLPreparer(ctx context.Context, resourceGroupName string, registryName string, runID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "runId": autorest.Encode("path", runID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/runs/{runId}/listLogSasUrl", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetLogSasURLSender sends the GetLogSasURL request. The method will close the +// http.Response Body if it receives an error. +func (client RunsClient) GetLogSasURLSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetLogSasURLResponder handles the response to the GetLogSasURL request. The method always +// closes the http.Response Body. 
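GetLogSasURL does not return log content; it returns a RunGetLogResult whose LogLink field holds a time-limited SAS URL that any plain HTTP client can fetch. A sketch, assuming the run has produced logs:

import (
	"context"
	"io"
	"log"
	"net/http"
	"os"

	"github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry"
)

// streamRunLogs resolves the SAS link for a run's logs and copies them to stdout.
func streamRunLogs(ctx context.Context, client containerregistry.RunsClient, runID string) {
	res, err := client.GetLogSasURL(ctx, "my-rg", "myregistry", runID)
	if err != nil || res.LogLink == nil {
		log.Fatal("no log link available: ", err)
	}
	resp, err := http.Get(*res.LogLink) // the SAS token is embedded in the URL
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	io.Copy(os.Stdout, resp.Body) // nolint: errcheck
}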
+func (client RunsClient) GetLogSasURLResponder(resp *http.Response) (result RunGetLogResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all the runs for a registry. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +// filter - the runs filter to apply on the operation. Arithmetic operators are not supported. The allowed +// string function is 'contains'. All logical operators except 'Not', 'Has', 'All' are allowed. +// top - $top is supported for get list of runs, which limits the maximum number of runs to return. +func (client RunsClient) List(ctx context.Context, resourceGroupName string, registryName string, filter string, top *int32) (result RunListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RunsClient.List") + defer func() { + sc := -1 + if result.rlr.Response.Response != nil { + sc = result.rlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.RunsClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, registryName, filter, top) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RunsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.rlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.RunsClient", "List", resp, "Failure sending request") + return + } + + result.rlr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RunsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
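The runs List operation is the one call here with OData-style knobs: filter (limited to 'contains' and most logical operators, per the comment above) and top, which the preparer below forwards as $filter and $top. A sketch of a capped, filtered listing; the filter expression itself is illustrative, not taken from this diff:

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry"
	"github.com/Azure/go-autorest/autorest/to"
)

// listTaskRuns pages through runs ten at a time, filtered server-side.
func listTaskRuns(ctx context.Context, client containerregistry.RunsClient) {
	it, err := client.ListComplete(ctx, "my-rg", "myregistry",
		"TaskName eq 'nightly-build'", // illustrative $filter expression
		to.Int32Ptr(10))               // $top: at most 10 runs per page
	if err != nil {
		log.Fatal(err)
	}
	for it.NotDone() {
		if r := it.Value(); r.RunProperties != nil && r.RunID != nil {
			fmt.Println(*r.RunID)
		}
		if err := it.NextWithContext(ctx); err != nil {
			log.Fatal(err)
		}
	}
}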
+func (client RunsClient) ListPreparer(ctx context.Context, resourceGroupName string, registryName string, filter string, top *int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/runs", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client RunsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client RunsClient) ListResponder(resp *http.Response) (result RunListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client RunsClient) listNextResults(ctx context.Context, lastResults RunListResult) (result RunListResult, err error) { + req, err := lastResults.runListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "containerregistry.RunsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerregistry.RunsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RunsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client RunsClient) ListComplete(ctx context.Context, resourceGroupName string, registryName string, filter string, top *int32) (result RunListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RunsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, registryName, filter, top) + return +} + +// Update patch the run properties. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. 
+// registryName - the name of the container registry. +// runID - the run ID. +// runUpdateParameters - the run update properties. +func (client RunsClient) Update(ctx context.Context, resourceGroupName string, registryName string, runID string, runUpdateParameters RunUpdateParameters) (result RunsUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RunsClient.Update") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.RunsClient", "Update", err.Error()) + } + + req, err := client.UpdatePreparer(ctx, resourceGroupName, registryName, runID, runUpdateParameters) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RunsClient", "Update", nil, "Failure preparing request") + return + } + + result, err = client.UpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RunsClient", "Update", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client RunsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, registryName string, runID string, runUpdateParameters RunUpdateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "runId": autorest.Encode("path", runID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/runs/{runId}", pathParameters), + autorest.WithJSON(runUpdateParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client RunsClient) UpdateSender(req *http.Request) (future RunsUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client RunsClient) UpdateResponder(resp *http.Response) (result Run, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/extender/code/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/tasks.go b/extender/code/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/tasks.go new file mode 100644 index 000000000..fc11eb857 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/tasks.go @@ -0,0 +1,645 @@ +package containerregistry + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// TasksClient is the client for the Tasks methods of the Containerregistry service. +type TasksClient struct { + BaseClient +} + +// NewTasksClient creates an instance of the TasksClient client. +func NewTasksClient(subscriptionID string) TasksClient { + return NewTasksClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewTasksClientWithBaseURI creates an instance of the TasksClient client using a custom endpoint. Use this when +// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewTasksClientWithBaseURI(baseURI string, subscriptionID string) TasksClient { + return TasksClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Create creates a task for a container registry with the specified parameters. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +// taskName - the name of the container registry task. +// taskCreateParameters - the parameters for creating a task. 
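The TasksClient.Create call below enforces the Task shape client-side before sending anything: TaskProperties.Platform and TaskProperties.Step are required, Timeout (when set) must fall within 300-28800 seconds, and a base-image trigger, if present, must be named. A Task literal that satisfies those constraints might look like the following; the region, Git context, and image name are placeholder values:

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry"
	"github.com/Azure/go-autorest/autorest/to"
)

// createBuildTask registers a build task; DockerBuildStep is one of the
// concrete BasicTaskStepProperties implementations.
func createBuildTask(ctx context.Context, client containerregistry.TasksClient) (containerregistry.TasksCreateFuture, error) {
	task := containerregistry.Task{
		Location: to.StringPtr("eastus"),
		TaskProperties: &containerregistry.TaskProperties{
			// Required by the validation chain in Create.
			Platform: &containerregistry.PlatformProperties{
				OS:           containerregistry.Linux,
				Architecture: containerregistry.Amd64,
			},
			// Optional, but must be within [300, 28800] seconds when set.
			Timeout: to.Int32Ptr(3600),
			Step: containerregistry.DockerBuildStep{
				DockerFilePath: to.StringPtr("Dockerfile"),
				ContextPath:    to.StringPtr("https://github.com/example/repo.git"), // hypothetical
				ImageNames:     &[]string{"hello:v1"},
				IsPushEnabled:  to.BoolPtr(true),
			},
		},
	}
	// "nightly-build" satisfies the 5-50 char, ^[a-zA-Z0-9-_]*$ taskName rules.
	return client.Create(ctx, "my-rg", "myregistry", "nightly-build", task)
}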
+func (client TasksClient) Create(ctx context.Context, resourceGroupName string, registryName string, taskName string, taskCreateParameters Task) (result TasksCreateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TasksClient.Create") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: taskName, + Constraints: []validation.Constraint{{Target: "taskName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "taskName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "taskName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9-_]*$`, Chain: nil}}}, + {TargetValue: taskCreateParameters, + Constraints: []validation.Constraint{{Target: "taskCreateParameters.TaskProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "taskCreateParameters.TaskProperties.Platform", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "taskCreateParameters.TaskProperties.Timeout", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "taskCreateParameters.TaskProperties.Timeout", Name: validation.InclusiveMaximum, Rule: int64(28800), Chain: nil}, + {Target: "taskCreateParameters.TaskProperties.Timeout", Name: validation.InclusiveMinimum, Rule: int64(300), Chain: nil}, + }}, + {Target: "taskCreateParameters.TaskProperties.Step", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "taskCreateParameters.TaskProperties.Trigger", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "taskCreateParameters.TaskProperties.Trigger.BaseImageTrigger", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "taskCreateParameters.TaskProperties.Trigger.BaseImageTrigger.Name", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + }}}}}); err != nil { + return result, validation.NewError("containerregistry.TasksClient", "Create", err.Error()) + } + + req, err := client.CreatePreparer(ctx, resourceGroupName, registryName, taskName, taskCreateParameters) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TasksClient", "Create", nil, "Failure preparing request") + return + } + + result, err = client.CreateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TasksClient", "Create", result.Response(), "Failure sending request") + return + } + + return +} + +// CreatePreparer prepares the Create request. 
+func (client TasksClient) CreatePreparer(ctx context.Context, resourceGroupName string, registryName string, taskName string, taskCreateParameters Task) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "taskName": autorest.Encode("path", taskName), + } + + const APIVersion = "2019-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tasks/{taskName}", pathParameters), + autorest.WithJSON(taskCreateParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client TasksClient) CreateSender(req *http.Request) (future TasksCreateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client TasksClient) CreateResponder(resp *http.Response) (result Task, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a specified task. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +// taskName - the name of the container registry task. 
+func (client TasksClient) Delete(ctx context.Context, resourceGroupName string, registryName string, taskName string) (result TasksDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TasksClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: taskName, + Constraints: []validation.Constraint{{Target: "taskName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "taskName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "taskName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9-_]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.TasksClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, registryName, taskName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TasksClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TasksClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client TasksClient) DeletePreparer(ctx context.Context, resourceGroupName string, registryName string, taskName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "taskName": autorest.Encode("path", taskName), + } + + const APIVersion = "2019-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tasks/{taskName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client TasksClient) DeleteSender(req *http.Request) (future TasksDeleteFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (client TasksClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get get the properties of a specified task. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +// taskName - the name of the container registry task. +func (client TasksClient) Get(ctx context.Context, resourceGroupName string, registryName string, taskName string) (result Task, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TasksClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: taskName, + Constraints: []validation.Constraint{{Target: "taskName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "taskName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "taskName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9-_]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.TasksClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, registryName, taskName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TasksClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.TasksClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TasksClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client TasksClient) GetPreparer(ctx context.Context, resourceGroupName string, registryName string, taskName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "taskName": autorest.Encode("path", taskName), + } + + const APIVersion = "2019-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tasks/{taskName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client TasksClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client TasksClient) GetResponder(resp *http.Response) (result Task, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetDetails returns a task with extended information that includes all secrets. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +// taskName - the name of the container registry task. 
+func (client TasksClient) GetDetails(ctx context.Context, resourceGroupName string, registryName string, taskName string) (result Task, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TasksClient.GetDetails") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: taskName, + Constraints: []validation.Constraint{{Target: "taskName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "taskName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "taskName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9-_]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.TasksClient", "GetDetails", err.Error()) + } + + req, err := client.GetDetailsPreparer(ctx, resourceGroupName, registryName, taskName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TasksClient", "GetDetails", nil, "Failure preparing request") + return + } + + resp, err := client.GetDetailsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.TasksClient", "GetDetails", resp, "Failure sending request") + return + } + + result, err = client.GetDetailsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TasksClient", "GetDetails", resp, "Failure responding to request") + } + + return +} + +// GetDetailsPreparer prepares the GetDetails request. +func (client TasksClient) GetDetailsPreparer(ctx context.Context, resourceGroupName string, registryName string, taskName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "taskName": autorest.Encode("path", taskName), + } + + const APIVersion = "2019-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tasks/{taskName}/listDetails", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetDetailsSender sends the GetDetails request. The method will close the +// http.Response Body if it receives an error. +func (client TasksClient) GetDetailsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetDetailsResponder handles the response to the GetDetails request. The method always +// closes the http.Response Body. 
+func (client TasksClient) GetDetailsResponder(resp *http.Response) (result Task, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all the tasks for a specified container registry. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +func (client TasksClient) List(ctx context.Context, resourceGroupName string, registryName string) (result TaskListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TasksClient.List") + defer func() { + sc := -1 + if result.tlr.Response.Response != nil { + sc = result.tlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.TasksClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, registryName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TasksClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.tlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.TasksClient", "List", resp, "Failure sending request") + return + } + + result.tlr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TasksClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client TasksClient) ListPreparer(ctx context.Context, resourceGroupName string, registryName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tasks", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. 
+func (client TasksClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client TasksClient) ListResponder(resp *http.Response) (result TaskListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client TasksClient) listNextResults(ctx context.Context, lastResults TaskListResult) (result TaskListResult, err error) { + req, err := lastResults.taskListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "containerregistry.TasksClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerregistry.TasksClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TasksClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client TasksClient) ListComplete(ctx context.Context, resourceGroupName string, registryName string) (result TaskListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TasksClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, registryName) + return +} + +// Update updates a task with the specified parameters. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +// taskName - the name of the container registry task. +// taskUpdateParameters - the parameters for updating a task. 
+func (client TasksClient) Update(ctx context.Context, resourceGroupName string, registryName string, taskName string, taskUpdateParameters TaskUpdateParameters) (result TasksUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TasksClient.Update") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: taskName, + Constraints: []validation.Constraint{{Target: "taskName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "taskName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "taskName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9-_]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.TasksClient", "Update", err.Error()) + } + + req, err := client.UpdatePreparer(ctx, resourceGroupName, registryName, taskName, taskUpdateParameters) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TasksClient", "Update", nil, "Failure preparing request") + return + } + + result, err = client.UpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TasksClient", "Update", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client TasksClient) UpdatePreparer(ctx context.Context, resourceGroupName string, registryName string, taskName string, taskUpdateParameters TaskUpdateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "taskName": autorest.Encode("path", taskName), + } + + const APIVersion = "2019-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tasks/{taskName}", pathParameters), + autorest.WithJSON(taskUpdateParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client TasksClient) UpdateSender(req *http.Request) (future TasksUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client TasksClient) UpdateResponder(resp *http.Response) (result Task, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/extender/code/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/version.go b/extender/code/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/version.go new file mode 100644 index 000000000..8c51fa478 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/version.go @@ -0,0 +1,30 @@ +package containerregistry + +import "github.com/Azure/azure-sdk-for-go/version" + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return "Azure-SDK-For-Go/" + Version() + " containerregistry/2019-05-01" +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return version.Number +} diff --git a/extender/code/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/webhooks.go b/extender/code/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/webhooks.go new file mode 100644 index 000000000..7de5c13ec --- /dev/null +++ b/extender/code/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry/webhooks.go @@ -0,0 +1,860 @@ +package containerregistry + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
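For context when reviewing these vendored clients: every long-running operation here (Create, Delete, Update on both TasksClient and WebhooksClient) follows the same shape — the Sender returns a future type, and the caller is expected to block on that future before extracting the typed result. A minimal usage sketch, assuming the standard go-autorest helpers (auth.NewAuthorizerFromEnvironment, Future.WaitForCompletionRef) behave as I recall, with hypothetical subscription/resource names throughout:

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	ctx := context.Background()

	// Subscription ID, resource group, registry, and webhook names are
	// placeholders for illustration only.
	client := containerregistry.NewWebhooksClient("<subscription-id>")
	authorizer, err := auth.NewAuthorizerFromEnvironment() // assumes Azure credentials in env vars
	if err != nil {
		panic(err)
	}
	client.Authorizer = authorizer

	// Kick off the long-running create; the returned future wraps the initial response.
	future, err := client.Create(ctx, "my-rg", "myregistry", "mywebhook",
		containerregistry.WebhookCreateParameters{ /* Location, ServiceURI, Actions, ... */ })
	if err != nil {
		panic(err)
	}

	// Block until the service reports the operation finished, then decode the result.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		panic(err)
	}
	webhook, err := future.Result(client)
	if err != nil {
		panic(err)
	}
	fmt.Println(*webhook.Name)
}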
+
+import (
+	"context"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/validation"
+	"github.com/Azure/go-autorest/tracing"
+	"net/http"
+)
+
+// WebhooksClient is the client for the Webhooks methods of the Containerregistry service.
+type WebhooksClient struct {
+	BaseClient
+}
+
+// NewWebhooksClient creates an instance of the WebhooksClient client.
+func NewWebhooksClient(subscriptionID string) WebhooksClient {
+	return NewWebhooksClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWebhooksClientWithBaseURI creates an instance of the WebhooksClient client using a custom endpoint. Use this
+// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
+func NewWebhooksClientWithBaseURI(baseURI string, subscriptionID string) WebhooksClient {
+	return WebhooksClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Create creates a webhook for a container registry with the specified parameters.
+// Parameters:
+// resourceGroupName - the name of the resource group to which the container registry belongs.
+// registryName - the name of the container registry.
+// webhookName - the name of the webhook.
+// webhookCreateParameters - the parameters for creating a webhook.
+func (client WebhooksClient) Create(ctx context.Context, resourceGroupName string, registryName string, webhookName string, webhookCreateParameters WebhookCreateParameters) (result WebhooksCreateFuture, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/WebhooksClient.Create")
+		defer func() {
+			sc := -1
+			if result.Response() != nil {
+				sc = result.Response().StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	if err := validation.Validate([]validation.Validation{
+		{TargetValue: resourceGroupName,
+			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+		{TargetValue: registryName,
+			Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil},
+				{Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil},
+				{Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}},
+		{TargetValue: webhookName,
+			Constraints: []validation.Constraint{{Target: "webhookName", Name: validation.MaxLength, Rule: 50, Chain: nil},
+				{Target: "webhookName", Name: validation.MinLength, Rule: 5, Chain: nil},
+				{Target: "webhookName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}},
+		{TargetValue: webhookCreateParameters,
+			Constraints: []validation.Constraint{{Target: "webhookCreateParameters.Location", Name: validation.Null, Rule: true, Chain: nil},
+				{Target: "webhookCreateParameters.WebhookPropertiesCreateParameters", Name: validation.Null, Rule: false,
+					Chain: []validation.Constraint{{Target: "webhookCreateParameters.WebhookPropertiesCreateParameters.ServiceURI", Name: validation.Null, Rule: true, Chain: nil},
+						{Target: "webhookCreateParameters.WebhookPropertiesCreateParameters.Actions", Name: validation.Null, Rule: true, Chain: nil},
+					}}}}}); err != nil {
+		return result, validation.NewError("containerregistry.WebhooksClient", "Create", err.Error())
+	}
+
+	req, err := client.CreatePreparer(ctx, resourceGroupName, registryName, webhookName, webhookCreateParameters)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "Create", nil, "Failure preparing request")
+		return
+	}
+
+	result, err = client.CreateSender(req)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "Create", result.Response(), "Failure sending request")
+		return
+	}
+
+	return
+}
+
+// CreatePreparer prepares the Create request.
+func (client WebhooksClient) CreatePreparer(ctx context.Context, resourceGroupName string, registryName string, webhookName string, webhookCreateParameters WebhookCreateParameters) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"registryName":      autorest.Encode("path", registryName),
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+		"webhookName":       autorest.Encode("path", webhookName),
+	}
+
+	const APIVersion = "2019-05-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsContentType("application/json; charset=utf-8"),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}", pathParameters),
+		autorest.WithJSON(webhookCreateParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateSender sends the Create request. The method will close the
+// http.Response Body if it receives an error.
+func (client WebhooksClient) CreateSender(req *http.Request) (future WebhooksCreateFuture, err error) {
+	var resp *http.Response
+	resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
+	if err != nil {
+		return
+	}
+	future.Future, err = azure.NewFutureFromResponse(resp)
+	return
+}
+
+// CreateResponder handles the response to the Create request. The method always
+// closes the http.Response Body.
+func (client WebhooksClient) CreateResponder(resp *http.Response) (result Webhook, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Delete deletes a webhook from a container registry.
+// Parameters:
+// resourceGroupName - the name of the resource group to which the container registry belongs.
+// registryName - the name of the container registry.
+// webhookName - the name of the webhook.
+func (client WebhooksClient) Delete(ctx context.Context, resourceGroupName string, registryName string, webhookName string) (result WebhooksDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/WebhooksClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: webhookName, + Constraints: []validation.Constraint{{Target: "webhookName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "webhookName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "webhookName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.WebhooksClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, registryName, webhookName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client WebhooksClient) DeletePreparer(ctx context.Context, resourceGroupName string, registryName string, webhookName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "webhookName": autorest.Encode("path", webhookName), + } + + const APIVersion = "2019-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client WebhooksClient) DeleteSender(req *http.Request) (future WebhooksDeleteFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (client WebhooksClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the properties of the specified webhook. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +// webhookName - the name of the webhook. +func (client WebhooksClient) Get(ctx context.Context, resourceGroupName string, registryName string, webhookName string) (result Webhook, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/WebhooksClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: webhookName, + Constraints: []validation.Constraint{{Target: "webhookName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "webhookName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "webhookName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.WebhooksClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, registryName, webhookName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client WebhooksClient) GetPreparer(ctx context.Context, resourceGroupName string, registryName string, webhookName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "webhookName": autorest.Encode("path", webhookName), + } + + const APIVersion = "2019-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client WebhooksClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client WebhooksClient) GetResponder(resp *http.Response) (result Webhook, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetCallbackConfig gets the configuration of service URI and custom headers for the webhook. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +// webhookName - the name of the webhook. 
+func (client WebhooksClient) GetCallbackConfig(ctx context.Context, resourceGroupName string, registryName string, webhookName string) (result CallbackConfig, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/WebhooksClient.GetCallbackConfig") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: webhookName, + Constraints: []validation.Constraint{{Target: "webhookName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "webhookName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "webhookName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.WebhooksClient", "GetCallbackConfig", err.Error()) + } + + req, err := client.GetCallbackConfigPreparer(ctx, resourceGroupName, registryName, webhookName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "GetCallbackConfig", nil, "Failure preparing request") + return + } + + resp, err := client.GetCallbackConfigSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "GetCallbackConfig", resp, "Failure sending request") + return + } + + result, err = client.GetCallbackConfigResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "GetCallbackConfig", resp, "Failure responding to request") + } + + return +} + +// GetCallbackConfigPreparer prepares the GetCallbackConfig request. +func (client WebhooksClient) GetCallbackConfigPreparer(ctx context.Context, resourceGroupName string, registryName string, webhookName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "webhookName": autorest.Encode("path", webhookName), + } + + const APIVersion = "2019-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}/getCallbackConfig", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetCallbackConfigSender sends the GetCallbackConfig request. The method will close the +// http.Response Body if it receives an error. 
+func (client WebhooksClient) GetCallbackConfigSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetCallbackConfigResponder handles the response to the GetCallbackConfig request. The method always +// closes the http.Response Body. +func (client WebhooksClient) GetCallbackConfigResponder(resp *http.Response) (result CallbackConfig, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all the webhooks for the specified container registry. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +func (client WebhooksClient) List(ctx context.Context, resourceGroupName string, registryName string) (result WebhookListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/WebhooksClient.List") + defer func() { + sc := -1 + if result.wlr.Response.Response != nil { + sc = result.wlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.WebhooksClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, registryName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.wlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "List", resp, "Failure sending request") + return + } + + result.wlr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client WebhooksClient) ListPreparer(ctx context.Context, resourceGroupName string, registryName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client WebhooksClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client WebhooksClient) ListResponder(resp *http.Response) (result WebhookListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client WebhooksClient) listNextResults(ctx context.Context, lastResults WebhookListResult) (result WebhookListResult, err error) { + req, err := lastResults.webhookListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client WebhooksClient) ListComplete(ctx context.Context, resourceGroupName string, registryName string) (result WebhookListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/WebhooksClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, registryName) + return +} + +// ListEvents lists recent events for the specified webhook. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +// webhookName - the name of the webhook. 
+func (client WebhooksClient) ListEvents(ctx context.Context, resourceGroupName string, registryName string, webhookName string) (result EventListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/WebhooksClient.ListEvents") + defer func() { + sc := -1 + if result.elr.Response.Response != nil { + sc = result.elr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: webhookName, + Constraints: []validation.Constraint{{Target: "webhookName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "webhookName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "webhookName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.WebhooksClient", "ListEvents", err.Error()) + } + + result.fn = client.listEventsNextResults + req, err := client.ListEventsPreparer(ctx, resourceGroupName, registryName, webhookName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "ListEvents", nil, "Failure preparing request") + return + } + + resp, err := client.ListEventsSender(req) + if err != nil { + result.elr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "ListEvents", resp, "Failure sending request") + return + } + + result.elr, err = client.ListEventsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "ListEvents", resp, "Failure responding to request") + } + + return +} + +// ListEventsPreparer prepares the ListEvents request. +func (client WebhooksClient) ListEventsPreparer(ctx context.Context, resourceGroupName string, registryName string, webhookName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "webhookName": autorest.Encode("path", webhookName), + } + + const APIVersion = "2019-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}/listEvents", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListEventsSender sends the ListEvents request. The method will close the +// http.Response Body if it receives an error. 
+func (client WebhooksClient) ListEventsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListEventsResponder handles the response to the ListEvents request. The method always +// closes the http.Response Body. +func (client WebhooksClient) ListEventsResponder(resp *http.Response) (result EventListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listEventsNextResults retrieves the next set of results, if any. +func (client WebhooksClient) listEventsNextResults(ctx context.Context, lastResults EventListResult) (result EventListResult, err error) { + req, err := lastResults.eventListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "listEventsNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListEventsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "listEventsNextResults", resp, "Failure sending next results request") + } + result, err = client.ListEventsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "listEventsNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListEventsComplete enumerates all values, automatically crossing page boundaries as required. +func (client WebhooksClient) ListEventsComplete(ctx context.Context, resourceGroupName string, registryName string, webhookName string) (result EventListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/WebhooksClient.ListEvents") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListEvents(ctx, resourceGroupName, registryName, webhookName) + return +} + +// Ping triggers a ping event to be sent to the webhook. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +// webhookName - the name of the webhook. 
+func (client WebhooksClient) Ping(ctx context.Context, resourceGroupName string, registryName string, webhookName string) (result EventInfo, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/WebhooksClient.Ping") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: webhookName, + Constraints: []validation.Constraint{{Target: "webhookName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "webhookName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "webhookName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.WebhooksClient", "Ping", err.Error()) + } + + req, err := client.PingPreparer(ctx, resourceGroupName, registryName, webhookName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "Ping", nil, "Failure preparing request") + return + } + + resp, err := client.PingSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "Ping", resp, "Failure sending request") + return + } + + result, err = client.PingResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "Ping", resp, "Failure responding to request") + } + + return +} + +// PingPreparer prepares the Ping request. +func (client WebhooksClient) PingPreparer(ctx context.Context, resourceGroupName string, registryName string, webhookName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "webhookName": autorest.Encode("path", webhookName), + } + + const APIVersion = "2019-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}/ping", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// PingSender sends the Ping request. The method will close the +// http.Response Body if it receives an error. +func (client WebhooksClient) PingSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// PingResponder handles the response to the Ping request. The method always +// closes the http.Response Body. 
+func (client WebhooksClient) PingResponder(resp *http.Response) (result EventInfo, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update updates a webhook with the specified parameters. +// Parameters: +// resourceGroupName - the name of the resource group to which the container registry belongs. +// registryName - the name of the container registry. +// webhookName - the name of the webhook. +// webhookUpdateParameters - the parameters for updating a webhook. +func (client WebhooksClient) Update(ctx context.Context, resourceGroupName string, registryName string, webhookName string, webhookUpdateParameters WebhookUpdateParameters) (result WebhooksUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/WebhooksClient.Update") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: webhookName, + Constraints: []validation.Constraint{{Target: "webhookName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "webhookName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "webhookName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerregistry.WebhooksClient", "Update", err.Error()) + } + + req, err := client.UpdatePreparer(ctx, resourceGroupName, registryName, webhookName, webhookUpdateParameters) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "Update", nil, "Failure preparing request") + return + } + + result, err = client.UpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "Update", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. 
+func (client WebhooksClient) UpdatePreparer(ctx context.Context, resourceGroupName string, registryName string, webhookName string, webhookUpdateParameters WebhookUpdateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "webhookName": autorest.Encode("path", webhookName), + } + + const APIVersion = "2019-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}", pathParameters), + autorest.WithJSON(webhookUpdateParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client WebhooksClient) UpdateSender(req *http.Request) (future WebhooksUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client WebhooksClient) UpdateResponder(resp *http.Response) (result Webhook, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/extender/code/vendor/github.com/Azure/azure-sdk-for-go/version/version.go b/extender/code/vendor/github.com/Azure/azure-sdk-for-go/version/version.go new file mode 100644 index 000000000..ae1ef97e1 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/azure-sdk-for-go/version/version.go @@ -0,0 +1,21 @@ +package version + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// Number contains the semantic version of this SDK. 
+const Number = "v43.0.0"
diff --git a/extender/code/vendor/github.com/Azure/go-ansiterm/LICENSE b/extender/code/vendor/github.com/Azure/go-ansiterm/LICENSE
new file mode 100644
index 000000000..e3d9a64d1
--- /dev/null
+++ b/extender/code/vendor/github.com/Azure/go-ansiterm/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Microsoft Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/extender/code/vendor/github.com/Azure/go-ansiterm/README.md b/extender/code/vendor/github.com/Azure/go-ansiterm/README.md
new file mode 100644
index 000000000..261c041e7
--- /dev/null
+++ b/extender/code/vendor/github.com/Azure/go-ansiterm/README.md
@@ -0,0 +1,12 @@
+# go-ansiterm
+
+This is a cross-platform ANSI terminal emulation library. It reads a stream of ANSI characters and produces the appropriate function calls. The results of the function calls are platform-dependent.
+
+For example, the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor-up function (CUU()) on an event handler. The event handler determines what platform-specific work must be done to cause the cursor to move up one position.
+
+The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations: one for tests (test_event_handler.go), which validates that the expected events are produced and called, and a Windows implementation (winterm/win_event_handler.go).
+
+See parser_test.go for examples exercising the state machine and generating appropriate function calls.
+
+-----
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
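For orientation before the vendored sources that follow, here is a minimal usage sketch of this parser. It is illustrative only and not part of the vendored files; it is Windows-only, since it pairs the parser with the winterm event handler, and every identifier is taken from parser.go, winterm/ansi.go, and winterm/win_event_handler.go later in this diff:

package main

import (
	"syscall"

	"github.com/Azure/go-ansiterm"
	"github.com/Azure/go-ansiterm/winterm"
)

func main() {
	// Resolve stdout and its Windows console handle.
	file, fd := winterm.GetStdFile(syscall.STD_OUTPUT_HANDLE)

	// CreateWinEventHandler returns nil when the handle is not an
	// interactive console (e.g., redirected output), so check it.
	handler := winterm.CreateWinEventHandler(fd, file)
	if handler == nil {
		return
	}

	// Start the state machine in the Ground state and feed it bytes.
	// "ESC [ 3 1 m" is dispatched to the handler as SGR([31]) (red foreground).
	parser := ansiterm.CreateParser("Ground", handler)
	if _, err := parser.Parse([]byte("\x1b[31mred text\x1b[0m\r\n")); err != nil {
		panic(err)
	}
}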
diff --git a/extender/code/vendor/github.com/Azure/go-ansiterm/constants.go b/extender/code/vendor/github.com/Azure/go-ansiterm/constants.go new file mode 100644 index 000000000..96504a33b --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-ansiterm/constants.go @@ -0,0 +1,188 @@ +package ansiterm + +const LogEnv = "DEBUG_TERMINAL" + +// ANSI constants +// References: +// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm +// -- http://man7.org/linux/man-pages/man4/console_codes.4.html +// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html +// -- http://en.wikipedia.org/wiki/ANSI_escape_code +// -- http://vt100.net/emu/dec_ansi_parser +// -- http://vt100.net/emu/vt500_parser.svg +// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html +// -- http://www.inwap.com/pdp10/ansicode.txt +const ( + // ECMA-48 Set Graphics Rendition + // Note: + // -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved + // -- Fonts could possibly be supported via SetCurrentConsoleFontEx + // -- Windows does not expose the per-window cursor (i.e., caret) blink times + ANSI_SGR_RESET = 0 + ANSI_SGR_BOLD = 1 + ANSI_SGR_DIM = 2 + _ANSI_SGR_ITALIC = 3 + ANSI_SGR_UNDERLINE = 4 + _ANSI_SGR_BLINKSLOW = 5 + _ANSI_SGR_BLINKFAST = 6 + ANSI_SGR_REVERSE = 7 + _ANSI_SGR_INVISIBLE = 8 + _ANSI_SGR_LINETHROUGH = 9 + _ANSI_SGR_FONT_00 = 10 + _ANSI_SGR_FONT_01 = 11 + _ANSI_SGR_FONT_02 = 12 + _ANSI_SGR_FONT_03 = 13 + _ANSI_SGR_FONT_04 = 14 + _ANSI_SGR_FONT_05 = 15 + _ANSI_SGR_FONT_06 = 16 + _ANSI_SGR_FONT_07 = 17 + _ANSI_SGR_FONT_08 = 18 + _ANSI_SGR_FONT_09 = 19 + _ANSI_SGR_FONT_10 = 20 + _ANSI_SGR_DOUBLEUNDERLINE = 21 + ANSI_SGR_BOLD_DIM_OFF = 22 + _ANSI_SGR_ITALIC_OFF = 23 + ANSI_SGR_UNDERLINE_OFF = 24 + _ANSI_SGR_BLINK_OFF = 25 + _ANSI_SGR_RESERVED_00 = 26 + ANSI_SGR_REVERSE_OFF = 27 + _ANSI_SGR_INVISIBLE_OFF = 28 + _ANSI_SGR_LINETHROUGH_OFF = 29 + ANSI_SGR_FOREGROUND_BLACK = 30 + ANSI_SGR_FOREGROUND_RED = 31 + ANSI_SGR_FOREGROUND_GREEN = 32 + ANSI_SGR_FOREGROUND_YELLOW = 33 + ANSI_SGR_FOREGROUND_BLUE = 34 + ANSI_SGR_FOREGROUND_MAGENTA = 35 + ANSI_SGR_FOREGROUND_CYAN = 36 + ANSI_SGR_FOREGROUND_WHITE = 37 + _ANSI_SGR_RESERVED_01 = 38 + ANSI_SGR_FOREGROUND_DEFAULT = 39 + ANSI_SGR_BACKGROUND_BLACK = 40 + ANSI_SGR_BACKGROUND_RED = 41 + ANSI_SGR_BACKGROUND_GREEN = 42 + ANSI_SGR_BACKGROUND_YELLOW = 43 + ANSI_SGR_BACKGROUND_BLUE = 44 + ANSI_SGR_BACKGROUND_MAGENTA = 45 + ANSI_SGR_BACKGROUND_CYAN = 46 + ANSI_SGR_BACKGROUND_WHITE = 47 + _ANSI_SGR_RESERVED_02 = 48 + ANSI_SGR_BACKGROUND_DEFAULT = 49 + // 50 - 65: Unsupported + + ANSI_MAX_CMD_LENGTH = 4096 + + MAX_INPUT_EVENTS = 128 + DEFAULT_WIDTH = 80 + DEFAULT_HEIGHT = 24 + + ANSI_BEL = 0x07 + ANSI_BACKSPACE = 0x08 + ANSI_TAB = 0x09 + ANSI_LINE_FEED = 0x0A + ANSI_VERTICAL_TAB = 0x0B + ANSI_FORM_FEED = 0x0C + ANSI_CARRIAGE_RETURN = 0x0D + ANSI_ESCAPE_PRIMARY = 0x1B + ANSI_ESCAPE_SECONDARY = 0x5B + ANSI_OSC_STRING_ENTRY = 0x5D + ANSI_COMMAND_FIRST = 0x40 + ANSI_COMMAND_LAST = 0x7E + DCS_ENTRY = 0x90 + CSI_ENTRY = 0x9B + OSC_STRING = 0x9D + ANSI_PARAMETER_SEP = ";" + ANSI_CMD_G0 = '(' + ANSI_CMD_G1 = ')' + ANSI_CMD_G2 = '*' + ANSI_CMD_G3 = '+' + ANSI_CMD_DECPNM = '>' + ANSI_CMD_DECPAM = '=' + ANSI_CMD_OSC = ']' + ANSI_CMD_STR_TERM = '\\' + + KEY_CONTROL_PARAM_2 = ";2" + KEY_CONTROL_PARAM_3 = ";3" + KEY_CONTROL_PARAM_4 = ";4" + KEY_CONTROL_PARAM_5 = ";5" + KEY_CONTROL_PARAM_6 = ";6" + KEY_CONTROL_PARAM_7 = ";7" + KEY_CONTROL_PARAM_8 = ";8" + KEY_ESC_CSI = "\x1B[" + KEY_ESC_N = "\x1BN" + KEY_ESC_O = 
"\x1BO" + + FILL_CHARACTER = ' ' +) + +func getByteRange(start byte, end byte) []byte { + bytes := make([]byte, 0, 32) + for i := start; i <= end; i++ { + bytes = append(bytes, byte(i)) + } + + return bytes +} + +var toGroundBytes = getToGroundBytes() +var executors = getExecuteBytes() + +// SPACE 20+A0 hex Always and everywhere a blank space +// Intermediate 20-2F hex !"#$%&'()*+,-./ +var intermeds = getByteRange(0x20, 0x2F) + +// Parameters 30-3F hex 0123456789:;<=>? +// CSI Parameters 30-39, 3B hex 0123456789; +var csiParams = getByteRange(0x30, 0x3F) + +var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...) + +// Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_ +var upperCase = getByteRange(0x40, 0x5F) + +// Lowercase 60-7E hex `abcdefghijlkmnopqrstuvwxyz{|}~ +var lowerCase = getByteRange(0x60, 0x7E) + +// Alphabetics 40-7E hex (all of upper and lower case) +var alphabetics = append(upperCase, lowerCase...) + +var printables = getByteRange(0x20, 0x7F) + +var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E) +var escapeToGroundBytes = getEscapeToGroundBytes() + +// See http://www.vt100.net/emu/vt500_parser.png for description of the complex +// byte ranges below + +func getEscapeToGroundBytes() []byte { + escapeToGroundBytes := getByteRange(0x30, 0x4F) + escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...) + escapeToGroundBytes = append(escapeToGroundBytes, 0x59) + escapeToGroundBytes = append(escapeToGroundBytes, 0x5A) + escapeToGroundBytes = append(escapeToGroundBytes, 0x5C) + escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...) + return escapeToGroundBytes +} + +func getExecuteBytes() []byte { + executeBytes := getByteRange(0x00, 0x17) + executeBytes = append(executeBytes, 0x19) + executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...) + return executeBytes +} + +func getToGroundBytes() []byte { + groundBytes := []byte{0x18} + groundBytes = append(groundBytes, 0x1A) + groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...) + groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...) 
+ groundBytes = append(groundBytes, 0x99) + groundBytes = append(groundBytes, 0x9A) + groundBytes = append(groundBytes, 0x9C) + return groundBytes +} + +// Delete 7F hex Always and everywhere ignored +// C1 Control 80-9F hex 32 additional control characters +// G1 Displayable A1-FE hex 94 additional displayable characters +// Special A0+FF hex Same as SPACE and DELETE diff --git a/extender/code/vendor/github.com/Azure/go-ansiterm/context.go b/extender/code/vendor/github.com/Azure/go-ansiterm/context.go new file mode 100644 index 000000000..8d66e777c --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-ansiterm/context.go @@ -0,0 +1,7 @@ +package ansiterm + +type ansiContext struct { + currentChar byte + paramBuffer []byte + interBuffer []byte +} diff --git a/extender/code/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go b/extender/code/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go new file mode 100644 index 000000000..bcbe00d0c --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go @@ -0,0 +1,49 @@ +package ansiterm + +type csiEntryState struct { + baseState +} + +func (csiState csiEntryState) Handle(b byte) (s state, e error) { + csiState.parser.logf("CsiEntry::Handle %#x", b) + + nextState, err := csiState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(alphabetics, b): + return csiState.parser.ground, nil + case sliceContains(csiCollectables, b): + return csiState.parser.csiParam, nil + case sliceContains(executors, b): + return csiState, csiState.parser.execute() + } + + return csiState, nil +} + +func (csiState csiEntryState) Transition(s state) error { + csiState.parser.logf("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name()) + csiState.baseState.Transition(s) + + switch s { + case csiState.parser.ground: + return csiState.parser.csiDispatch() + case csiState.parser.csiParam: + switch { + case sliceContains(csiParams, csiState.parser.context.currentChar): + csiState.parser.collectParam() + case sliceContains(intermeds, csiState.parser.context.currentChar): + csiState.parser.collectInter() + } + } + + return nil +} + +func (csiState csiEntryState) Enter() error { + csiState.parser.clear() + return nil +} diff --git a/extender/code/vendor/github.com/Azure/go-ansiterm/csi_param_state.go b/extender/code/vendor/github.com/Azure/go-ansiterm/csi_param_state.go new file mode 100644 index 000000000..7ed5e01c3 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-ansiterm/csi_param_state.go @@ -0,0 +1,38 @@ +package ansiterm + +type csiParamState struct { + baseState +} + +func (csiState csiParamState) Handle(b byte) (s state, e error) { + csiState.parser.logf("CsiParam::Handle %#x", b) + + nextState, err := csiState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(alphabetics, b): + return csiState.parser.ground, nil + case sliceContains(csiCollectables, b): + csiState.parser.collectParam() + return csiState, nil + case sliceContains(executors, b): + return csiState, csiState.parser.execute() + } + + return csiState, nil +} + +func (csiState csiParamState) Transition(s state) error { + csiState.parser.logf("CsiParam::Transition %s --> %s", csiState.Name(), s.Name()) + csiState.baseState.Transition(s) + + switch s { + case csiState.parser.ground: + return csiState.parser.csiDispatch() + } + + return nil +} diff --git 
a/extender/code/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go b/extender/code/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go new file mode 100644 index 000000000..1c719db9e --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go @@ -0,0 +1,36 @@ +package ansiterm + +type escapeIntermediateState struct { + baseState +} + +func (escState escapeIntermediateState) Handle(b byte) (s state, e error) { + escState.parser.logf("escapeIntermediateState::Handle %#x", b) + nextState, err := escState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(intermeds, b): + return escState, escState.parser.collectInter() + case sliceContains(executors, b): + return escState, escState.parser.execute() + case sliceContains(escapeIntermediateToGroundBytes, b): + return escState.parser.ground, nil + } + + return escState, nil +} + +func (escState escapeIntermediateState) Transition(s state) error { + escState.parser.logf("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name()) + escState.baseState.Transition(s) + + switch s { + case escState.parser.ground: + return escState.parser.escDispatch() + } + + return nil +} diff --git a/extender/code/vendor/github.com/Azure/go-ansiterm/escape_state.go b/extender/code/vendor/github.com/Azure/go-ansiterm/escape_state.go new file mode 100644 index 000000000..6390abd23 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-ansiterm/escape_state.go @@ -0,0 +1,47 @@ +package ansiterm + +type escapeState struct { + baseState +} + +func (escState escapeState) Handle(b byte) (s state, e error) { + escState.parser.logf("escapeState::Handle %#x", b) + nextState, err := escState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case b == ANSI_ESCAPE_SECONDARY: + return escState.parser.csiEntry, nil + case b == ANSI_OSC_STRING_ENTRY: + return escState.parser.oscString, nil + case sliceContains(executors, b): + return escState, escState.parser.execute() + case sliceContains(escapeToGroundBytes, b): + return escState.parser.ground, nil + case sliceContains(intermeds, b): + return escState.parser.escapeIntermediate, nil + } + + return escState, nil +} + +func (escState escapeState) Transition(s state) error { + escState.parser.logf("Escape::Transition %s --> %s", escState.Name(), s.Name()) + escState.baseState.Transition(s) + + switch s { + case escState.parser.ground: + return escState.parser.escDispatch() + case escState.parser.escapeIntermediate: + return escState.parser.collectInter() + } + + return nil +} + +func (escState escapeState) Enter() error { + escState.parser.clear() + return nil +} diff --git a/extender/code/vendor/github.com/Azure/go-ansiterm/event_handler.go b/extender/code/vendor/github.com/Azure/go-ansiterm/event_handler.go new file mode 100644 index 000000000..98087b38c --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-ansiterm/event_handler.go @@ -0,0 +1,90 @@ +package ansiterm + +type AnsiEventHandler interface { + // Print + Print(b byte) error + + // Execute C0 commands + Execute(b byte) error + + // CUrsor Up + CUU(int) error + + // CUrsor Down + CUD(int) error + + // CUrsor Forward + CUF(int) error + + // CUrsor Backward + CUB(int) error + + // Cursor to Next Line + CNL(int) error + + // Cursor to Previous Line + CPL(int) error + + // Cursor Horizontal position Absolute + CHA(int) error + + // Vertical line Position Absolute + 
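	// (moves the cursor to an absolute row; the column is left unchanged)
+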
VPA(int) error + + // CUrsor Position + CUP(int, int) error + + // Horizontal and Vertical Position (depends on PUM) + HVP(int, int) error + + // Text Cursor Enable Mode + DECTCEM(bool) error + + // Origin Mode + DECOM(bool) error + + // 132 Column Mode + DECCOLM(bool) error + + // Erase in Display + ED(int) error + + // Erase in Line + EL(int) error + + // Insert Line + IL(int) error + + // Delete Line + DL(int) error + + // Insert Character + ICH(int) error + + // Delete Character + DCH(int) error + + // Set Graphics Rendition + SGR([]int) error + + // Pan Down + SU(int) error + + // Pan Up + SD(int) error + + // Device Attributes + DA([]string) error + + // Set Top and Bottom Margins + DECSTBM(int, int) error + + // Index + IND() error + + // Reverse Index + RI() error + + // Flush updates from previous commands + Flush() error +} diff --git a/extender/code/vendor/github.com/Azure/go-ansiterm/ground_state.go b/extender/code/vendor/github.com/Azure/go-ansiterm/ground_state.go new file mode 100644 index 000000000..52451e946 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-ansiterm/ground_state.go @@ -0,0 +1,24 @@ +package ansiterm + +type groundState struct { + baseState +} + +func (gs groundState) Handle(b byte) (s state, e error) { + gs.parser.context.currentChar = b + + nextState, err := gs.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(printables, b): + return gs, gs.parser.print() + + case sliceContains(executors, b): + return gs, gs.parser.execute() + } + + return gs, nil +} diff --git a/extender/code/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/extender/code/vendor/github.com/Azure/go-ansiterm/osc_string_state.go new file mode 100644 index 000000000..593b10ab6 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-ansiterm/osc_string_state.go @@ -0,0 +1,31 @@ +package ansiterm + +type oscStringState struct { + baseState +} + +func (oscState oscStringState) Handle(b byte) (s state, e error) { + oscState.parser.logf("OscString::Handle %#x", b) + nextState, err := oscState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case isOscStringTerminator(b): + return oscState.parser.ground, nil + } + + return oscState, nil +} + +// See below for OSC string terminators for linux +// http://man7.org/linux/man-pages/man4/console_codes.4.html +func isOscStringTerminator(b byte) bool { + + if b == ANSI_BEL || b == 0x5C { + return true + } + + return false +} diff --git a/extender/code/vendor/github.com/Azure/go-ansiterm/parser.go b/extender/code/vendor/github.com/Azure/go-ansiterm/parser.go new file mode 100644 index 000000000..03cec7ada --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-ansiterm/parser.go @@ -0,0 +1,151 @@ +package ansiterm + +import ( + "errors" + "log" + "os" +) + +type AnsiParser struct { + currState state + eventHandler AnsiEventHandler + context *ansiContext + csiEntry state + csiParam state + dcsEntry state + escape state + escapeIntermediate state + error state + ground state + oscString state + stateMap []state + + logf func(string, ...interface{}) +} + +type Option func(*AnsiParser) + +func WithLogf(f func(string, ...interface{})) Option { + return func(ap *AnsiParser) { + ap.logf = f + } +} + +func CreateParser(initialState string, evtHandler AnsiEventHandler, opts ...Option) *AnsiParser { + ap := &AnsiParser{ + eventHandler: evtHandler, + context: &ansiContext{}, + } + for _, o := range opts { + o(ap) + } 
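+
+	// DEBUG_TERMINAL=1 attaches a file logger writing to ansiParser.log; a logger
+	// supplied via WithLogf is chained with it rather than replaced (see below).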
+	if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" {
+		logFile, _ := os.Create("ansiParser.log")
+		logger := log.New(logFile, "", log.LstdFlags)
+		if ap.logf != nil {
+			l := ap.logf
+			ap.logf = func(s string, v ...interface{}) {
+				l(s, v...)
+				logger.Printf(s, v...)
+			}
+		} else {
+			ap.logf = logger.Printf
+		}
+	}
+
+	if ap.logf == nil {
+		ap.logf = func(string, ...interface{}) {}
+	}
+
+	ap.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: ap}}
+	ap.csiParam = csiParamState{baseState{name: "CsiParam", parser: ap}}
+	ap.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: ap}}
+	ap.escape = escapeState{baseState{name: "Escape", parser: ap}}
+	ap.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: ap}}
+	ap.error = errorState{baseState{name: "Error", parser: ap}}
+	ap.ground = groundState{baseState{name: "Ground", parser: ap}}
+	ap.oscString = oscStringState{baseState{name: "OscString", parser: ap}}
+
+	ap.stateMap = []state{
+		ap.csiEntry,
+		ap.csiParam,
+		ap.dcsEntry,
+		ap.escape,
+		ap.escapeIntermediate,
+		ap.error,
+		ap.ground,
+		ap.oscString,
+	}
+
+	ap.currState = getState(initialState, ap.stateMap)
+
+	ap.logf("CreateParser: parser %p", ap)
+	return ap
+}
+
+func getState(name string, states []state) state {
+	for _, el := range states {
+		if el.Name() == name {
+			return el
+		}
+	}
+
+	return nil
+}
+
+func (ap *AnsiParser) Parse(bytes []byte) (int, error) {
+	for i, b := range bytes {
+		if err := ap.handle(b); err != nil {
+			return i, err
+		}
+	}
+
+	return len(bytes), ap.eventHandler.Flush()
+}
+
+func (ap *AnsiParser) handle(b byte) error {
+	ap.context.currentChar = b
+	newState, err := ap.currState.Handle(b)
+	if err != nil {
+		return err
+	}
+
+	if newState == nil {
+		ap.logf("WARNING: newState is nil")
+		return errors.New("New state of 'nil' is invalid.")
+	}
+
+	if newState != ap.currState {
+		if err := ap.changeState(newState); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (ap *AnsiParser) changeState(newState state) error {
+	ap.logf("ChangeState %s --> %s", ap.currState.Name(), newState.Name())
+
+	// Exit old state
+	if err := ap.currState.Exit(); err != nil {
+		ap.logf("Exit state '%s' failed with : '%v'", ap.currState.Name(), err)
+		return err
+	}
+
+	// Perform transition action
+	if err := ap.currState.Transition(newState); err != nil {
+		ap.logf("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name(), err)
+		return err
+	}
+
+	// Enter new state
+	if err := newState.Enter(); err != nil {
+		ap.logf("Enter state '%s' failed with: '%v'", newState.Name(), err)
+		return err
+	}
+
+	ap.currState = newState
+	return nil
+}
diff --git a/extender/code/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go b/extender/code/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go
new file mode 100644
index 000000000..de0a1f9cd
--- /dev/null
+++ b/extender/code/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go
@@ -0,0 +1,99 @@
+package ansiterm
+
+import (
+	"strconv"
+)
+
+func parseParams(bytes []byte) ([]string, error) {
+	paramBuff := make([]byte, 0, 0)
+	params := []string{}
+
+	for _, v := range bytes {
+		if v == ';' {
+			if len(paramBuff) > 0 {
+				// Completed parameter, append it to the list
+				s := string(paramBuff)
+				params = append(params, s)
+				paramBuff = make([]byte, 0, 0)
+			}
+		} else {
+			paramBuff = append(paramBuff, v)
+		}
+	}
+
+	// Last parameter may not be terminated with ';'
+	if len(paramBuff) > 0 {
+		s := string(paramBuff)
+		params = append(params, s)
+	}
+
+	return params, nil
+}
+
+func parseCmd(context ansiContext) (string, error) {
+	return string(context.currentChar), nil
+}
+
+func getInt(params []string, dflt int) int {
+	i := getInts(params, 1, dflt)[0]
+	return i
+}
+
+func getInts(params []string, minCount int, dflt int) []int {
+	ints := []int{}
+
+	for _, v := range params {
+		i, _ := strconv.Atoi(v)
+		// Zero is mapped to the default value in VT100.
+		if i == 0 {
+			i = dflt
+		}
+		ints = append(ints, i)
+	}
+
+	if len(ints) < minCount {
+		remaining := minCount - len(ints)
+		for i := 0; i < remaining; i++ {
+			ints = append(ints, dflt)
+		}
+	}
+
+	return ints
+}
+
+func (ap *AnsiParser) modeDispatch(param string, set bool) error {
+	switch param {
+	case "?3":
+		return ap.eventHandler.DECCOLM(set)
+	case "?6":
+		return ap.eventHandler.DECOM(set)
+	case "?25":
+		return ap.eventHandler.DECTCEM(set)
+	}
+	return nil
+}
+
+func (ap *AnsiParser) hDispatch(params []string) error {
+	if len(params) == 1 {
+		return ap.modeDispatch(params[0], true)
+	}
+
+	return nil
+}
+
+func (ap *AnsiParser) lDispatch(params []string) error {
+	if len(params) == 1 {
+		return ap.modeDispatch(params[0], false)
+	}
+
+	return nil
+}
+
+func getEraseParam(params []string) int {
+	param := getInt(params, 0)
+	if param < 0 || 3 < param {
+		param = 0
+	}
+
+	return param
+}
diff --git a/extender/code/vendor/github.com/Azure/go-ansiterm/parser_actions.go b/extender/code/vendor/github.com/Azure/go-ansiterm/parser_actions.go
new file mode 100644
index 000000000..0bb5e51e9
--- /dev/null
+++ b/extender/code/vendor/github.com/Azure/go-ansiterm/parser_actions.go
@@ -0,0 +1,119 @@
+package ansiterm
+
+func (ap *AnsiParser) collectParam() error {
+	currChar := ap.context.currentChar
+	ap.logf("collectParam %#x", currChar)
+	ap.context.paramBuffer = append(ap.context.paramBuffer, currChar)
+	return nil
+}
+
+func (ap *AnsiParser) collectInter() error {
+	currChar := ap.context.currentChar
+	ap.logf("collectInter %#x", currChar)
+	// Accumulate intermediate bytes separately from parameter bytes.
+	ap.context.interBuffer = append(ap.context.interBuffer, currChar)
+	return nil
+}
+
+func (ap *AnsiParser) escDispatch() error {
+	cmd, _ := parseCmd(*ap.context)
+	intermeds := ap.context.interBuffer
+	ap.logf("escDispatch currentChar: %#x", ap.context.currentChar)
+	ap.logf("escDispatch: %v(%v)", cmd, intermeds)
+
+	switch cmd {
+	case "D": // IND
+		return ap.eventHandler.IND()
+	case "E": // NEL, equivalent to CRLF
+		err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN)
+		if err == nil {
+			err = ap.eventHandler.Execute(ANSI_LINE_FEED)
+		}
+		return err
+	case "M": // RI
+		return ap.eventHandler.RI()
+	}
+
+	return nil
+}
+
+func (ap *AnsiParser) csiDispatch() error {
+	cmd, _ := parseCmd(*ap.context)
+	params, _ := parseParams(ap.context.paramBuffer)
+	ap.logf("Parsed params: %v with length: %d", params, len(params))
+
+	ap.logf("csiDispatch: %v(%v)", cmd, params)
+
+	switch cmd {
+	case "@":
+		return ap.eventHandler.ICH(getInt(params, 1))
+	case "A":
+		return ap.eventHandler.CUU(getInt(params, 1))
+	case "B":
+		return ap.eventHandler.CUD(getInt(params, 1))
+	case "C":
+		return ap.eventHandler.CUF(getInt(params, 1))
+	case "D":
+		return ap.eventHandler.CUB(getInt(params, 1))
+	case "E":
+		return ap.eventHandler.CNL(getInt(params, 1))
+	case "F":
+		return ap.eventHandler.CPL(getInt(params, 1))
+	case "G":
+		return ap.eventHandler.CHA(getInt(params, 1))
+	case "H":
+		ints := getInts(params, 2, 1)
+		x, y := ints[0], ints[1]
+		return ap.eventHandler.CUP(x, y)
+	case "J":
+		param := getEraseParam(params)
+		return
ap.eventHandler.ED(param) + case "K": + param := getEraseParam(params) + return ap.eventHandler.EL(param) + case "L": + return ap.eventHandler.IL(getInt(params, 1)) + case "M": + return ap.eventHandler.DL(getInt(params, 1)) + case "P": + return ap.eventHandler.DCH(getInt(params, 1)) + case "S": + return ap.eventHandler.SU(getInt(params, 1)) + case "T": + return ap.eventHandler.SD(getInt(params, 1)) + case "c": + return ap.eventHandler.DA(params) + case "d": + return ap.eventHandler.VPA(getInt(params, 1)) + case "f": + ints := getInts(params, 2, 1) + x, y := ints[0], ints[1] + return ap.eventHandler.HVP(x, y) + case "h": + return ap.hDispatch(params) + case "l": + return ap.lDispatch(params) + case "m": + return ap.eventHandler.SGR(getInts(params, 1, 0)) + case "r": + ints := getInts(params, 2, 1) + top, bottom := ints[0], ints[1] + return ap.eventHandler.DECSTBM(top, bottom) + default: + ap.logf("ERROR: Unsupported CSI command: '%s', with full context: %v", cmd, ap.context) + return nil + } + +} + +func (ap *AnsiParser) print() error { + return ap.eventHandler.Print(ap.context.currentChar) +} + +func (ap *AnsiParser) clear() error { + ap.context = &ansiContext{} + return nil +} + +func (ap *AnsiParser) execute() error { + return ap.eventHandler.Execute(ap.context.currentChar) +} diff --git a/extender/code/vendor/github.com/Azure/go-ansiterm/states.go b/extender/code/vendor/github.com/Azure/go-ansiterm/states.go new file mode 100644 index 000000000..f2ea1fcd1 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-ansiterm/states.go @@ -0,0 +1,71 @@ +package ansiterm + +type stateID int + +type state interface { + Enter() error + Exit() error + Handle(byte) (state, error) + Name() string + Transition(state) error +} + +type baseState struct { + name string + parser *AnsiParser +} + +func (base baseState) Enter() error { + return nil +} + +func (base baseState) Exit() error { + return nil +} + +func (base baseState) Handle(b byte) (s state, e error) { + + switch { + case b == CSI_ENTRY: + return base.parser.csiEntry, nil + case b == DCS_ENTRY: + return base.parser.dcsEntry, nil + case b == ANSI_ESCAPE_PRIMARY: + return base.parser.escape, nil + case b == OSC_STRING: + return base.parser.oscString, nil + case sliceContains(toGroundBytes, b): + return base.parser.ground, nil + } + + return nil, nil +} + +func (base baseState) Name() string { + return base.name +} + +func (base baseState) Transition(s state) error { + if s == base.parser.ground { + execBytes := []byte{0x18} + execBytes = append(execBytes, 0x1A) + execBytes = append(execBytes, getByteRange(0x80, 0x8F)...) + execBytes = append(execBytes, getByteRange(0x91, 0x97)...) 
+ execBytes = append(execBytes, 0x99) + execBytes = append(execBytes, 0x9A) + + if sliceContains(execBytes, base.parser.context.currentChar) { + return base.parser.execute() + } + } + + return nil +} + +type dcsEntryState struct { + baseState +} + +type errorState struct { + baseState +} diff --git a/extender/code/vendor/github.com/Azure/go-ansiterm/utilities.go b/extender/code/vendor/github.com/Azure/go-ansiterm/utilities.go new file mode 100644 index 000000000..392114493 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-ansiterm/utilities.go @@ -0,0 +1,21 @@ +package ansiterm + +import ( + "strconv" +) + +func sliceContains(bytes []byte, b byte) bool { + for _, v := range bytes { + if v == b { + return true + } + } + + return false +} + +func convertBytesToInteger(bytes []byte) int { + s := string(bytes) + i, _ := strconv.Atoi(s) + return i +} diff --git a/extender/code/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/extender/code/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go new file mode 100644 index 000000000..a67327972 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go @@ -0,0 +1,182 @@ +// +build windows + +package winterm + +import ( + "fmt" + "os" + "strconv" + "strings" + "syscall" + + "github.com/Azure/go-ansiterm" +) + +// Windows keyboard constants +// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx. +const ( + VK_PRIOR = 0x21 // PAGE UP key + VK_NEXT = 0x22 // PAGE DOWN key + VK_END = 0x23 // END key + VK_HOME = 0x24 // HOME key + VK_LEFT = 0x25 // LEFT ARROW key + VK_UP = 0x26 // UP ARROW key + VK_RIGHT = 0x27 // RIGHT ARROW key + VK_DOWN = 0x28 // DOWN ARROW key + VK_SELECT = 0x29 // SELECT key + VK_PRINT = 0x2A // PRINT key + VK_EXECUTE = 0x2B // EXECUTE key + VK_SNAPSHOT = 0x2C // PRINT SCREEN key + VK_INSERT = 0x2D // INS key + VK_DELETE = 0x2E // DEL key + VK_HELP = 0x2F // HELP key + VK_F1 = 0x70 // F1 key + VK_F2 = 0x71 // F2 key + VK_F3 = 0x72 // F3 key + VK_F4 = 0x73 // F4 key + VK_F5 = 0x74 // F5 key + VK_F6 = 0x75 // F6 key + VK_F7 = 0x76 // F7 key + VK_F8 = 0x77 // F8 key + VK_F9 = 0x78 // F9 key + VK_F10 = 0x79 // F10 key + VK_F11 = 0x7A // F11 key + VK_F12 = 0x7B // F12 key + + RIGHT_ALT_PRESSED = 0x0001 + LEFT_ALT_PRESSED = 0x0002 + RIGHT_CTRL_PRESSED = 0x0004 + LEFT_CTRL_PRESSED = 0x0008 + SHIFT_PRESSED = 0x0010 + NUMLOCK_ON = 0x0020 + SCROLLLOCK_ON = 0x0040 + CAPSLOCK_ON = 0x0080 + ENHANCED_KEY = 0x0100 +) + +type ansiCommand struct { + CommandBytes []byte + Command string + Parameters []string + IsSpecial bool +} + +func newAnsiCommand(command []byte) *ansiCommand { + + if isCharacterSelectionCmdChar(command[1]) { + // Is Character Set Selection commands + return &ansiCommand{ + CommandBytes: command, + Command: string(command), + IsSpecial: true, + } + } + + // last char is command character + lastCharIndex := len(command) - 1 + + ac := &ansiCommand{ + CommandBytes: command, + Command: string(command[lastCharIndex]), + IsSpecial: false, + } + + // more than a single escape + if lastCharIndex != 0 { + start := 1 + // skip if double char escape sequence + if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY { + start++ + } + // convert this to GetNextParam method + ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP) + } + + return ac +} + +func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 { + if index < 0 || index >= len(ac.Parameters) { + return defaultValue 
+	}
+
+	param, err := strconv.ParseInt(ac.Parameters[index], 10, 16)
+	if err != nil {
+		return defaultValue
+	}
+
+	return int16(param)
+}
+
+func (ac *ansiCommand) String() string {
+	return fmt.Sprintf("0x%v \"%v\" (\"%v\")",
+		bytesToHex(ac.CommandBytes),
+		ac.Command,
+		strings.Join(ac.Parameters, "\",\""))
+}
+
+// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands.
+// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html.
+func isAnsiCommandChar(b byte) bool {
+	switch {
+	case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY:
+		return true
+	case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM:
+		// non-CSI escape sequence terminator
+		return true
+	case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL:
+		// String escape sequence terminator
+		return true
+	}
+	return false
+}
+
+func isXtermOscSequence(command []byte, current byte) bool {
+	return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL)
+}
+
+func isCharacterSelectionCmdChar(b byte) bool {
+	return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3)
+}
+
+// bytesToHex converts a slice of bytes to a human-readable string.
+func bytesToHex(b []byte) string {
+	hex := make([]string, len(b))
+	for i, ch := range b {
+		hex[i] = fmt.Sprintf("%X", ch)
+	}
+	return strings.Join(hex, "")
+}
+
+// ensureInRange adjusts the passed value, if necessary, to ensure it is within
+// the passed min / max range.
+func ensureInRange(n int16, min int16, max int16) int16 {
+	if n < min {
+		return min
+	} else if n > max {
+		return max
+	} else {
+		return n
+	}
+}
+
+func GetStdFile(nFile int) (*os.File, uintptr) {
+	var file *os.File
+	switch nFile {
+	case syscall.STD_INPUT_HANDLE:
+		file = os.Stdin
+	case syscall.STD_OUTPUT_HANDLE:
+		file = os.Stdout
+	case syscall.STD_ERROR_HANDLE:
+		file = os.Stderr
+	default:
+		panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile))
+	}
+
+	fd, err := syscall.GetStdHandle(nFile)
+	if err != nil {
+		panic(fmt.Errorf("Invalid standard handle identifier: %v -- %v", nFile, err))
+	}
+
+	return file, uintptr(fd)
+}
diff --git a/extender/code/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/extender/code/vendor/github.com/Azure/go-ansiterm/winterm/api.go
new file mode 100644
index 000000000..6055e33b9
--- /dev/null
+++ b/extender/code/vendor/github.com/Azure/go-ansiterm/winterm/api.go
@@ -0,0 +1,327 @@
+// +build windows
+
+package winterm
+
+import (
+	"fmt"
+	"syscall"
+	"unsafe"
+)
+
+//===========================================================================================================
+// IMPORTANT NOTE:
+//
+//	The methods below make extensive use of the "unsafe" package to obtain the required pointers.
+//	Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack
+//	variables) the pointers reference *before* the API completes.
+//
+//	As a result, in those cases, the code must hint that the variables remain active by invoking the
+//	dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer
+//	require unsafe pointers.
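+//	(In this vendored copy, that hint is the no-op use(...) helper defined at the bottom of this file.)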
+// +// If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform +// the garbage collector the variables remain in use if: +// +// -- The value is not a pointer (e.g., int32, struct) +// -- The value is not referenced by the method after passing the pointer to Windows +// +// See http://golang.org/doc/go1.3. +//=========================================================================================================== + +var ( + kernel32DLL = syscall.NewLazyDLL("kernel32.dll") + + getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo") + setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo") + setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition") + setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") + getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") + setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize") + scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA") + setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute") + setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo") + writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW") + readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW") + waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject") +) + +// Windows Console constants +const ( + // Console modes + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. + ENABLE_PROCESSED_INPUT = 0x0001 + ENABLE_LINE_INPUT = 0x0002 + ENABLE_ECHO_INPUT = 0x0004 + ENABLE_WINDOW_INPUT = 0x0008 + ENABLE_MOUSE_INPUT = 0x0010 + ENABLE_INSERT_MODE = 0x0020 + ENABLE_QUICK_EDIT_MODE = 0x0040 + ENABLE_EXTENDED_FLAGS = 0x0080 + ENABLE_AUTO_POSITION = 0x0100 + ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200 + + ENABLE_PROCESSED_OUTPUT = 0x0001 + ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 + ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 + DISABLE_NEWLINE_AUTO_RETURN = 0x0008 + ENABLE_LVB_GRID_WORLDWIDE = 0x0010 + + // Character attributes + // Note: + // -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan). + // Clearing all foreground or background colors results in black; setting all creates white. + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes. + FOREGROUND_BLUE uint16 = 0x0001 + FOREGROUND_GREEN uint16 = 0x0002 + FOREGROUND_RED uint16 = 0x0004 + FOREGROUND_INTENSITY uint16 = 0x0008 + FOREGROUND_MASK uint16 = 0x000F + + BACKGROUND_BLUE uint16 = 0x0010 + BACKGROUND_GREEN uint16 = 0x0020 + BACKGROUND_RED uint16 = 0x0040 + BACKGROUND_INTENSITY uint16 = 0x0080 + BACKGROUND_MASK uint16 = 0x00F0 + + COMMON_LVB_MASK uint16 = 0xFF00 + COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000 + COMMON_LVB_UNDERSCORE uint16 = 0x8000 + + // Input event types + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. 
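+	// These event type values arrive in INPUT_RECORD.EventType (struct defined below).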
+	KEY_EVENT                = 0x0001
+	MOUSE_EVENT              = 0x0002
+	WINDOW_BUFFER_SIZE_EVENT = 0x0004
+	MENU_EVENT               = 0x0008
+	FOCUS_EVENT              = 0x0010
+
+	// WaitForSingleObject return codes
+	WAIT_ABANDONED = 0x00000080
+	WAIT_FAILED    = 0xFFFFFFFF
+	WAIT_SIGNALED  = 0x0000000
+	WAIT_TIMEOUT   = 0x00000102
+
+	// WaitForSingleObject wait duration
+	WAIT_INFINITE       = 0xFFFFFFFF
+	WAIT_ONE_SECOND     = 1000
+	WAIT_HALF_SECOND    = 500
+	WAIT_QUARTER_SECOND = 250
+)
+
+// Windows API Console types
+// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD)
+// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment
+type (
+	CHAR_INFO struct {
+		UnicodeChar uint16
+		Attributes  uint16
+	}
+
+	CONSOLE_CURSOR_INFO struct {
+		Size    uint32
+		Visible int32
+	}
+
+	CONSOLE_SCREEN_BUFFER_INFO struct {
+		Size              COORD
+		CursorPosition    COORD
+		Attributes        uint16
+		Window            SMALL_RECT
+		MaximumWindowSize COORD
+	}
+
+	COORD struct {
+		X int16
+		Y int16
+	}
+
+	SMALL_RECT struct {
+		Left   int16
+		Top    int16
+		Right  int16
+		Bottom int16
+	}
+
+	// INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest
+	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
+	INPUT_RECORD struct {
+		EventType uint16
+		KeyEvent  KEY_EVENT_RECORD
+	}
+
+	KEY_EVENT_RECORD struct {
+		KeyDown         int32
+		RepeatCount     uint16
+		VirtualKeyCode  uint16
+		VirtualScanCode uint16
+		UnicodeChar     uint16
+		ControlKeyState uint32
+	}
+
+	WINDOW_BUFFER_SIZE struct {
+		Size COORD
+	}
+)

+// boolToBOOL converts a Go bool into a Windows int32.
+func boolToBOOL(f bool) int32 {
+	if f {
+		return int32(1)
+	} else {
+		return int32(0)
+	}
+}
+
+// GetConsoleCursorInfo retrieves information about the size and visibility of the console cursor.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx.
+func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
+	r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
+	return checkError(r1, r2, err)
+}
+
+// SetConsoleCursorInfo sets the size and visibility of the console cursor.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx.
+func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
+	r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
+	return checkError(r1, r2, err)
+}
+
+// SetConsoleCursorPosition sets the location of the console cursor.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx.
+func SetConsoleCursorPosition(handle uintptr, coord COORD) error {
+	r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord))
+	use(coord)
+	return checkError(r1, r2, err)
+}
+
+// GetConsoleMode gets the console mode for the given file descriptor.
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx.
+func GetConsoleMode(handle uintptr) (mode uint32, err error) {
+	err = syscall.GetConsoleMode(syscall.Handle(handle), &mode)
+	return mode, err
+}
+
+// SetConsoleMode sets the console mode for the given file descriptor.
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
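+// The mode argument is a bitmask of the ENABLE_* console mode flags defined above.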
+func SetConsoleMode(handle uintptr, mode uint32) error { + r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0) + use(mode) + return checkError(r1, r2, err) +} + +// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer. +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx. +func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { + info := CONSOLE_SCREEN_BUFFER_INFO{} + err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)) + if err != nil { + return nil, err + } + return &info, nil +} + +func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error { + r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char))) + use(scrollRect) + use(clipRect) + use(destOrigin) + use(char) + return checkError(r1, r2, err) +} + +// SetConsoleScreenBufferSize sets the size of the console screen buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx. +func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error { + r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord)) + use(coord) + return checkError(r1, r2, err) +} + +// SetConsoleTextAttribute sets the attributes of characters written to the +// console screen buffer by the WriteFile or WriteConsole function. +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx. +func SetConsoleTextAttribute(handle uintptr, attribute uint16) error { + r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0) + use(attribute) + return checkError(r1, r2, err) +} + +// SetConsoleWindowInfo sets the size and position of the console screen buffer's window. +// Note that the size and location must be within and no larger than the backing console screen buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx. +func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error { + r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect))) + use(isAbsolute) + use(rect) + return checkError(r1, r2, err) +} + +// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx. +func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error { + r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion))) + use(buffer) + use(bufferSize) + use(bufferCoord) + return checkError(r1, r2, err) +} + +// ReadConsoleInput reads (and removes) data from the console input buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx. 
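+// The call blocks until at least one record is available; on return, *count holds
+// the number of records copied into buffer.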
+func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error { + r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count))) + use(buffer) + return checkError(r1, r2, err) +} + +// WaitForSingleObject waits for the passed handle to be signaled. +// It returns true if the handle was signaled; false otherwise. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx. +func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) { + r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait))) + switch r1 { + case WAIT_ABANDONED, WAIT_TIMEOUT: + return false, nil + case WAIT_SIGNALED: + return true, nil + } + use(msWait) + return false, err +} + +// String helpers +func (info CONSOLE_SCREEN_BUFFER_INFO) String() string { + return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize) +} + +func (coord COORD) String() string { + return fmt.Sprintf("%v,%v", coord.X, coord.Y) +} + +func (rect SMALL_RECT) String() string { + return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom) +} + +// checkError evaluates the results of a Windows API call and returns the error if it failed. +func checkError(r1, r2 uintptr, err error) error { + // Windows APIs return non-zero to indicate success + if r1 != 0 { + return nil + } + + // Return the error if provided, otherwise default to EINVAL + if err != nil { + return err + } + return syscall.EINVAL +} + +// coordToPointer converts a COORD into a uintptr (by fooling the type system). +func coordToPointer(c COORD) uintptr { + // Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass. + return uintptr(*((*uint32)(unsafe.Pointer(&c)))) +} + +// use is a no-op, but the compiler cannot see that it is. +// Calling use(p) ensures that p is kept live until that point. +func use(p interface{}) {} diff --git a/extender/code/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go b/extender/code/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go new file mode 100644 index 000000000..cbec8f728 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go @@ -0,0 +1,100 @@ +// +build windows + +package winterm + +import "github.com/Azure/go-ansiterm" + +const ( + FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE + BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE +) + +// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the +// request represented by the passed ANSI mode. 
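+// For example, ansiterm.ANSI_SGR_FOREGROUND_RED clears the foreground color bits and sets
+// FOREGROUND_RED; the returned bool reports whether reverse video is currently in effect.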
+func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) { + switch ansiMode { + + // Mode styles + case ansiterm.ANSI_SGR_BOLD: + windowsMode = windowsMode | FOREGROUND_INTENSITY + + case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF: + windowsMode &^= FOREGROUND_INTENSITY + + case ansiterm.ANSI_SGR_UNDERLINE: + windowsMode = windowsMode | COMMON_LVB_UNDERSCORE + + case ansiterm.ANSI_SGR_REVERSE: + inverted = true + + case ansiterm.ANSI_SGR_REVERSE_OFF: + inverted = false + + case ansiterm.ANSI_SGR_UNDERLINE_OFF: + windowsMode &^= COMMON_LVB_UNDERSCORE + + // Foreground colors + case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT: + windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK) + + case ansiterm.ANSI_SGR_FOREGROUND_BLACK: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) + + case ansiterm.ANSI_SGR_FOREGROUND_RED: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED + + case ansiterm.ANSI_SGR_FOREGROUND_GREEN: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN + + case ansiterm.ANSI_SGR_FOREGROUND_YELLOW: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN + + case ansiterm.ANSI_SGR_FOREGROUND_BLUE: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE + + case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE + + case ansiterm.ANSI_SGR_FOREGROUND_CYAN: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE + + case ansiterm.ANSI_SGR_FOREGROUND_WHITE: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE + + // Background colors + case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT: + // Black with no intensity + windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK) + + case ansiterm.ANSI_SGR_BACKGROUND_BLACK: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) + + case ansiterm.ANSI_SGR_BACKGROUND_RED: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED + + case ansiterm.ANSI_SGR_BACKGROUND_GREEN: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN + + case ansiterm.ANSI_SGR_BACKGROUND_YELLOW: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN + + case ansiterm.ANSI_SGR_BACKGROUND_BLUE: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE + + case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE + + case ansiterm.ANSI_SGR_BACKGROUND_CYAN: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE + + case ansiterm.ANSI_SGR_BACKGROUND_WHITE: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE + } + + return windowsMode, inverted +} + +// invertAttributes inverts the foreground and background colors of a Windows attributes value +func invertAttributes(windowsMode uint16) uint16 { + return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4) +} diff --git a/extender/code/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/extender/code/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go new file mode 100644 index 000000000..3ee06ea72 --- /dev/null +++ 
b/extender/code/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go @@ -0,0 +1,101 @@ +// +build windows + +package winterm + +const ( + horizontal = iota + vertical +) + +func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT { + if h.originMode { + sr := h.effectiveSr(info.Window) + return SMALL_RECT{ + Top: sr.top, + Bottom: sr.bottom, + Left: 0, + Right: info.Size.X - 1, + } + } else { + return SMALL_RECT{ + Top: info.Window.Top, + Bottom: info.Window.Bottom, + Left: 0, + Right: info.Size.X - 1, + } + } +} + +// setCursorPosition sets the cursor to the specified position, bounded to the screen size +func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error { + position.X = ensureInRange(position.X, window.Left, window.Right) + position.Y = ensureInRange(position.Y, window.Top, window.Bottom) + err := SetConsoleCursorPosition(h.fd, position) + if err != nil { + return err + } + h.logf("Cursor position set: (%d, %d)", position.X, position.Y) + return err +} + +func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error { + return h.moveCursor(vertical, param) +} + +func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error { + return h.moveCursor(horizontal, param) +} + +func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + switch moveMode { + case horizontal: + position.X += int16(param) + case vertical: + position.Y += int16(param) + } + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) moveCursorLine(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + position.X = 0 + position.Y += int16(param) + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + position.X = int16(param) - 1 + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} diff --git a/extender/code/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go b/extender/code/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go new file mode 100644 index 000000000..244b5fa25 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go @@ -0,0 +1,84 @@ +// +build windows + +package winterm + +import "github.com/Azure/go-ansiterm" + +func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error { + // Ignore an invalid (negative area) request + if toCoord.Y < fromCoord.Y { + return nil + } + + var err error + + var coordStart = COORD{} + var coordEnd = COORD{} + + xCurrent, yCurrent := fromCoord.X, fromCoord.Y + xEnd, yEnd := toCoord.X, toCoord.Y + + // Clear any partial initial line + if xCurrent > 0 { + coordStart.X, coordStart.Y = xCurrent, yCurrent + coordEnd.X, coordEnd.Y = xEnd, yCurrent + + err = h.clearRect(attributes, coordStart, coordEnd) + if err != nil { + return err + } + + xCurrent = 0 + yCurrent += 1 + } + + // Clear intervening rectangular section + if yCurrent < yEnd { + coordStart.X, 
coordStart.Y = xCurrent, yCurrent + coordEnd.X, coordEnd.Y = xEnd, yEnd-1 + + err = h.clearRect(attributes, coordStart, coordEnd) + if err != nil { + return err + } + + xCurrent = 0 + yCurrent = yEnd + } + + // Clear remaining partial ending line + coordStart.X, coordStart.Y = xCurrent, yCurrent + coordEnd.X, coordEnd.Y = xEnd, yEnd + + err = h.clearRect(attributes, coordStart, coordEnd) + if err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error { + region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X} + width := toCoord.X - fromCoord.X + 1 + height := toCoord.Y - fromCoord.Y + 1 + size := uint32(width) * uint32(height) + + if size <= 0 { + return nil + } + + buffer := make([]CHAR_INFO, size) + + char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes} + for i := 0; i < int(size); i++ { + buffer[i] = char + } + + err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, ®ion) + if err != nil { + return err + } + + return nil +} diff --git a/extender/code/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/extender/code/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go new file mode 100644 index 000000000..2d27fa1d0 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go @@ -0,0 +1,118 @@ +// +build windows + +package winterm + +// effectiveSr gets the current effective scroll region in buffer coordinates +func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion { + top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom) + bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom) + if top >= bottom { + top = window.Top + bottom = window.Bottom + } + return scrollRegion{top: top, bottom: bottom} +} + +func (h *windowsAnsiEventHandler) scrollUp(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + sr := h.effectiveSr(info.Window) + return h.scroll(param, sr, info) +} + +func (h *windowsAnsiEventHandler) scrollDown(param int) error { + return h.scrollUp(-param) +} + +func (h *windowsAnsiEventHandler) deleteLines(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + start := info.CursorPosition.Y + sr := h.effectiveSr(info.Window) + // Lines cannot be inserted or deleted outside the scrolling region. + if start >= sr.top && start <= sr.bottom { + sr.top = start + return h.scroll(param, sr, info) + } else { + return nil + } +} + +func (h *windowsAnsiEventHandler) insertLines(param int) error { + return h.deleteLines(-param) +} + +// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates. 
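+// A positive param moves the region's contents up (destOrigin.Y = sr.top - param);
+// scrollDown (and, via deleteLines, insertLines) reuses this path with a negated param.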
+func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error {
+	h.logf("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom)
+	h.logf("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom)
+
+	// Copy from and clip to the scroll region (full buffer width)
+	scrollRect := SMALL_RECT{
+		Top:    sr.top,
+		Bottom: sr.bottom,
+		Left:   0,
+		Right:  info.Size.X - 1,
+	}
+
+	// Origin to which area should be copied
+	destOrigin := COORD{
+		X: 0,
+		Y: sr.top - int16(param),
+	}
+
+	char := CHAR_INFO{
+		UnicodeChar: ' ',
+		Attributes:  h.attributes,
+	}
+
+	if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) deleteCharacters(param int) error {
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+	return h.scrollLine(param, info.CursorPosition, info)
+}
+
+func (h *windowsAnsiEventHandler) insertCharacters(param int) error {
+	return h.deleteCharacters(-param)
+}
+
+// scrollLine scrolls a line horizontally starting at the provided position by a number of columns.
+func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error {
+	// Copy from and clip to the scroll region (full buffer width)
+	scrollRect := SMALL_RECT{
+		Top:    position.Y,
+		Bottom: position.Y,
+		Left:   position.X,
+		Right:  info.Size.X - 1,
+	}
+
+	// Origin to which area should be copied
+	destOrigin := COORD{
+		X: position.X - int16(columns),
+		Y: position.Y,
+	}
+
+	char := CHAR_INFO{
+		UnicodeChar: ' ',
+		Attributes:  h.attributes,
+	}
+
+	if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/extender/code/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go b/extender/code/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go
new file mode 100644
index 000000000..afa7635d7
--- /dev/null
+++ b/extender/code/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go
@@ -0,0 +1,9 @@
+// +build windows
+
+package winterm
+
+// addInRange increments a value by the passed quantity while ensuring the values
+// always remain within the supplied min / max range.
+func addInRange(n int16, increment int16, min int16, max int16) int16 { + return ensureInRange(n+increment, min, max) +} diff --git a/extender/code/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/extender/code/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go new file mode 100644 index 000000000..2d40fb75a --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go @@ -0,0 +1,743 @@ +// +build windows + +package winterm + +import ( + "bytes" + "log" + "os" + "strconv" + + "github.com/Azure/go-ansiterm" +) + +type windowsAnsiEventHandler struct { + fd uintptr + file *os.File + infoReset *CONSOLE_SCREEN_BUFFER_INFO + sr scrollRegion + buffer bytes.Buffer + attributes uint16 + inverted bool + wrapNext bool + drewMarginByte bool + originMode bool + marginByte byte + curInfo *CONSOLE_SCREEN_BUFFER_INFO + curPos COORD + logf func(string, ...interface{}) +} + +type Option func(*windowsAnsiEventHandler) + +func WithLogf(f func(string, ...interface{})) Option { + return func(w *windowsAnsiEventHandler) { + w.logf = f + } +} + +func CreateWinEventHandler(fd uintptr, file *os.File, opts ...Option) ansiterm.AnsiEventHandler { + infoReset, err := GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil + } + + h := &windowsAnsiEventHandler{ + fd: fd, + file: file, + infoReset: infoReset, + attributes: infoReset.Attributes, + } + for _, o := range opts { + o(h) + } + + if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { + logFile, _ := os.Create("winEventHandler.log") + logger := log.New(logFile, "", log.LstdFlags) + if h.logf != nil { + l := h.logf + h.logf = func(s string, v ...interface{}) { + l(s, v...) + logger.Printf(s, v...) + } + } else { + h.logf = logger.Printf + } + } + + if h.logf == nil { + h.logf = func(string, ...interface{}) {} + } + + return h +} + +type scrollRegion struct { + top int16 + bottom int16 +} + +// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the +// current cursor position and scroll region settings, in which case it returns +// true. If no special handling is necessary, then it does nothing and returns +// false. +// +// In the false case, the caller should ensure that a carriage return +// and line feed are inserted or that the text is otherwise wrapped. +func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) { + if h.wrapNext { + if err := h.Flush(); err != nil { + return false, err + } + h.clearWrap() + } + pos, info, err := h.getCurrentInfo() + if err != nil { + return false, err + } + sr := h.effectiveSr(info.Window) + if pos.Y == sr.bottom { + // Scrolling is necessary. Let Windows automatically scroll if the scrolling region + // is the full window. + if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom { + if includeCR { + pos.X = 0 + h.updatePos(pos) + } + return false, nil + } + + // A custom scroll region is active. Scroll the window manually to simulate + // the LF. + if err := h.Flush(); err != nil { + return false, err + } + h.logf("Simulating LF inside scroll region") + if err := h.scrollUp(1); err != nil { + return false, err + } + if includeCR { + pos.X = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return false, err + } + } + return true, nil + + } else if pos.Y < info.Window.Bottom { + // Let Windows handle the LF. 
+ pos.Y++ + if includeCR { + pos.X = 0 + } + h.updatePos(pos) + return false, nil + } else { + // The cursor is at the bottom of the screen but outside the scroll + // region. Skip the LF. + h.logf("Simulating LF outside scroll region") + if includeCR { + if err := h.Flush(); err != nil { + return false, err + } + pos.X = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return false, err + } + } + return true, nil + } +} + +// executeLF executes a LF without a CR. +func (h *windowsAnsiEventHandler) executeLF() error { + handled, err := h.simulateLF(false) + if err != nil { + return err + } + if !handled { + // Windows LF will reset the cursor column position. Write the LF + // and restore the cursor position. + pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) + if pos.X != 0 { + if err := h.Flush(); err != nil { + return err + } + h.logf("Resetting cursor position for LF without CR") + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + } + } + return nil +} + +func (h *windowsAnsiEventHandler) Print(b byte) error { + if h.wrapNext { + h.buffer.WriteByte(h.marginByte) + h.clearWrap() + if _, err := h.simulateLF(true); err != nil { + return err + } + } + pos, info, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X == info.Size.X-1 { + h.wrapNext = true + h.marginByte = b + } else { + pos.X++ + h.updatePos(pos) + h.buffer.WriteByte(b) + } + return nil +} + +func (h *windowsAnsiEventHandler) Execute(b byte) error { + switch b { + case ansiterm.ANSI_TAB: + h.logf("Execute(TAB)") + // Move to the next tab stop, but preserve auto-wrap if already set. + if !h.wrapNext { + pos, info, err := h.getCurrentInfo() + if err != nil { + return err + } + pos.X = (pos.X + 8) - pos.X%8 + if pos.X >= info.Size.X { + pos.X = info.Size.X - 1 + } + if err := h.Flush(); err != nil { + return err + } + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + } + return nil + + case ansiterm.ANSI_BEL: + h.buffer.WriteByte(ansiterm.ANSI_BEL) + return nil + + case ansiterm.ANSI_BACKSPACE: + if h.wrapNext { + if err := h.Flush(); err != nil { + return err + } + h.clearWrap() + } + pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X > 0 { + pos.X-- + h.updatePos(pos) + h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE) + } + return nil + + case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED: + // Treat as true LF. + return h.executeLF() + + case ansiterm.ANSI_LINE_FEED: + // Simulate a CR and LF for now since there is no way in go-ansiterm + // to tell if the LF should include CR (and more things break when it's + // missing than when it's incorrectly added). 
+ handled, err := h.simulateLF(true) + if handled || err != nil { + return err + } + return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) + + case ansiterm.ANSI_CARRIAGE_RETURN: + if h.wrapNext { + if err := h.Flush(); err != nil { + return err + } + h.clearWrap() + } + pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X != 0 { + pos.X = 0 + h.updatePos(pos) + h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN) + } + return nil + + default: + return nil + } +} + +func (h *windowsAnsiEventHandler) CUU(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUU: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorVertical(-param) +} + +func (h *windowsAnsiEventHandler) CUD(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUD: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorVertical(param) +} + +func (h *windowsAnsiEventHandler) CUF(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUF: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorHorizontal(param) +} + +func (h *windowsAnsiEventHandler) CUB(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUB: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorHorizontal(-param) +} + +func (h *windowsAnsiEventHandler) CNL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CNL: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorLine(param) +} + +func (h *windowsAnsiEventHandler) CPL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CPL: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorLine(-param) +} + +func (h *windowsAnsiEventHandler) CHA(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CHA: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorColumn(param) +} + +func (h *windowsAnsiEventHandler) VPA(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("VPA: [[%d]]", param) + h.clearWrap() + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + window := h.getCursorWindow(info) + position := info.CursorPosition + position.Y = window.Top + int16(param) - 1 + return h.setCursorPosition(position, window) +} + +func (h *windowsAnsiEventHandler) CUP(row int, col int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUP: [[%d %d]]", row, col) + h.clearWrap() + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + window := h.getCursorWindow(info) + position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1} + return h.setCursorPosition(position, window) +} + +func (h *windowsAnsiEventHandler) HVP(row int, col int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("HVP: [[%d %d]]", row, col) + h.clearWrap() + return h.CUP(row, col) +} + +func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DECTCEM: [%v]", []string{strconv.FormatBool(visible)}) + h.clearWrap() + return nil +} + +func (h *windowsAnsiEventHandler) DECOM(enable bool) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DECOM: [%v]", []string{strconv.FormatBool(enable)}) + h.clearWrap() + h.originMode = enable + return h.CUP(1, 1) +} + +func (h 
*windowsAnsiEventHandler) DECCOLM(use132 bool) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DECCOLM: [%v]", []string{strconv.FormatBool(use132)}) + h.clearWrap() + if err := h.ED(2); err != nil { + return err + } + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + targetWidth := int16(80) + if use132 { + targetWidth = 132 + } + if info.Size.X < targetWidth { + if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { + h.logf("set buffer failed: %v", err) + return err + } + } + window := info.Window + window.Left = 0 + window.Right = targetWidth - 1 + if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { + h.logf("set window failed: %v", err) + return err + } + if info.Size.X > targetWidth { + if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { + h.logf("set buffer failed: %v", err) + return err + } + } + return SetConsoleCursorPosition(h.fd, COORD{0, 0}) +} + +func (h *windowsAnsiEventHandler) ED(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("ED: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + + // [J -- Erases from the cursor to the end of the screen, including the cursor position. + // [1J -- Erases from the beginning of the screen to the cursor, including the cursor position. + // [2J -- Erases the complete display. The cursor does not move. + // Notes: + // -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles + + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + var start COORD + var end COORD + + switch param { + case 0: + start = info.CursorPosition + end = COORD{info.Size.X - 1, info.Size.Y - 1} + + case 1: + start = COORD{0, 0} + end = info.CursorPosition + + case 2: + start = COORD{0, 0} + end = COORD{info.Size.X - 1, info.Size.Y - 1} + } + + err = h.clearRange(h.attributes, start, end) + if err != nil { + return err + } + + // If the whole buffer was cleared, move the window to the top while preserving + // the window-relative cursor position. + if param == 2 { + pos := info.CursorPosition + window := info.Window + pos.Y -= window.Top + window.Bottom -= window.Top + window.Top = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { + return err + } + } + + return nil +} + +func (h *windowsAnsiEventHandler) EL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("EL: [%v]", strconv.Itoa(param)) + h.clearWrap() + + // [K -- Erases from the cursor to the end of the line, including the cursor position. + // [1K -- Erases from the beginning of the line to the cursor, including the cursor position. + // [2K -- Erases the complete line. 
+ + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + var start COORD + var end COORD + + switch param { + case 0: + start = info.CursorPosition + end = COORD{info.Size.X, info.CursorPosition.Y} + + case 1: + start = COORD{0, info.CursorPosition.Y} + end = info.CursorPosition + + case 2: + start = COORD{0, info.CursorPosition.Y} + end = COORD{info.Size.X, info.CursorPosition.Y} + } + + err = h.clearRange(h.attributes, start, end) + if err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) IL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("IL: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.insertLines(param) +} + +func (h *windowsAnsiEventHandler) DL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DL: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.deleteLines(param) +} + +func (h *windowsAnsiEventHandler) ICH(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("ICH: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.insertCharacters(param) +} + +func (h *windowsAnsiEventHandler) DCH(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DCH: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.deleteCharacters(param) +} + +func (h *windowsAnsiEventHandler) SGR(params []int) error { + if err := h.Flush(); err != nil { + return err + } + strings := []string{} + for _, v := range params { + strings = append(strings, strconv.Itoa(v)) + } + + h.logf("SGR: [%v]", strings) + + if len(params) <= 0 { + h.attributes = h.infoReset.Attributes + h.inverted = false + } else { + for _, attr := range params { + + if attr == ansiterm.ANSI_SGR_RESET { + h.attributes = h.infoReset.Attributes + h.inverted = false + continue + } + + h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr)) + } + } + + attributes := h.attributes + if h.inverted { + attributes = invertAttributes(attributes) + } + err := SetConsoleTextAttribute(h.fd, attributes) + if err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) SU(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("SU: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.scrollUp(param) +} + +func (h *windowsAnsiEventHandler) SD(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("SD: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.scrollDown(param) +} + +func (h *windowsAnsiEventHandler) DA(params []string) error { + h.logf("DA: [%v]", params) + // DA cannot be implemented because it must send data on the VT100 input stream, + // which is not available to go-ansiterm. + return nil +} + +func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DECSTBM: [%d, %d]", top, bottom) + + // Windows is 0 indexed, Linux is 1 indexed + h.sr.top = int16(top - 1) + h.sr.bottom = int16(bottom - 1) + + // This command also moves the cursor to the origin. 
+	h.clearWrap()
+	return h.CUP(1, 1)
+}
+
+func (h *windowsAnsiEventHandler) RI() error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("RI: []")
+	h.clearWrap()
+
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	sr := h.effectiveSr(info.Window)
+	if info.CursorPosition.Y == sr.top {
+		return h.scrollDown(1)
+	}
+
+	return h.moveCursorVertical(-1)
+}
+
+func (h *windowsAnsiEventHandler) IND() error {
+	h.logf("IND: []")
+	return h.executeLF()
+}
+
+func (h *windowsAnsiEventHandler) Flush() error {
+	h.curInfo = nil
+	if h.buffer.Len() > 0 {
+		h.logf("Flush: [%s]", h.buffer.Bytes())
+		if _, err := h.buffer.WriteTo(h.file); err != nil {
+			return err
+		}
+	}
+
+	if h.wrapNext && !h.drewMarginByte {
+		h.logf("Flush: drawing margin byte '%c'", h.marginByte)
+
+		info, err := GetConsoleScreenBufferInfo(h.fd)
+		if err != nil {
+			return err
+		}
+
+		charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}}
+		size := COORD{1, 1}
+		position := COORD{0, 0}
+		region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y}
+		if err := WriteConsoleOutput(h.fd, charInfo, size, position, &region); err != nil {
+			return err
+		}
+		h.drewMarginByte = true
+	}
+	return nil
+}
+
+// getCurrentInfo ensures that the current console screen information has been queried
+// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos.
+func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) {
+	if h.curInfo == nil {
+		info, err := GetConsoleScreenBufferInfo(h.fd)
+		if err != nil {
+			return COORD{}, nil, err
+		}
+		h.curInfo = info
+		h.curPos = info.CursorPosition
+	}
+	return h.curPos, h.curInfo, nil
+}
+
+func (h *windowsAnsiEventHandler) updatePos(pos COORD) {
+	if h.curInfo == nil {
+		panic("failed to call getCurrentInfo before calling updatePos")
+	}
+	h.curPos = pos
+}
+
+// clearWrap clears the state where the cursor is in the margin
+// waiting for the next character before wrapping the line. This must
+// be done before most operations that act on the cursor.
+func (h *windowsAnsiEventHandler) clearWrap() {
+	h.wrapNext = false
+	h.drewMarginByte = false
+}
diff --git a/extender/code/vendor/github.com/Azure/go-autorest/.gitignore b/extender/code/vendor/github.com/Azure/go-autorest/.gitignore
new file mode 100644
index 000000000..3350aaf70
--- /dev/null
+++ b/extender/code/vendor/github.com/Azure/go-autorest/.gitignore
@@ -0,0 +1,32 @@
+# The standard Go .gitignore file follows. (Sourced from: github.com/github/gitignore/master/Go.gitignore)
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+.DS_Store
+.idea/
+.vscode/
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
+# go-autorest specific
+vendor/
+autorest/azure/example/example
diff --git a/extender/code/vendor/github.com/Azure/go-autorest/CHANGELOG.md b/extender/code/vendor/github.com/Azure/go-autorest/CHANGELOG.md
new file mode 100644
index 000000000..d1f596bfc
--- /dev/null
+++ b/extender/code/vendor/github.com/Azure/go-autorest/CHANGELOG.md
@@ -0,0 +1,1004 @@
+# CHANGELOG
+
+## v14.2.0
+
+- Added package comment to make `github.com/Azure/go-autorest` importable.
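As a worked illustration of the v14.2.0 entry above: once the root package carries a package comment, a consumer can blank-import it, which is typically done only to record a dependency on the root module. A minimal sketch (the pinning use case is an assumption, not stated in the changelog):

```go
// Minimal sketch: blank-importing the go-autorest root package, which the
// v14.2.0 package comment makes possible. Before that release this import
// failed to compile because the repository root had no Go package clause.
package main

import (
	_ "github.com/Azure/go-autorest"
)

func main() {}
```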
+ +## v14.1.1 + +### Bug Fixes + +- Change `x-ms-authorization-auxiliary` header value separator to comma. + +## v14.1.0 + +### New Features + +- Added `azure.SetEnvironment()` that will update the global environments map with the specified values. + +## v14.0.1 + +### Bug Fixes + +- Fix race condition when refreshing token. +- Fixed some tests to work with Go 1.14. + +## v14.0.0 + +## Breaking Changes + +- By default, the `DoRetryForStatusCodes` functions will no longer infinitely retry a request when the response returns an HTTP status code of 429 (StatusTooManyRequests). To opt in to the old behavior set `autorest.Count429AsRetry` to `false`. + +## New Features + +- Variable `autorest.Max429Delay` can be used to control the maximum delay between retries when a 429 is received with no `Retry-After` header. The default is zero which means there is no cap. + +## v13.4.0 + +## New Features + +- Added field `SendDecorators` to the `Client` type. This can be used to specify a custom chain of SendDecorators per client. +- Added method `Client.Send()` which includes logic for selecting the preferred chain of SendDecorators. + +## v13.3.3 + +### Bug Fixes + +- Fixed connection leak when retrying requests. +- Enabled exponential back-off with a 2-minute cap when retrying on 429. +- Fixed some cases where errors were inadvertently dropped. + +## v13.3.2 + +### Bug Fixes + +- Updated `autorest.AsStringSlice()` to convert slice elements to their string representation. + +## v13.3.1 + +- Updated external dependencies. + +### Bug Fixes + +## v13.3.0 + +### New Features + +- Added support for shared key and shared access signature token authorization. + - `autorest.NewSharedKeyAuthorizer()` and dependent types. + - `autorest.NewSASTokenAuthorizer()` and dependent types. +- Added `ServicePrincipalToken.SetCustomRefresh()` so a custom refresh function can be invoked when a token has expired. + +### Bug Fixes + +- Fixed `cli.AccessTokensPath()` to respect `AZURE_CONFIG_DIR` when set. +- Support parsing error messages in XML responses. + +## v13.2.0 + +### New Features + +- Added the following functions to replace their versions that don't take a context. + - `adal.InitiateDeviceAuthWithContext()` + - `adal.CheckForUserCompletionWithContext()` + - `adal.WaitForUserCompletionWithContext()` + +## v13.1.0 + +### New Features + +- Added support for MSI authentication on Azure App Service and Azure Functions. + +## v13.0.2 + +### Bug Fixes + +- Always retry a request even if the sender returns a non-nil error. + +## v13.0.1 + +## Bug Fixes + +- Fixed `autorest.WithQueryParameters()` so that it properly encodes multi-value query parameters. + +## v13.0.0 + +## Breaking Changes + +The `tracing` package has been rewritten to provide a common interface for consumers to wire in the tracing package of their choice. +What this means is that by default no tracing provider will be compiled into your program and setting the `AZURE_SDK_TRACING_ENABLED` +environment variable will have no effect. To enable this previous behavior you must now add the following import to your source file. +```go + import _ "github.com/Azure/go-autorest/tracing/opencensus" +``` +The APIs required by autorest-generated code have remained but some APIs have been removed and new ones added. +The following APIs and variables have been removed (the majority of them were moved to the `opencensus` package). 
+- tracing.Transport
+- tracing.Enable()
+- tracing.EnableWithAIForwarding()
+- tracing.Disable()
+
+The following APIs and types have been added.
+- tracing.Tracer
+- tracing.Register()
+
+To hook up a tracer simply call `tracing.Register()` passing in a type that satisfies the `tracing.Tracer` interface.
+
+## v12.4.3
+
+### Bug Fixes
+
+- `autorest.MultiTenantServicePrincipalTokenAuthorizer` will now properly add its auxiliary bearer tokens.
+
+## v12.4.2
+
+### Bug Fixes
+
+- Improvements to the fixes made in v12.4.1.
+  - Remove `override` stanza from Gopkg.toml and `replace` directive from go.mod as they don't apply when being consumed as a dependency.
+  - Switched to latest version of `ocagent` that still depends on protobuf v1.2.
+  - Add indirect dependencies to the `required` clause with matching `constraint` stanzas so that `dep` dependencies match go.sum.
+
+## v12.4.1
+
+### Bug Fixes
+
+- Updated OpenCensus and OCAgent versions to versions that don't depend on v1.3+ of protobuf as it was breaking kubernetes.
+- Pinned opencensus-proto to a version that's compatible with our versions of OpenCensus and OCAgent.
+
+## v12.4.0
+
+### New Features
+
+- Added `autorest.WithPrepareDecorators` and `autorest.GetPrepareDecorators` for adding and retrieving a custom chain of PrepareDecorators to the provided context.
+
+## v12.3.0
+
+### New Features
+
+- Support for multi-tenant via x-ms-authorization-auxiliary header has been added for client credentials with
+  secret scenario; this basically bundles multiple OAuthConfig and ServicePrincipalToken types into corresponding
+  MultiTenant* types along with a new authorizer that adds the primary and auxiliary token headers to the request.
+  The authentication helpers have been updated to support this scenario; if environment var AZURE_AUXILIARY_TENANT_IDS
+  is set with a semicolon delimited list of tenants the multi-tenant codepath will kick in to create the appropriate authorizer.
+  See `adal.NewMultiTenantOAuthConfig`, `adal.NewMultiTenantServicePrincipalToken` and `autorest.NewMultiTenantServicePrincipalTokenAuthorizer`
+  along with their supporting types and methods.
+- Added `autorest.WithSendDecorators` and `autorest.GetSendDecorators` for adding and retrieving a custom chain of SendDecorators to the provided context.
+- Added `autorest.DoRetryForStatusCodesWithCap` and `autorest.DelayForBackoffWithCap` to enforce an upper bound on the duration between retries.
+
+## v12.2.0
+
+### New Features
+
+- Added `autorest.WithXML`, `autorest.AsMerge`, `autorest.WithBytes` preparer decorators.
+- Added `autorest.ByUnmarshallingBytes` response decorator.
+- Added `Response.IsHTTPStatus` and `Response.HasHTTPStatus` helper methods for inspecting HTTP status code in `autorest.Response` types.
+
+### Bug Fixes
+
+- `autorest.DelayWithRetryAfter` now supports HTTP-Dates in the `Retry-After` header and is not limited to just 429 status codes.
+
+## v12.1.0
+
+### New Features
+
+- Added `to.ByteSlicePtr()`.
+- Added blob/queue storage resource ID to `azure.ResourceIdentifier`.
+
+## v12.0.0
+
+### Breaking Changes
+
+In preparation for modules the following deprecated content has been removed.
+
+  - async.NewFuture()
+  - async.Future.Done()
+  - async.Future.WaitForCompletion()
+  - async.DoPollForAsynchronous()
+  - The `utils` package
+  - validation.NewErrorWithValidationError()
+  - The `version` package
+
+## v11.9.0
+
+### New Features
+
+- Add `ResourceIdentifiers` field to `azure.Environment` containing resource IDs for public and sovereign clouds.
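The v13.0.0 notes above describe hooking up a custom tracer through `tracing.Register()`. A minimal no-op sketch, assuming the three-method shape of `tracing.Tracer` (`NewTransport`/`StartSpan`/`EndSpan`) that the generated clients call through:

```go
// Sketch of wiring a custom tracer via tracing.Register(), per the v13.0.0
// notes above. The no-op implementation below assumes the three-method
// shape of the tracing.Tracer interface.
package main

import (
	"context"
	"net/http"

	"github.com/Azure/go-autorest/tracing"
)

type noopTracer struct{}

// NewTransport returns the base transport unchanged; a real tracer would
// wrap it in an instrumented http.RoundTripper.
func (noopTracer) NewTransport(base *http.Transport) http.RoundTripper { return base }

// StartSpan would begin a span and stash it in the returned context.
func (noopTracer) StartSpan(ctx context.Context, name string) context.Context { return ctx }

// EndSpan would close the span started by StartSpan.
func (noopTracer) EndSpan(ctx context.Context, httpStatusCode int, err error) {}

func main() {
	tracing.Register(noopTracer{})
}
```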
+
+## v11.8.0
+
+### New Features
+
+- Added `autorest.NewClientWithOptions()` to support endpoints that require free renegotiation.
+
+## v11.7.1
+
+### Bug Fixes
+
+- Fix missing support for http(s) proxy when using the default sender.
+
+## v11.7.0
+
+### New Features
+
+- Added methods to obtain a ServicePrincipalToken on the various credential configuration types in the `auth` package.
+
+## v11.6.1
+
+### Bug Fixes
+
+- Fix ACR DNS endpoint for government clouds.
+- Add Cosmos DB DNS endpoints.
+- Update dependencies to resolve build breaks in OpenCensus.
+
+## v11.6.0
+
+### New Features
+
+- Added type `autorest.BasicAuthorizer` to support Basic authentication.
+
+## v11.5.2
+
+### Bug Fixes
+
+- Fixed `GetTokenFromCLI` not working with zsh.
+
+## v11.5.1
+
+### Bug Fixes
+
+- In `Client.sender()` set the minimum TLS version on HTTP clients to 1.2.
+
+## v11.5.0
+
+### New Features
+
+- The `auth` package has been refactored so that the environment and file settings are now available.
+- The methods used in `auth.NewAuthorizerFromEnvironment()` are now exported so that custom authorization chains can be created.
+- Added support for certificate authorization for file-based config.
+
+## v11.4.0
+
+### New Features
+
+- Added `adal.AddToUserAgent()` so callers can append custom data to the user-agent header used for ADAL requests.
+- Exported `adal.UserAgent()` for parity with `autorest.Client`.
+
+## v11.3.2
+
+### Bug Fixes
+
+- In `Future.WaitForCompletionRef()` if the provided context has a deadline don't add the default deadline.
+
+## v11.3.1
+
+### Bug Fixes
+
+- For an LRO PUT operation the final GET URL was incorrectly set to the Location polling header in some cases.
+
+## v11.3.0
+
+### New Features
+
+- Added method `ServicePrincipalToken()` to `DeviceFlowConfig` type.
+
+## v11.2.8
+
+### Bug Fixes
+
+- Deprecate content in the `version` package. The functionality has been superseded by content in the `autorest` package.
+
+## v11.2.7
+
+### Bug Fixes
+
+- Fix environment variable name for enabling tracing from `AZURE_SDK_TRACING_ENABELD` to `AZURE_SDK_TRACING_ENABLED`.
+  Note that for backward compatibility reasons, both will work until the next major version release of the package.
+
+## v11.2.6
+
+### Bug Fixes
+
+- If zero bytes are read from a polling response body don't attempt to unmarshal them.
+
+## v11.2.5
+
+### Bug Fixes
+
+- Removed race condition in `autorest.DoRetryForStatusCodes`.
+
+## v11.2.4
+
+### Bug Fixes
+
+- Function `cli.ProfilePath` now respects environment `AZURE_CONFIG_DIR` if available.
+
+## v11.2.1
+
+NOTE: Versions of Go prior to 1.10 have been removed from CI as they no
+longer work with golint.
+
+### Bug Fixes
+
+- Method `MSIConfig.Authorizer` now supports user-assigned identities.
+- The adal package now reports its own user-agent string.
+
+## v11.2.0
+
+### New Features
+
+- Added `tracing` package that enables instrumentation of HTTP and API calls.
+  Setting the env variable `AZURE_SDK_TRACING_ENABLED` or calling `tracing.Enable`
+  will start instrumenting the code for metrics and traces.
+  Additionally, setting the env variable `OCAGENT_TRACE_EXPORTER_ENDPOINT` or
+  calling `tracing.EnableWithAIForwarding` will start the instrumentation and connect to an
+  App Insights Local Forwarder that needs to be running. Note that if the
+  AI Local Forwarder is not running tracing will still be enabled.
+  By default, instrumentation is disabled. Once enabled, instrumentation can also
+  be programmatically disabled by calling `Disable`.
+- Added `DoneWithContext` call for checking LRO status. `Done` has been deprecated.
+
+### Bug Fixes
+
+- Don't use the initial request's context for LRO polling.
+- Don't override the `refreshLock` and the `http.Client` when unmarshalling `ServicePrincipalToken` if
+  it is already set.
+
+## v11.1.1
+
+### Bug Fixes
+
+- When creating a future always include the polling tracker even if there's a failure; this allows the underlying response to be obtained by the caller.
+
+## v11.1.0
+
+### New Features
+
+- Added `auth.NewAuthorizerFromCLI` to create an authorizer configured from the Azure 2.0 CLI.
+- Added `adal.NewOAuthConfigWithAPIVersion` to create an OAuthConfig with the specified API version.
+
+## v11.0.1
+
+### New Features
+
+- Added `x5c` header to client assertion for certificate Issuer+Subject Name authentication.
+
+## v11.0.0
+
+### Breaking Changes
+
+- To handle differences between ADFS and AAD the following fields have had their types changed from `string` to `json.Number`
+  - ExpiresIn
+  - ExpiresOn
+  - NotBefore
+
+### New Features
+
+- Added `auth.NewAuthorizerFromFileWithResource` to create an authorizer from the config file with the specified resource.
+- Setting a client's `PollingDuration` to zero will use the provided context to control an LRO's polling duration.
+
+## v10.15.5
+
+### Bug Fixes
+
+- In `DoRetryForStatusCodes`, if a request's context is cancelled return the last response.
+
+## v10.15.4
+
+### Bug Fixes
+
+- If a polling operation returns a failure status code return the associated error.
+
+## v10.15.3
+
+### Bug Fixes
+
+- Initialize the polling URL and method for an LRO tracker on each iteration, favoring the Azure-AsyncOperation header.
+
+## v10.15.2
+
+### Bug Fixes
+
+- Use fmt.Fprint when printing request/response so that any escape sequences aren't treated as format specifiers.
+
+## v10.15.1
+
+### Bug Fixes
+
+- If an LRO API returns a `Failed` provisioning state in the initial response return an error at that point so the caller doesn't have to poll.
+- For failed LROs without an OData v4 error include the response body in the error's `AdditionalInfo` field to aid in diagnosing the failure.
+
+## v10.15.0
+
+### New Features
+
+- Add initial support for request/response logging via setting environment variables.
+  Setting `AZURE_GO_SDK_LOG_LEVEL` to `LogInfo` will log request/response
+  without their bodies. To include the bodies set the log level to `LogDebug`.
+  By default the logger writes to stderr, however it can also write to stdout or a file
+  if specified in `AZURE_GO_SDK_LOG_FILE`. Note that if the specified file
+  already exists it will be truncated.
+  IMPORTANT: by default the logger will redact the Authorization and Ocp-Apim-Subscription-Key
+  headers. Any other secrets will _not_ be redacted.
+
+## v10.14.0
+
+### New Features
+
+- Added package version that contains version constants and user-agent data.
+
+### Bug Fixes
+
+- Add the user-agent to token requests.
+
+## v10.13.0
+
+- Added support for additionalInfo in ServiceError type.
+
+## v10.12.0
+
+### New Features
+
+- Added field ServicePrincipalToken.MaxMSIRefreshAttempts to configure the maximum number of attempts to refresh an MSI token.
+
+## v10.11.4
+
+### Bug Fixes
+
+- If an LRO returns http.StatusOK on the initial response with no async headers return the response body from Future.GetResult().
+- If there is no "final GET URL" return an error from Future.GetResult().
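Several of the surrounding entries (v10.9.0, v10.11.4, v10.15.x) revolve around the `azure.Future` polling flow. A hedged sketch of that flow, assuming the `NewFutureFromResponse`/`WaitForCompletionRef`/`GetResult` signatures this changelog describes:

```go
// Sketch of the LRO polling flow referenced in the entries above, under the
// assumption that the azure.Future API matches the changelog's description.
package main

import (
	"context"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

// pollLRO drives a long-running operation from its initial response to a
// final result.
func pollLRO(ctx context.Context, client autorest.Client, initial *http.Response) (*http.Response, error) {
	future, err := azure.NewFutureFromResponse(initial)
	if err != nil {
		return nil, err
	}
	// Poll until the operation reaches a terminal state.
	if err := future.WaitForCompletionRef(ctx, client); err != nil {
		return nil, err
	}
	// Final GET (when the service provides one) to fetch the result.
	return future.GetResult(client)
}

func main() {}
```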
+
+## v10.11.3
+
+### Bug Fixes
+
+- In IMDS retry logic, if we don't receive a response don't retry.
+  - Renamed the retry function so it's clear it's meant for IMDS only.
+- For error response bodies that aren't OData-v4 compliant stick the raw JSON in the ServiceError.Details field so the information isn't lost.
+  - Also add the raw HTTP response to the DetailedResponse.
+- Removed superfluous wrapping of response error in azure.DoRetryWithRegistration().
+
+## v10.11.2
+
+### Bug Fixes
+
+- Validation for integers handles int and int64 types.
+
+## v10.11.1
+
+### Bug Fixes
+
+- Adding User information to authorization config as parsed from CLI cache.
+
+## v10.11.0
+
+### New Features
+
+- Added NewServicePrincipalTokenFromManualTokenSecret for creating a new SPT using a manual token and secret
+- Added method ServicePrincipalToken.MarshalTokenJSON() to marshall the inner Token
+
+## v10.10.0
+
+### New Features
+
+- Most ServicePrincipalTokens can now be marshalled/unmarshalled to/from JSON (ServicePrincipalCertificateSecret and ServicePrincipalMSISecret are not supported).
+- Added method ServicePrincipalToken.SetRefreshCallbacks().
+
+## v10.9.2
+
+### Bug Fixes
+
+- Refreshing a refresh token obtained from a web app authorization code now works.
+
+## v10.9.1
+
+### Bug Fixes
+
+- The retry logic for MSI token requests now uses exponential backoff per the guidelines.
+- IsTemporaryNetworkError() will return true for errors that don't implement the net.Error interface.
+
+## v10.9.0
+
+### Deprecated Methods
+
+|                 Old Method |          New Method           |
+| -------------------------: | :---------------------------: |
+|          azure.NewFuture() | azure.NewFutureFromResponse() |
+| Future.WaitForCompletion() | Future.WaitForCompletionRef() |
+
+### New Features
+
+- Added azure.NewFutureFromResponse() for creating a Future from the initial response from an async operation.
+- Added Future.GetResult() for making the final GET call to retrieve the result from an async operation.
+
+### Bug Fixes
+
+- Some futures failed to return their results, this should now be fixed.
+
+## v10.8.2
+
+### Bug Fixes
+
+- Add nil-guard to token retry logic.
+
+## v10.8.1
+
+### Bug Fixes
+
+- Return a TokenRefreshError if the sender fails on the initial request.
+- Don't retry on non-temporary network errors.
+
+## v10.8.0
+
+- Added NewAuthorizerFromEnvironmentWithResource() helper function.
+
+## v10.7.0
+
+### New Features
+
+- Added \*WithContext() methods to ADAL token refresh operations.
+
+## v10.6.2
+
+- Fixed a bug on device authentication.
+
+## v10.6.1
+
+- Added retries to MSI token get request.
+
+## v10.6.0
+
+- Changed MSI token implementation. Now, the token endpoint is the IMDS endpoint.
+
+## v10.5.1
+
+### Bug Fixes
+
+- `DeviceFlowConfig.Authorizer()` now prints the device code message when running `go test`. `-v` flag is required.
+
+## v10.5.0
+
+### New Features
+
+- Added NewPollingRequestWithContext() for use with polling asynchronous operations.
+
+### Bug Fixes
+
+- Make retry logic use the request's context instead of the deprecated Cancel object.
+
+## v10.4.0
+
+### New Features
+
+- Added helper for parsing Azure Resource IDs.
+- Added deprecation message to utils.GetEnvVarOrExit()
+
+## v10.3.0
+
+### New Features
+
+- Added EnvironmentFromURL method to load an Environment from a given URL. This function is particularly useful in the private and hybrid Cloud model, where one may define their own endpoints
+- Added TokenAudience endpoint to Environment structure.
+  This is useful in private and hybrid cloud models where TokenAudience endpoint can be different from ResourceManagerEndpoint
+
+## v10.2.0
+
+### New Features
+
+- Added endpoints for batch management.
+
+## v10.1.3
+
+### Bug Fixes
+
+- In Client.Do() invoke WithInspection() last so that it will inspect WithAuthorization().
+- Fixed authorization methods to invoke p.Prepare() first, aligning them with the other preparers.
+
+## v10.1.2
+
+- Corrected comment for auth.NewAuthorizerFromFile() function.
+
+## v10.1.1
+
+- Updated version number to match current release.
+
+## v10.1.0
+
+### New Features
+
+- Expose the polling URL for futures.
+
+### Bug Fixes
+
+- Add validation.NewErrorWithValidationError back to prevent breaking changes (it is deprecated).
+
+## v10.0.0
+
+### New Features
+
+- Added target and innererror fields to ServiceError to comply with OData v4 spec.
+- The Done() method on futures will now return a ServiceError object when available (it used to return a partial value of such errors).
+- Added helper methods for obtaining authorizers.
+- Expose the polling URL for futures.
+
+### Bug Fixes
+
+- Switched from glide to dep for dependency management.
+- Fixed unmarshaling of ServiceError for JSON bodies that don't conform to the OData spec.
+- Fixed a race condition in token refresh.
+
+### Breaking Changes
+
+- The ServiceError.Details field type has been changed to match the OData v4 spec.
+- Go v1.7 has been dropped from CI.
+- API parameter validation failures will now return a unique error type validation.Error.
+- The adal.Token type has been decomposed from adal.ServicePrincipalToken (this was necessary in order to fix the token refresh race).
+
+## v9.10.0
+
+- Fix the Service Bus suffix in Azure public env
+- Add Service Bus Endpoint (AAD ResourceURI) for use in [Azure Service Bus RBAC Preview](https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-role-based-access-control)
+
+## v9.9.0
+
+### New Features
+
+- Added EventGridKeyAuthorizer for key authorization with event grid topics.
+
+### Bug Fixes
+
+- Fixed race condition when auto-refreshing service principal tokens.
+
+## v9.8.1
+
+### Bug Fixes
+
+- Added http.StatusNoContent (204) to the list of expected status codes for long-running operations.
+- Updated runtime version info so it's current.
+
+## v9.8.0
+
+### New Features
+
+- Added type azure.AsyncOpIncompleteError to be returned from a future's Result() method when the operation has not completed.
+
+## v9.7.1
+
+### Bug Fixes
+
+- Use correct AAD and Graph endpoints for US Gov environment.
+
+## v9.7.0
+
+### New Features
+
+- Added support for application/octet-stream MIME types.
+
+## v9.6.1
+
+### Bug Fixes
+
+- Ensure Authorization header is added to request when polling for registration status.
+
+## v9.6.0
+
+### New Features
+
+- Added support for acquiring tokens via MSI with a user assigned identity.
+
+## v9.5.3
+
+### Bug Fixes
+
+- Don't remove encoding of existing URL Query parameters when calling autorest.WithQueryParameters.
+- Set correct Content Type when using autorest.WithFormData.
+
+## v9.5.2
+
+### Bug Fixes
+
+- Check for nil \*http.Response before dereferencing it.
+
+## v9.5.1
+
+### Bug Fixes
+
+- Don't count http.StatusTooManyRequests (429) against the retry cap.
+- Use retry logic when SkipResourceProviderRegistration is set to true.
+
+## v9.5.0
+
+### New Features
+
+- Added support for username + password, API key, authorization code and cognitive services authentication.
+- Added field SkipResourceProviderRegistration to clients to provide a way to skip auto-registration of RPs.
+- Added utility function AsStringSlice() to convert its parameters to a string slice.
+
+### Bug Fixes
+
+- When checking for authentication failures look at the error type not the status code as it could vary.
+
+## v9.4.2
+
+### Bug Fixes
+
+- Validate parameters when creating credentials.
+- Don't retry requests if the returned status is a 401 (http.StatusUnauthorized) as it will never succeed.
+
+## v9.4.1
+
+### Bug Fixes
+
+- Update the AccessTokensPath() to read access tokens path through AZURE_ACCESS_TOKEN_FILE. If this
+  environment variable is not set, it will fall back to use default path set by Azure CLI.
+- Use case-insensitive string comparison for polling states.
+
+## v9.4.0
+
+### New Features
+
+- Added WaitForCompletion() to Future as a default polling implementation.
+
+### Bug Fixes
+
+- Method Future.Done() shouldn't update polling status for unexpected HTTP status codes.
+
+## v9.3.1
+
+### Bug Fixes
+
+- DoRetryForStatusCodes will retry if sender.Do returns a non-nil error.
+
+## v9.3.0
+
+### New Features
+
+- Added PollingMethod() to Future so callers know what kind of polling mechanism is used.
+- Added azure.ChangeToGet() which transforms an http.Request into a GET (to be used with LROs).
+
+## v9.2.0
+
+### New Features
+
+- Added support for custom Azure Stack endpoints.
+- Added type azure.Future used to track the status of long-running operations.
+
+### Bug Fixes
+
+- Preserve the original error in DoRetryWithRegistration when registration fails.
+
+## v9.1.1
+
+- Fixes a bug regarding the cookie jar on `autorest.Client.Sender`.
+
+## v9.1.0
+
+### New Features
+
+- In cases where there is a non-empty error from the service, attempt to unmarshal it instead of uniformly calling it an "Unknown" error.
+- Support for loading Azure CLI Authentication files.
+- Automatically register your subscription with the Azure Resource Provider if it hadn't been previously.
+
+### Bug Fixes
+
+- RetriableRequest can now tolerate a ReadSeekable body being read but not reset.
+- Adding missing Apache Headers
+
+## v9.0.0
+
+> **IMPORTANT:** This release was initially labeled incorrectly as `v8.4.0`. From the time it was released, it should have been marked `v9.0.0` because it contains breaking changes to the MSI packages. We apologize for any inconvenience this causes.
+
+Adding MSI Endpoint Support and CLI token rehydration.
+
+## v8.3.1
+
+Pick up bug fix in adal for MSI support.
+
+## v8.3.0
+
+Updates to Error string formats for clarity. Also, adding a copy of the http.Response to errors for an improved debugging experience.
+
+## v8.2.0
+
+### New Features
+
+- Add support for bearer authentication callbacks
+- Support 429 response codes that include "Retry-After" header
+- Support validation constraint "Pattern" for map keys
+
+### Bug Fixes
+
+- Make RetriableRequest work with multiple versions of Go
+
+## v8.1.1
+
+Updates the RetriableRequest to take advantage of GetBody() added in Go 1.8.
+
+## v8.1.0
+
+Adds RetriableRequest type for more efficient handling of retrying HTTP requests.
+
+## v8.0.0
+
+ADAL refactored into its own package.
+Support for UNIX time.
+
+## v7.3.1
+
+- Version Testing now removed from production bits that are shipped with the library.
+
+## v7.3.0
+
+- Exposing new `RespondDecorator`, `ByDiscardingBody`. This allows operations
+  to acknowledge that they do not need either the entire response body or a
+  trailing portion of it. In doing so, Go's http library can reuse HTTP
+  connections more readily.
+- Adding `PrepareDecorator` to target custom BaseURLs.
+- Adding ACR suffix to public cloud environment.
+- Updating Glide dependencies.
+
+## v7.2.5
+
+- Fixed the Active Directory endpoint for the China cloud.
+- Removes UTF-8 BOM if present in response payload.
+- Added telemetry.
+
+## v7.2.3
+
+- Fixing bug in calls to `DelayForBackoff` that caused doubling of delay
+  duration.
+
+## v7.2.2
+
+- autorest/azure: added ASM and ARM VM DNS suffixes.
+
+## v7.2.1
+
+- fixed parsing of UTC times that are not RFC3339 conformant.
+
+## v7.2.0
+
+- autorest/validation: Reformat validation error for better error message.
+
+## v7.1.0
+
+- preparer: Added support for multipart formdata - WithMultiPartFormdata()
+- preparer: Added support for sending file in request body - WithFile
+- client: Added RetryDuration parameter.
+- autorest/validation: new package for validation code for Azure Go SDK.
+
+## v7.0.7
+
+- Add trailing / to endpoint
+- azure: add EnvironmentFromName
+
+## v7.0.6
+
+- Add retry logic for 408, 500, 502, 503 and 504 status codes.
+- Change url path and query encoding logic.
+- Fix DelayForBackoff for proper exponential delay.
+- Add CookieJar in Client.
+
+## v7.0.5
+
+- Add check to start polling only when status is in [200,201,202].
+- Refactoring for unchecked errors.
+- azure/persist changes.
+- Fix 'file in use' issue in renewing token in deviceflow.
+- Store header RetryAfter for subsequent requests in polling.
+- Add attribute details in service error.
+
+## v7.0.4
+
+- Better error messages for long running operation failures
+
+## v7.0.3
+
+- Corrected DoPollForAsynchronous to properly handle the initial response
+
+## v7.0.2
+
+- Corrected DoPollForAsynchronous to continue using the polling method first discovered
+
+## v7.0.1
+
+- Fixed empty JSON input error in ByUnmarshallingJSON
+- Fixed polling support for GET calls
+- Changed format name from TimeRfc1123 to TimeRFC1123
+
+## v7.0.0
+
+- Added ByCopying responder with supporting TeeReadCloser
+- Rewrote Azure asynchronous handling
+- Reverted to only unmarshalling JSON
+- Corrected handling of RFC3339 time strings and added support for Rfc1123 time format
+
+The `json.Decoder` does not catch bad data as thoroughly as `json.Unmarshal`. Since
+`encoding/json` successfully deserializes all core types, and extended types normally provide
+their custom JSON serialization handlers, the code has been reverted back to using
+`json.Unmarshal`. The original change to use `json.Decode` was made to reduce duplicate
+code; there is no loss of function, and there is a gain in accuracy, by reverting.
+
+Additionally, Azure services indicate requests to be polled by multiple means. The existing code
+only checked for one of those (that is, the presence of the `Azure-AsyncOperation` header).
+The new code correctly covers all cases and aligns with the other Azure SDKs.
+
+## v6.1.0
+
+- Introduced `date.ByUnmarshallingJSONDate` and `date.ByUnmarshallingJSONTime` to enable JSON encoded values.
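The v7.x entries above repeatedly extend the preparer pipeline. For orientation, a small sketch of how `PrepareDecorator`s compose, using long-standing autorest helpers (`CreatePreparer`, `AsGet`, `WithBaseURL`, `WithPath`); the endpoint is hypothetical:

```go
// Sketch of the PrepareDecorator pipeline that the v7.x entries above keep
// extending. Decorators run in order to build up the final http.Request.
package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	req, err := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL("https://management.azure.com"), // hypothetical endpoint
		autorest.WithPath("/subscriptions"),
	).Prepare(&http.Request{})
	if err != nil {
		fmt.Println("prepare failed:", err)
		return
	}
	fmt.Println(req.Method, req.URL)
}
```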
+
+## v6.0.0
+
+- Completely reworked the handling of polled and asynchronous requests
+- Removed unnecessary routines
+- Reworked `mocks.Sender` to replay a series of `http.Response` objects
+- Added `PrepareDecorators` for primitive types (e.g., bool, int32)
+
+Handling polled and asynchronous requests is no longer part of `Client#Send`. Instead new
+`SendDecorators` implement different styles of polled behavior. See `autorest.DoPollForStatusCodes`
+and `azure.DoPollForAsynchronous` for examples.
+
+## v5.0.0
+
+- Added new RespondDecorators unmarshalling primitive types
+- Corrected application of inspection and authorization PrependDecorators
+
+## v4.0.0
+
+- Added support for Azure long-running operations.
+- Added cancellation support to all decorators and functions that may delay.
+- Breaking: `DelayForBackoff` now accepts a channel, which may be nil.
+
+## v3.1.0
+
+- Add support for OAuth Device Flow authorization.
+- Add support for ServicePrincipalTokens that are backed by an existing token, rather than other secret material.
+- Add helpers for persisting and restoring Tokens.
+- Increased code coverage in the github.com/Azure/autorest/azure package
+
+## v3.0.0
+
+- Breaking: `NewErrorWithError` no longer takes `statusCode int`.
+- Breaking: `NewErrorWithStatusCode` is replaced with `NewErrorWithResponse`.
+- Breaking: `Client#Send()` no longer takes `codes ...int` argument.
+- Add: XML unmarshaling support with `ByUnmarshallingXML()`
+- Stopped vending dependencies locally and switched to [Glide](https://github.com/Masterminds/glide).
+  Applications using this library should either use Glide or vendor dependencies locally some other way.
+- Add: `azure.WithErrorUnlessStatusCode()` decorator to handle Azure errors.
+- Fix: use `net/http.DefaultClient` as base client.
+- Fix: Missing inspection for polling responses added.
+- Add: CopyAndDecode helpers.
+- Improved `./autorest/to` with `[]string` helpers.
+- Removed golint suppressions in .travis.yml.
+
+## v2.1.0
+
+- Added `StatusCode` to `Error` for more easily obtaining the HTTP Response StatusCode (if any)
+
+## v2.0.0
+
+- Changed `to.StringMapPtr` method signature to return a pointer
+- Changed `ServicePrincipalCertificateSecret` and `NewServicePrincipalTokenFromCertificate` to support generic certificate and private keys
+
+## v1.0.0
+
+- Added Logging inspectors to trace http.Request / Response
+- Added support for User-Agent header
+- Changed WithHeader PrepareDecorator to use set vs. add
+- Added JSON to error when unmarshalling fails
+- Added Client#Send method
+- Corrected case of "Azure" in package paths
+- Added "to" helpers, Azure helpers, and improved ease-of-use
+- Corrected golint issues
+
+## v1.0.1
+
+- Added CHANGELOG.md
+
+## v1.1.0
+
+- Added mechanism to retrieve a ServicePrincipalToken using a certificate-signed JWT
+- Added an example of creating a certificate-based ServicePrincipal and retrieving an OAuth token using the certificate
+
+## v1.1.1
+
+- Introduce godeps and vendor dependencies introduced in v1.1.1
diff --git a/extender/code/vendor/github.com/Azure/go-autorest/GNUmakefile b/extender/code/vendor/github.com/Azure/go-autorest/GNUmakefile
new file mode 100644
index 000000000..a434e73ac
--- /dev/null
+++ b/extender/code/vendor/github.com/Azure/go-autorest/GNUmakefile
@@ -0,0 +1,23 @@
+DIR?=./autorest/
+
+default: build
+
+build: fmt
+	go install $(DIR)
+
+test:
+	go test $(DIR) || exit 1
+
+vet:
+	@echo "go vet ."
+	@go vet $(DIR)... ; if [ $$?
-eq 1 ]; then \ + echo ""; \ + echo "Vet found suspicious constructs. Please check the reported constructs"; \ + echo "and fix them if necessary before submitting the code for review."; \ + exit 1; \ + fi + +fmt: + gofmt -w $(DIR) + +.PHONY: build test vet fmt diff --git a/extender/code/vendor/github.com/Azure/go-autorest/Gopkg.lock b/extender/code/vendor/github.com/Azure/go-autorest/Gopkg.lock new file mode 100644 index 000000000..dc6e3e633 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/Gopkg.lock @@ -0,0 +1,324 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + digest = "1:892e39e5c083d0943f1e80ab8351690f183c6a5ab24e1d280adcad424c26255e" + name = "contrib.go.opencensus.io/exporter/ocagent" + packages = ["."] + pruneopts = "UT" + revision = "a8a6f458bbc1d5042322ad1f9b65eeb0b69be9ea" + version = "v0.6.0" + +[[projects]] + digest = "1:8f5acd4d4462b5136af644d25101f0968a7a94ee90fcb2059cec5b7cc42e0b20" + name = "github.com/census-instrumentation/opencensus-proto" + packages = [ + "gen-go/agent/common/v1", + "gen-go/agent/metrics/v1", + "gen-go/agent/trace/v1", + "gen-go/metrics/v1", + "gen-go/resource/v1", + "gen-go/trace/v1", + ] + pruneopts = "UT" + revision = "d89fa54de508111353cb0b06403c00569be780d8" + version = "v0.2.1" + +[[projects]] + digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "UT" + revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" + +[[projects]] + digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55" + name = "github.com/dgrijalva/jwt-go" + packages = ["."] + pruneopts = "UT" + revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" + version = "v3.2.0" + +[[projects]] + digest = "1:cf0d2e435fd4ce45b789e93ef24b5f08e86be0e9807a16beb3694e2d8c9af965" + name = "github.com/dimchansky/utfbom" + packages = ["."] + pruneopts = "UT" + revision = "d2133a1ce379ef6fa992b0514a77146c60db9d1c" + version = "v1.1.0" + +[[projects]] + branch = "master" + digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8" + name = "github.com/golang/groupcache" + packages = ["lru"] + pruneopts = "UT" + revision = "611e8accdfc92c4187d399e95ce826046d4c8d73" + +[[projects]] + digest = "1:e3839df32927e8d3403cd5aa7253d966e8ff80fc8f10e2e35d146461cd83fcfa" + name = "github.com/golang/protobuf" + packages = [ + "descriptor", + "jsonpb", + "proto", + "protoc-gen-go/descriptor", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/struct", + "ptypes/timestamp", + "ptypes/wrappers", + ] + pruneopts = "UT" + revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7" + version = "v1.3.2" + +[[projects]] + digest = "1:c560cd79300fac84f124b96225181a637a70b60155919a3c36db50b7cca6b806" + name = "github.com/grpc-ecosystem/grpc-gateway" + packages = [ + "internal", + "runtime", + "utilities", + ] + pruneopts = "UT" + revision = "f7120437bb4f6c71f7f5076ad65a45310de2c009" + version = "v1.12.1" + +[[projects]] + digest = "1:5d231480e1c64a726869bc4142d270184c419749d34f167646baa21008eb0a79" + name = "github.com/mitchellh/go-homedir" + packages = ["."] + pruneopts = "UT" + revision = "af06845cf3004701891bf4fdb884bfe4920b3727" + version = "v1.1.0" + +[[projects]] + digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + pruneopts = "UT" + revision = 
"792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + digest = "1:99d32780e5238c2621fff621123997c3e3cca96db8be13179013aea77dfab551" + name = "github.com/stretchr/testify" + packages = [ + "assert", + "require", + ] + pruneopts = "UT" + revision = "221dbe5ed46703ee255b1da0dec05086f5035f62" + version = "v1.4.0" + +[[projects]] + digest = "1:7c5e00383399fe13de0b4b65c9fdde16275407ce8ac02d867eafeaa916edcc71" + name = "go.opencensus.io" + packages = [ + ".", + "internal", + "internal/tagencoding", + "metric/metricdata", + "metric/metricproducer", + "plugin/ocgrpc", + "plugin/ochttp", + "plugin/ochttp/propagation/b3", + "plugin/ochttp/propagation/tracecontext", + "resource", + "stats", + "stats/internal", + "stats/view", + "tag", + "trace", + "trace/internal", + "trace/propagation", + "trace/tracestate", + ] + pruneopts = "UT" + revision = "aad2c527c5defcf89b5afab7f37274304195a6b2" + version = "v0.22.2" + +[[projects]] + branch = "master" + digest = "1:f604f5e2ee721b6757d962dfe7bab4f28aae50c456e39cfb2f3819762a44a6ae" + name = "golang.org/x/crypto" + packages = [ + "pkcs12", + "pkcs12/internal/rc2", + ] + pruneopts = "UT" + revision = "e9b2fee46413994441b28dfca259d911d963dfed" + +[[projects]] + branch = "master" + digest = "1:334b27eac455cb6567ea28cd424230b07b1a64334a2f861a8075ac26ce10af43" + name = "golang.org/x/lint" + packages = [ + ".", + "golint", + ] + pruneopts = "UT" + revision = "fdd1cda4f05fd1fd86124f0ef9ce31a0b72c8448" + +[[projects]] + branch = "master" + digest = "1:257a75d024975428ab9192bfc334c3490882f8cb21322ea5784ca8eca000a910" + name = "golang.org/x/net" + packages = [ + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "trace", + ] + pruneopts = "UT" + revision = "1ddd1de85cb0337b623b740a609d35817d516a8d" + +[[projects]] + branch = "master" + digest = "1:382bb5a7fb4034db3b6a2d19e5a4a6bcf52f4750530603c01ca18a172fa3089b" + name = "golang.org/x/sync" + packages = ["semaphore"] + pruneopts = "UT" + revision = "cd5d95a43a6e21273425c7ae415d3df9ea832eeb" + +[[projects]] + branch = "master" + digest = "1:4da420ceda5f68e8d748aa2169d0ed44ffadb1bbd6537cf778a49563104189b8" + name = "golang.org/x/sys" + packages = ["unix"] + pruneopts = "UT" + revision = "ce4227a45e2eb77e5c847278dcc6a626742e2945" + +[[projects]] + digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/language", + "internal/language/compact", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + ] + pruneopts = "UT" + revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475" + version = "v0.3.2" + +[[projects]] + branch = "master" + digest = "1:4eb5ea8395fb60212dd58b92c9db80bab59d5e99c7435f9a6a0a528c373b60e7" + name = "golang.org/x/tools" + packages = [ + "go/ast/astutil", + "go/gcexportdata", + "go/internal/gcimporter", + "go/types/typeutil", + ] + pruneopts = "UT" + revision = "259af5ff87bdcd4abf2ecda8edc3f13f04f26a42" + +[[projects]] + digest = "1:964bb30febc27fabfbec4759fa530c6ec35e77a7c85fed90b9317ea39a054877" + name = "google.golang.org/api" + packages = ["support/bundler"] + pruneopts = "UT" + revision = "8a410c21381766a810817fd6200fce8838ecb277" + version = "v0.14.0" + +[[projects]] + branch = "master" + digest = "1:a8d5c2c6e746b3485e36908ab2a9e3d77b86b81f8156d88403c7d2b462431dfd" + 
name = "google.golang.org/genproto" + packages = [ + "googleapis/api/httpbody", + "googleapis/rpc/status", + "protobuf/field_mask", + ] + pruneopts = "UT" + revision = "51378566eb590fa106d1025ea12835a4416dda84" + +[[projects]] + digest = "1:b59ce3ddb11daeeccccc9cb3183b58ebf8e9a779f1c853308cd91612e817a301" + name = "google.golang.org/grpc" + packages = [ + ".", + "backoff", + "balancer", + "balancer/base", + "balancer/roundrobin", + "binarylog/grpc_binarylog_v1", + "codes", + "connectivity", + "credentials", + "credentials/internal", + "encoding", + "encoding/proto", + "grpclog", + "internal", + "internal/backoff", + "internal/balancerload", + "internal/binarylog", + "internal/buffer", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/grpcsync", + "internal/resolver/dns", + "internal/resolver/passthrough", + "internal/syscall", + "internal/transport", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "serviceconfig", + "stats", + "status", + "tap", + ] + pruneopts = "UT" + revision = "1a3960e4bd028ac0cec0a2afd27d7d8e67c11514" + version = "v1.25.1" + +[[projects]] + digest = "1:b75b3deb2bce8bc079e16bb2aecfe01eb80098f5650f9e93e5643ca8b7b73737" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "UT" + revision = "1f64d6156d11335c3f22d9330b0ad14fc1e789ce" + version = "v2.2.7" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "contrib.go.opencensus.io/exporter/ocagent", + "github.com/dgrijalva/jwt-go", + "github.com/dimchansky/utfbom", + "github.com/mitchellh/go-homedir", + "github.com/stretchr/testify/require", + "go.opencensus.io/plugin/ochttp", + "go.opencensus.io/plugin/ochttp/propagation/tracecontext", + "go.opencensus.io/stats/view", + "go.opencensus.io/trace", + "golang.org/x/crypto/pkcs12", + "golang.org/x/lint/golint", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/extender/code/vendor/github.com/Azure/go-autorest/Gopkg.toml b/extender/code/vendor/github.com/Azure/go-autorest/Gopkg.toml new file mode 100644 index 000000000..1fc286596 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/Gopkg.toml @@ -0,0 +1,59 @@ +# Gopkg.toml example +# +# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + +required = ["golang.org/x/lint/golint"] + +[prune] + go-tests = true + unused-packages = true + +[[constraint]] + name = "contrib.go.opencensus.io/exporter/ocagent" + version = "0.6.0" + +[[constraint]] + name = "github.com/dgrijalva/jwt-go" + version = "3.2.0" + +[[constraint]] + name = "github.com/dimchansky/utfbom" + version = "1.1.0" + +[[constraint]] + name = "github.com/mitchellh/go-homedir" + version = "1.1.0" + +[[constraint]] + name = "github.com/stretchr/testify" + version = "1.3.0" + +[[constraint]] + name = "go.opencensus.io" + version = "0.22.0" + +[[constraint]] + branch = "master" + name = "golang.org/x/crypto" diff --git a/extender/code/vendor/github.com/Azure/go-autorest/LICENSE b/extender/code/vendor/github.com/Azure/go-autorest/LICENSE new file mode 100644 index 000000000..b9d6a27ea --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/extender/code/vendor/github.com/Azure/go-autorest/README.md b/extender/code/vendor/github.com/Azure/go-autorest/README.md new file mode 100644 index 000000000..de1e19a44 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/README.md @@ -0,0 +1,165 @@ +# go-autorest + +[![GoDoc](https://godoc.org/github.com/Azure/go-autorest/autorest?status.png)](https://godoc.org/github.com/Azure/go-autorest/autorest) +[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/Azure.go-autorest?branchName=master)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=625&branchName=master) +[![Go Report Card](https://goreportcard.com/badge/Azure/go-autorest)](https://goreportcard.com/report/Azure/go-autorest) + +Package go-autorest provides an HTTP request client for use with [Autorest](https://github.com/Azure/autorest.go)-generated API client packages. + +An authentication client tested with Azure Active Directory (AAD) is also +provided in this repo in the package +`github.com/Azure/go-autorest/autorest/adal`. Despite its name, this package +is maintained only as part of the Azure Go SDK and is not related to other +"ADAL" libraries in [github.com/AzureAD](https://github.com/AzureAD). + +## Overview + +Package go-autorest implements an HTTP request pipeline suitable for use across +multiple goroutines and provides the shared routines used by packages generated +by [Autorest](https://github.com/Azure/autorest.go). 
+ +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. A typical pattern is: + +```go + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) + + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) + + err = Respond(resp, + ByDiscardingBody(), + ByClosing()) +``` + +Each phase relies on decorators to modify and / or manage processing. Decorators may first modify +and then pass the data along, pass the data first and then modify the result, or wrap themselves +around passing the data (such as a logger might do). Decorators run in the order provided. For +example, the following: + +```go + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) +``` + +will set the URL to: + +``` + https://microsoft.com/a/b/c +``` + +Preparers and Responders may be shared and re-used (assuming the underlying decorators support +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, +all bound together by means of input / output channels. + +Decorators hold their passed state within a closure (such as the path components in the example +above). Be careful to share Preparers and Responders only in a context where such held state +applies. For example, it may not make sense to share a Preparer that applies a query string from a +fixed set of values. Similarly, sharing a Responder that reads the response body into a passed +struct (e.g., `ByUnmarshallingJson`) is likely incorrect. + +Errors raised by autorest objects and methods will conform to the `autorest.Error` interface. + +See the included examples for more detail. For details on the suggested use of this package by +generated clients, see the Client described below. + +## Helpers + +### Handling Swagger Dates + +The Swagger specification (https://swagger.io) that drives AutoRest +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure correct +parsing and formatting. + +### Handling Empty Values + +In JSON, missing values have different semantics than empty values. This is especially true for +services using the HTTP PATCH verb. The JSON submitted with a PATCH request generally contains +only those values to modify. Missing values are to be left unchanged. Developers, then, require a +means to both specify an empty value and to leave the value out of the submitted JSON. + +The Go JSON package (`encoding/json`) supports the `omitempty` tag. When specified, it omits +empty values from the rendered JSON. Since Go defines default values for all base types (such as "" +for string and 0 for int) and provides no means to mark a value as actually empty, the JSON package +treats default values as meaning empty, omitting them from the rendered JSON. This means that, using +the Go base types encoded through the default JSON package, it is not possible to create JSON to +clear a value at the server. + +The workaround within the Go community is to use pointers to base types in lieu of base types within +structures that map to JSON. For example, instead of a value of type `string`, the workaround uses +`*string`. 
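+A minimal sketch of how the pointer changes the marshalled output (the `widget` struct is illustrative; assume `encoding/json` and `fmt` are imported): + +```go + type widget struct { + Name string `json:"name,omitempty"` // "" is the zero value, so it is omitted + Desc *string `json:"desc,omitempty"` // nil is omitted; a pointer to "" is marshalled as "" + } + + empty := "" + b, _ := json.Marshal(widget{Desc: &empty}) + fmt.Println(string(b)) // prints {"desc":""}, an explicit empty value the server can act on +```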
While this enables distinguishing empty values from those to be unchanged, creating +pointers to a base type (notably constant, in-line values) requires additional variables. This, for +example, + +```go + s := struct { + S *string + }{ S: &"foo" } +``` +fails, while, this + +```go + v := "foo" + s := struct { + S *string + }{ S: &v } +``` +succeeds. + +To ease using pointers, the subpackage `to` contains helpers that convert to and from pointers for +Go base types which have Swagger analogs. It also provides a helper that converts between +`map[string]string` and `map[string]*string`, enabling the JSON to specify that the value +associated with a key should be cleared. With the helpers, the previous example becomes + +```go + s := struct { + S *string + }{ S: to.StringPtr("foo") } +``` + +## Install + +```bash +go get github.com/Azure/go-autorest/autorest +go get github.com/Azure/go-autorest/autorest/azure +go get github.com/Azure/go-autorest/autorest/date +go get github.com/Azure/go-autorest/autorest/to +``` + +### Using with Go Modules +In [v12.0.1](https://github.com/Azure/go-autorest/pull/386), this repository introduced the following modules. + +- autorest/adal +- autorest/azure/auth +- autorest/azure/cli +- autorest/date +- autorest/mocks +- autorest/to +- autorest/validation +- autorest +- logger +- tracing + +Tagging cumulative SDK releases as a whole (e.g. `v12.3.0`) is still enabled to support consumers of this repo that have not yet migrated to modules. + +## License + +See LICENSE file. + +----- + +This project has adopted the [Microsoft Open Source Code of +Conduct](https://opensource.microsoft.com/codeofconduct/). For more information +see the [Code of Conduct +FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact +[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional +questions or comments. diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/LICENSE b/extender/code/vendor/github.com/Azure/go-autorest/autorest/LICENSE new file mode 100644 index 000000000..b9d6a27ea --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE b/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE new file mode 100644 index 000000000..b9d6a27ea --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/README.md new file mode 100644 index 000000000..fec416a9c --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/README.md @@ -0,0 +1,292 @@ +# Azure Active Directory authentication for Go + +This is a standalone package for authenticating with Azure Active +Directory from other Go libraries and applications, in particular the [Azure SDK +for Go](https://github.com/Azure/azure-sdk-for-go). + +Note: Despite the package's name it is not related to other "ADAL" libraries +maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues +should be opened in [this repo's](https://github.com/Azure/go-autorest/issues) +or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue +trackers. + +## Install + +```bash +go get -u github.com/Azure/go-autorest/autorest/adal +``` + +## Usage + +An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli). + +### Register an Azure AD Application with secret + + +1. 
Register a new application with a `secret` credential + + ``` + az ad app create \ + --display-name example-app \ + --homepage https://example-app/home \ + --identifier-uris https://example-app/app \ + --password secret + ``` + +2. Create a service principal using the `Application ID` from the previous step + + ``` + az ad sp create --id "Application ID" + ``` + + * Replace `Application ID` with `appId` from step 1. + +### Register an Azure AD Application with certificate + +1. Create a private key + + ``` + openssl genrsa -out "example-app.key" 2048 + ``` + +2. Create the certificate + + ``` + openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr" + openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000 + ``` + +3. Create the PKCS12 version of the certificate, which also contains the private key + + ``` + openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass: + + ``` + +4. Register a new application with the certificate content from `example-app.crt` + + ``` + certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)" + + az ad app create \ + --display-name example-app \ + --homepage https://example-app/home \ + --identifier-uris https://example-app/app \ + --key-usage Verify --end-date 2018-01-01 \ + --key-value "${certificateContents}" + ``` + +5. Create a service principal using the `Application ID` from the previous step + + ``` + az ad sp create --id "APPLICATION_ID" + ``` + + * Replace `APPLICATION_ID` with `appId` from step 4. + + +### Grant the necessary permissions + +Azure relies on a Role-Based Access Control (RBAC) model to manage access to resources at a fine-grained +level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles) +which can be assigned to a service principal of an Azure AD application depending on your needs. + +``` +az role assignment create --assignee "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME" +``` + +* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from the previous step. +* Replace the `ROLE_NAME` with a role name of your choice. + +It is also possible to define custom role definitions. + +``` +az role definition create --role-definition role-definition.json +``` + +* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of the `role-definition.json` file. + + +### Acquire Access Token + +The common configuration used by all flows: + +```Go +const activeDirectoryEndpoint = "https://login.microsoftonline.com/" +tenantID := "TENANT_ID" +oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID) + +applicationID := "APPLICATION_ID" + +callback := func(token adal.Token) error { + // This is called after the token is acquired + return nil +} + +// The resource for which the token is acquired +resource := "https://management.core.windows.net/" +``` + +* Replace the `TENANT_ID` with your tenant ID. +* Replace the `APPLICATION_ID` with the value from the previous section. + +#### Client Credentials + +```Go +applicationSecret := "APPLICATION_SECRET" + +spt, err := adal.NewServicePrincipalToken( + *oauthConfig, + applicationID, + applicationSecret, + resource, + callbacks...)
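+// NewServicePrincipalToken only constructs the credential here; the actual token request happens when Refresh is called below.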
+if err != nil { + return nil, err +} + +// Acquire a new access token +err = spt.Refresh() +if (err == nil) { + token := spt.Token +} +``` + +* Replace the `APPLICATION_SECRET` with the `password` value from previous section. + +#### Client Certificate + +```Go +certificatePath := "./example-app.pfx" + +certData, err := ioutil.ReadFile(certificatePath) +if err != nil { + return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err) +} + +// Get the certificate and private key from pfx file +certificate, rsaPrivateKey, err := decodePkcs12(certData, "") +if err != nil { + return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) +} + +spt, err := adal.NewServicePrincipalTokenFromCertificate( + *oauthConfig, + applicationID, + certificate, + rsaPrivateKey, + resource, + callbacks...) + +// Acquire a new access token +err = spt.Refresh() +if (err == nil) { + token := spt.Token +} +``` + +* Update the certificate path to point to the example-app.pfx file which was created in previous section. + + +#### Device Code + +```Go +oauthClient := &http.Client{} + +// Acquire the device code +deviceCode, err := adal.InitiateDeviceAuth( + oauthClient, + *oauthConfig, + applicationID, + resource) +if err != nil { + return nil, fmt.Errorf("Failed to start device auth flow: %s", err) +} + +// Display the authentication message +fmt.Println(*deviceCode.Message) + +// Wait here until the user is authenticated +token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) +if err != nil { + return nil, fmt.Errorf("Failed to finish device auth flow: %s", err) +} + +spt, err := adal.NewServicePrincipalTokenFromManualToken( + *oauthConfig, + applicationID, + resource, + *token, + callbacks...) + +if (err == nil) { + token := spt.Token +} +``` + +#### Username password authenticate + +```Go +spt, err := adal.NewServicePrincipalTokenFromUsernamePassword( + *oauthConfig, + applicationID, + username, + password, + resource, + callbacks...) + +if (err == nil) { + token := spt.Token +} +``` + +#### Authorization code authenticate + +``` Go +spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode( + *oauthConfig, + applicationID, + clientSecret, + authorizationCode, + redirectURI, + resource, + callbacks...) + +err = spt.Refresh() +if (err == nil) { + token := spt.Token +} +``` + +### Command Line Tool + +A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above. 
+ +``` +adal -h + +Usage of ./adal: + -applicationId string + application id + -certificatePath string + path to pkcs12/PFX application certificate + -mode string + authentication mode (device, secret, cert, refresh) (default "device") + -resource string + resource for which the token is requested + -secret string + application secret + -tenantId string + tenant id + -tokenCachePath string + location of OAuth token cache (default "/home/cgc/.adal/accessToken.json") +``` + +Example: acquire a token for `https://management.core.windows.net/` using the device code flow: + +``` +adal -mode device \ + -applicationId "APPLICATION_ID" \ + -tenantId "TENANT_ID" \ + -resource https://management.core.windows.net/ + +``` diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/config.go new file mode 100644 index 000000000..fa5964742 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/config.go @@ -0,0 +1,151 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "errors" + "fmt" + "net/url" +) + +const ( + activeDirectoryEndpointTemplate = "%s/oauth2/%s%s" +) + +// OAuthConfig represents the endpoints needed +// in OAuth operations +type OAuthConfig struct { + AuthorityEndpoint url.URL `json:"authorityEndpoint"` + AuthorizeEndpoint url.URL `json:"authorizeEndpoint"` + TokenEndpoint url.URL `json:"tokenEndpoint"` + DeviceCodeEndpoint url.URL `json:"deviceCodeEndpoint"` +} + +// IsZero returns true if the OAuthConfig object is zero-initialized. +func (oac OAuthConfig) IsZero() bool { + return oac == OAuthConfig{} +} + +func validateStringParam(param, name string) error { + if len(param) == 0 { + return fmt.Errorf("parameter '%s' cannot be empty", name) + } + return nil +} + +// NewOAuthConfig returns an OAuthConfig with tenant-specific URLs +func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { + apiVer := "1.0" + return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer) +} + +// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant-specific URLs. +// If apiVersion is not nil, the "api-version" query parameter will be appended to the endpoint URLs with the specified value.
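+// For example (hypothetical endpoint and tenant values), an apiVersion of "1.0" produces a token endpoint of +// https://login.microsoftonline.com/TENANT_ID/oauth2/token?api-version=1.0.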
+func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) { + if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil { + return nil, err + } + api := "" + // it's legal for tenantID to be empty so don't validate it + if apiVersion != nil { + if err := validateStringParam(*apiVersion, "apiVersion"); err != nil { + return nil, err + } + api = fmt.Sprintf("?api-version=%s", *apiVersion) + } + u, err := url.Parse(activeDirectoryEndpoint) + if err != nil { + return nil, err + } + authorityURL, err := u.Parse(tenantID) + if err != nil { + return nil, err + } + authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api)) + if err != nil { + return nil, err + } + tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api)) + if err != nil { + return nil, err + } + deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api)) + if err != nil { + return nil, err + } + + return &OAuthConfig{ + AuthorityEndpoint: *authorityURL, + AuthorizeEndpoint: *authorizeURL, + TokenEndpoint: *tokenURL, + DeviceCodeEndpoint: *deviceCodeURL, + }, nil +} + +// MultiTenantOAuthConfig provides endpoints for primary and auxiliary tenant IDs. +type MultiTenantOAuthConfig interface { + PrimaryTenant() *OAuthConfig + AuxiliaryTenants() []*OAuthConfig +} + +// OAuthOptions contains optional OAuthConfig creation arguments. +type OAuthOptions struct { + APIVersion string +} + +func (c OAuthOptions) apiVersion() string { + if c.APIVersion != "" { + return c.APIVersion + } + return "1.0" +} + +// NewMultiTenantOAuthConfig creates an object that supports multitenant OAuth configuration. +// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information.
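+// A sketch of a call, with placeholder endpoint and tenant IDs: +// +// cfg, err := NewMultiTenantOAuthConfig( +// "https://login.microsoftonline.com/", "PRIMARY_TENANT_ID", +// []string{"AUX_TENANT_ID"}, OAuthOptions{}) +// if err == nil { +// primary, auxiliary := cfg.PrimaryTenant(), cfg.AuxiliaryTenants() +// _, _ = primary, auxiliary +// }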
+func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) { + if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 { + return nil, errors.New("must specify one to three auxiliary tenants") + } + mtCfg := multiTenantOAuthConfig{ + cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1), + } + apiVer := options.apiVersion() + pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer) + if err != nil { + return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err) + } + mtCfg.cfgs[0] = pri + for i := range auxiliaryTenantIDs { + aux, err := NewOAuthConfig(activeDirectoryEndpoint, auxiliaryTenantIDs[i]) + if err != nil { + return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err) + } + mtCfg.cfgs[i+1] = aux + } + return mtCfg, nil +} + +type multiTenantOAuthConfig struct { + // first config in the slice is the primary tenant + cfgs []*OAuthConfig +} + +func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig { + return m.cfgs[0] +} + +func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig { + return m.cfgs[1:] +} diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go new file mode 100644 index 000000000..9daa4b58b --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go @@ -0,0 +1,273 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/* + This file is largely based on rjw57/oauth2device's code, with the following differences: + * scope -> resource, and only allow a single one + * receive "Message" in the DeviceCode struct and show it to users as the prompt + * azure-xplat-cli has the following behavior that this emulates: + - does not send client_secret during the token exchange + - sends resource again in the token exchange request +*/ + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" +) + +const ( + logPrefix = "autorest/adal/devicetoken:" +) + +var ( + // ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow + ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix) + + // ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow + ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix) + + // ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow + ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix) + + // ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow + ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix) + + // ErrDeviceSlowDown represents the service telling us we're polling too often during device flow + ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix) + + // ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow + ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix) + + // ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow + ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix) + + errCodeSendingFails = "Error occurred while sending request for Device Authorization Code" + errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint" + errTokenSendingFails = "Error occurred while sending request with device code for a token" + errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)" + errStatusNotOK = "Error HTTP status != 200" +) + +// DeviceCode is the object returned by the device auth endpoint +// It contains information to instruct the user to complete the auth flow +type DeviceCode struct { + DeviceCode *string `json:"device_code,omitempty"` + UserCode *string `json:"user_code,omitempty"` + VerificationURL *string `json:"verification_url,omitempty"` + ExpiresIn *int64 `json:"expires_in,string,omitempty"` + Interval *int64 `json:"interval,string,omitempty"` + + Message *string `json:"message"` // Azure specific + Resource string // the following fields are stored when initiating the flow and reused when exchanging the code + OAuthConfig OAuthConfig + ClientID string +} + +// TokenError is the object returned by the token exchange endpoint +// when something is amiss +type TokenError struct { + Error *string `json:"error,omitempty"` + ErrorCodes []int `json:"error_codes,omitempty"` + ErrorDescription *string `json:"error_description,omitempty"` + Timestamp *string `json:"timestamp,omitempty"` + TraceID *string `json:"trace_id,omitempty"` +} + +// deviceToken is the object returned by the token exchange
endpoint +// It can either look like a Token or an ErrorToken, so put both here +// and check for presence of "Error" to know if we are in error state +type deviceToken struct { + Token + TokenError +} + +// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode +// that can be used with CheckForUserCompletion or WaitForUserCompletion. +// Deprecated: use InitiateDeviceAuthWithContext() instead. +func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { + return InitiateDeviceAuthWithContext(context.Background(), sender, oauthConfig, clientID, resource) +} + +// InitiateDeviceAuthWithContext initiates a device auth flow. It returns a DeviceCode +// that can be used with CheckForUserCompletion or WaitForUserCompletion. +func InitiateDeviceAuthWithContext(ctx context.Context, sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { + v := url.Values{ + "client_id": []string{clientID}, + "resource": []string{resource}, + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req.WithContext(ctx)) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK) + } + + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrDeviceCodeEmpty + } + + var code DeviceCode + err = json.Unmarshal(rb, &code) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) + } + + code.ClientID = clientID + code.Resource = resource + code.OAuthConfig = oauthConfig + + return &code, nil +} + +// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint +// to see if the device flow has: been completed, timed out, or otherwise failed +// Deprecated: use CheckForUserCompletionWithContext() instead. 
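A hedged sketch of kicking off the device flow with the initiation helpers above (the completion checkers follow next). The tenant, client ID, and resource are placeholders; a plain *http.Client satisfies the Sender interface, as sender.go later in this diff notes.

package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	cfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com/", "my-tenant-id")
	if err != nil {
		log.Fatal(err)
	}
	code, err := adal.InitiateDeviceAuthWithContext(
		context.Background(), &http.Client{}, *cfg, "my-client-id", "https://management.azure.com/")
	if err != nil {
		log.Fatal(err)
	}
	// Message carries the Azure-specific prompt telling the user where to
	// enter UserCode; showing it verbatim is the intended UX.
	fmt.Println(*code.Message)
}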
+func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + return CheckForUserCompletionWithContext(context.Background(), sender, code) +} + +// CheckForUserCompletionWithContext takes a DeviceCode and checks with the Azure AD OAuth endpoint +// to see if the device flow has: been completed, timed out, or otherwise failed +func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) { + v := url.Values{ + "client_id": []string{code.ClientID}, + "code": []string{*code.DeviceCode}, + "grant_type": []string{OAuthGrantTypeDeviceCode}, + "resource": []string{code.Resource}, + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req.WithContext(ctx)) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrOAuthTokenEmpty + } + + var token deviceToken + err = json.Unmarshal(rb, &token) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if token.Error == nil { + return &token.Token, nil + } + + switch *token.Error { + case "authorization_pending": + return nil, ErrDeviceAuthorizationPending + case "slow_down": + return nil, ErrDeviceSlowDown + case "access_denied": + return nil, ErrDeviceAccessDenied + case "code_expired": + return nil, ErrDeviceCodeExpired + default: + // return a more meaningful error message if available + if token.ErrorDescription != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, *token.Error, *token.ErrorDescription) + } + return nil, ErrDeviceGeneric + } +} + +// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs. +// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. +// Deprecated: use WaitForUserCompletionWithContext() instead. +func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + return WaitForUserCompletionWithContext(context.Background(), sender, code) +} + +// WaitForUserCompletionWithContext calls CheckForUserCompletion repeatedly until a token is granted or an error +// state occurs. This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. 
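For illustration, a hand-rolled polling loop equivalent in spirit to WaitForUserCompletionWithContext, whose real implementation follows immediately below; in practice prefer the library helper. Assumes the imports from the previous sketch plus "time".

func pollForToken(ctx context.Context, sender adal.Sender, code *adal.DeviceCode) (*adal.Token, error) {
	interval := time.Duration(*code.Interval) * time.Second
	for {
		token, err := adal.CheckForUserCompletionWithContext(ctx, sender, code)
		switch err {
		case nil:
			return token, nil
		case adal.ErrDeviceAuthorizationPending:
			// the user has not finished yet; retry after one interval
		case adal.ErrDeviceSlowDown:
			interval *= 2 // the server asked us to back off
		default:
			return nil, err // everything else is fatal
		}
		select {
		case <-time.After(interval):
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}
}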
+func WaitForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) { + intervalDuration := time.Duration(*code.Interval) * time.Second + waitDuration := intervalDuration + + for { + token, err := CheckForUserCompletionWithContext(ctx, sender, code) + + if err == nil { + return token, nil + } + + switch err { + case ErrDeviceSlowDown: + waitDuration += waitDuration + case ErrDeviceAuthorizationPending: + // noop + default: // everything else is "fatal" to us + return nil, err + } + + if waitDuration > (intervalDuration * 3) { + return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix) + } + + select { + case <-time.After(waitDuration): + // noop + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod b/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod new file mode 100644 index 000000000..abcc27d4c --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod @@ -0,0 +1,12 @@ +module github.com/Azure/go-autorest/autorest/adal + +go 1.12 + +require ( + github.com/Azure/go-autorest v14.2.0+incompatible + github.com/Azure/go-autorest/autorest/date v0.3.0 + github.com/Azure/go-autorest/autorest/mocks v0.4.1 + github.com/Azure/go-autorest/tracing v0.6.0 + github.com/form3tech-oss/jwt-go v3.2.2+incompatible + golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 +) diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum b/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum new file mode 100644 index 000000000..9d55b0f59 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum @@ -0,0 +1,19 @@ +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go new file mode 100644 index 000000000..7551b7923 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go new file mode 100644 index 000000000..2a974a39b --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go @@ -0,0 +1,135 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "golang.org/x/crypto/pkcs12" +) + +var ( + // ErrMissingCertificate is returned when no local certificate is found in the provided PFX data. + ErrMissingCertificate = errors.New("adal: certificate missing") + + // ErrMissingPrivateKey is returned when no private key is found in the provided PFX data. + ErrMissingPrivateKey = errors.New("adal: private key missing") +) + +// LoadToken restores a Token object from a file located at 'path'. 
+func LoadToken(path string) (*Token, error) { + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) + } + defer file.Close() + + var token Token + + dec := json.NewDecoder(file) + if err = dec.Decode(&token); err != nil { + return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err) + } + return &token, nil +} + +// SaveToken persists an oauth token at the given location on disk. +// It moves the new file into place so it can safely be used to replace an existing file +// that maybe accessed by multiple processes. +func SaveToken(path string, mode os.FileMode, token Token) error { + dir := filepath.Dir(path) + err := os.MkdirAll(dir, os.ModePerm) + if err != nil { + return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err) + } + + newFile, err := ioutil.TempFile(dir, "token") + if err != nil { + return fmt.Errorf("failed to create the temp file to write the token: %v", err) + } + tempPath := newFile.Name() + + if err := json.NewEncoder(newFile).Encode(token); err != nil { + return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err) + } + if err := newFile.Close(); err != nil { + return fmt.Errorf("failed to close temp file %s: %v", tempPath, err) + } + + // Atomic replace to avoid multi-writer file corruptions + if err := os.Rename(tempPath, path); err != nil { + return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err) + } + if err := os.Chmod(path, mode); err != nil { + return fmt.Errorf("failed to chmod the token file %s: %v", path, err) + } + return nil +} + +// DecodePfxCertificateData extracts the x509 certificate and RSA private key from the provided PFX data. +// The PFX data must contain a private key along with a certificate whose public key matches that of the +// private key or an error is returned. +// If the private key is not password protected pass the empty string for password. 
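DecodePfxCertificateData itself follows; first, a quick round-trip sketch for the Save/Load pair above. The cache path and token contents are fabricated; 0600 keeps the file private to the current user.

package main

import (
	"log"
	"os"
	"path/filepath"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	path := filepath.Join(os.TempDir(), "adal-token.json") // hypothetical cache location

	tok := adal.Token{AccessToken: "fake-for-illustration"}
	if err := adal.SaveToken(path, 0600, tok); err != nil {
		log.Fatal(err)
	}
	// SaveToken wrote to a temp file and renamed it into place, so a
	// concurrent reader never sees a partially written token.
	restored, err := adal.LoadToken(path)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(restored.AccessToken)
}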
+func DecodePfxCertificateData(pfxData []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) { + blocks, err := pkcs12.ToPEM(pfxData, password) + if err != nil { + return nil, nil, err + } + // first extract the private key + var priv *rsa.PrivateKey + for _, block := range blocks { + if block.Type == "PRIVATE KEY" { + priv, err = x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, nil, err + } + break + } + } + if priv == nil { + return nil, nil, ErrMissingPrivateKey + } + // now find the certificate with the matching public key of our private key + var cert *x509.Certificate + for _, block := range blocks { + if block.Type == "CERTIFICATE" { + pcert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, nil, err + } + certKey, ok := pcert.PublicKey.(*rsa.PublicKey) + if !ok { + // keep looking + continue + } + if priv.E == certKey.E && priv.N.Cmp(certKey.N) == 0 { + // found a match + cert = pcert + break + } + } + } + if cert == nil { + return nil, nil, ErrMissingCertificate + } + return cert, priv, nil +} diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go new file mode 100644 index 000000000..d7e4372bb --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go @@ -0,0 +1,95 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "crypto/tls" + "net/http" + "net/http/cookiejar" + "sync" + + "github.com/Azure/go-autorest/tracing" +) + +const ( + contentType = "Content-Type" + mimeTypeFormPost = "application/x-www-form-urlencoded" +) + +var defaultSender Sender +var defaultSenderInit = &sync.Once{} + +// Sender is the interface that wraps the Do method to send HTTP requests. +// +// The standard http.Client conforms to this interface. +type Sender interface { + Do(*http.Request) (*http.Response, error) +} + +// SenderFunc is a method that implements the Sender interface. +type SenderFunc func(*http.Request) (*http.Response, error) + +// Do implements the Sender interface on SenderFunc. +func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { + return sf(r) +} + +// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the +// http.Request and pass it along or, first, pass the http.Request along then react to the +// http.Response result. +type SendDecorator func(Sender) Sender + +// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. +func CreateSender(decorators ...SendDecorator) Sender { + return DecorateSender(sender(), decorators...) +} + +// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to +// the Sender. 
Decorators are applied in the order received, but their affect upon the request +// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a +// post-decorator (pass the http.Request along and react to the results in http.Response). +func DecorateSender(s Sender, decorators ...SendDecorator) Sender { + for _, decorate := range decorators { + s = decorate(s) + } + return s +} + +func sender() Sender { + // note that we can't init defaultSender in init() since it will + // execute before calling code has had a chance to enable tracing + defaultSenderInit.Do(func() { + // Use behaviour compatible with DefaultTransport, but require TLS minimum version. + defaultTransport := http.DefaultTransport.(*http.Transport) + transport := &http.Transport{ + Proxy: defaultTransport.Proxy, + DialContext: defaultTransport.DialContext, + MaxIdleConns: defaultTransport.MaxIdleConns, + IdleConnTimeout: defaultTransport.IdleConnTimeout, + TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout, + ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout, + TLSClientConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + }, + } + var roundTripper http.RoundTripper = transport + if tracing.IsEnabled() { + roundTripper = tracing.NewTransport(transport) + } + j, _ := cookiejar.New(nil) + defaultSender = &http.Client{Jar: j, Transport: roundTripper} + }) + return defaultSender +} diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/token.go new file mode 100644 index 000000000..b83f16a49 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/token.go @@ -0,0 +1,1198 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
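Before token.go gets underway, a sketch of the decorator machinery from sender.go above. The header value is arbitrary, and withUserAgent is a hypothetical helper, not part of the package.

package main

import (
	"net/http"

	"github.com/Azure/go-autorest/autorest/adal"
)

// withUserAgent is a pre-decorator: it mutates the request and then hands it
// to the next Sender in the chain.
func withUserAgent(ua string) adal.SendDecorator {
	return func(s adal.Sender) adal.Sender {
		return adal.SenderFunc(func(r *http.Request) (*http.Response, error) {
			r.Header.Set("User-Agent", ua)
			return s.Do(r)
		})
	}
}

func main() {
	// CreateSender wraps the package's default TLS 1.2+ client, applying
	// decorators in the order given.
	sender := adal.CreateSender(withUserAgent("my-tool/0.0.1"))
	_ = sender // hand to e.g. InitiateDeviceAuthWithContext, or spt.SetSender
}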
+ +import ( + "context" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "math" + "net/http" + "net/url" + "os" + "strings" + "sync" + "time" + + "github.com/Azure/go-autorest/autorest/date" + "github.com/form3tech-oss/jwt-go" +) + +const ( + defaultRefresh = 5 * time.Minute + + // OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow + OAuthGrantTypeDeviceCode = "device_code" + + // OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows + OAuthGrantTypeClientCredentials = "client_credentials" + + // OAuthGrantTypeUserPass is the "grant_type" identifier used in username and password auth flows + OAuthGrantTypeUserPass = "password" + + // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows + OAuthGrantTypeRefreshToken = "refresh_token" + + // OAuthGrantTypeAuthorizationCode is the "grant_type" identifier used in authorization code flows + OAuthGrantTypeAuthorizationCode = "authorization_code" + + // metadataHeader is the header required by MSI extension + metadataHeader = "Metadata" + + // msiEndpoint is the well known endpoint for getting MSI authentications tokens + msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" + + // the API version to use for the MSI endpoint + msiAPIVersion = "2018-02-01" + + // the default number of attempts to refresh an MSI authentication token + defaultMaxMSIRefreshAttempts = 5 + + // asMSIEndpointEnv is the environment variable used to store the endpoint on App Service and Functions + asMSIEndpointEnv = "MSI_ENDPOINT" + + // asMSISecretEnv is the environment variable used to store the request secret on App Service and Functions + asMSISecretEnv = "MSI_SECRET" + + // the API version to use for the App Service MSI endpoint + appServiceAPIVersion = "2017-09-01" +) + +// OAuthTokenProvider is an interface which should be implemented by an access token retriever +type OAuthTokenProvider interface { + OAuthToken() string +} + +// MultitenantOAuthTokenProvider provides tokens used for multi-tenant authorization. +type MultitenantOAuthTokenProvider interface { + PrimaryOAuthToken() string + AuxiliaryOAuthTokens() []string +} + +// TokenRefreshError is an interface used by errors returned during token refresh. +type TokenRefreshError interface { + error + Response() *http.Response +} + +// Refresher is an interface for token refresh functionality +type Refresher interface { + Refresh() error + RefreshExchange(resource string) error + EnsureFresh() error +} + +// RefresherWithContext is an interface for token refresh functionality +type RefresherWithContext interface { + RefreshWithContext(ctx context.Context) error + RefreshExchangeWithContext(ctx context.Context, resource string) error + EnsureFreshWithContext(ctx context.Context) error +} + +// TokenRefreshCallback is the type representing callbacks that will be called after +// a successful token refresh +type TokenRefreshCallback func(Token) error + +// TokenRefresh is a type representing a custom callback to refresh a token +type TokenRefresh func(ctx context.Context, resource string) (*Token, error) + +// Token encapsulates the access token used to authorize Azure requests. 
+// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response +type Token struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + + ExpiresIn json.Number `json:"expires_in"` + ExpiresOn json.Number `json:"expires_on"` + NotBefore json.Number `json:"not_before"` + + Resource string `json:"resource"` + Type string `json:"token_type"` +} + +func newToken() Token { + return Token{ + ExpiresIn: "0", + ExpiresOn: "0", + NotBefore: "0", + } +} + +// IsZero returns true if the token object is zero-initialized. +func (t Token) IsZero() bool { + return t == Token{} +} + +// Expires returns the time.Time when the Token expires. +func (t Token) Expires() time.Time { + s, err := t.ExpiresOn.Float64() + if err != nil { + s = -3600 + } + + expiration := date.NewUnixTimeFromSeconds(s) + + return time.Time(expiration).UTC() +} + +// IsExpired returns true if the Token is expired, false otherwise. +func (t Token) IsExpired() bool { + return t.WillExpireIn(0) +} + +// WillExpireIn returns true if the Token will expire after the passed time.Duration interval +// from now, false otherwise. +func (t Token) WillExpireIn(d time.Duration) bool { + return !t.Expires().After(time.Now().Add(d)) +} + +//OAuthToken return the current access token +func (t *Token) OAuthToken() string { + return t.AccessToken +} + +// ServicePrincipalSecret is an interface that allows various secret mechanism to fill the form +// that is submitted when acquiring an oAuth token. +type ServicePrincipalSecret interface { + SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error +} + +// ServicePrincipalNoSecret represents a secret type that contains no secret +// meaning it is not valid for fetching a fresh token. This is used by Manual +type ServicePrincipalNoSecret struct { +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret +// It only returns an error for the ServicePrincipalNoSecret type +func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token") +} + +// MarshalJSON implements the json.Marshaler interface. +func (noSecret ServicePrincipalNoSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalNoSecret", + }) +} + +// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization. +type ServicePrincipalTokenSecret struct { + ClientSecret string `json:"value"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using the client_secret. +func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("client_secret", tokenSecret.ClientSecret) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. 
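Interrupting briefly before the MarshalJSON implementation that follows: a sketch of the expiry helpers on Token above. The token literal is fabricated, and ExpiresOn is seconds since the Unix epoch, matching what Expires() decodes.

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
	"time"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	tok := adal.Token{
		AccessToken: "fake",
		ExpiresOn:   json.Number(strconv.FormatInt(time.Now().Add(2*time.Minute).Unix(), 10)),
	}
	fmt.Println(tok.IsExpired())                   // false: still two minutes left
	fmt.Println(tok.WillExpireIn(5 * time.Minute)) // true: inside the five-minute window
}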
+func (tokenSecret ServicePrincipalTokenSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Value string `json:"value"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalTokenSecret", + Value: tokenSecret.ClientSecret, + }) +} + +// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. +type ServicePrincipalCertificateSecret struct { + Certificate *x509.Certificate + PrivateKey *rsa.PrivateKey +} + +// SignJwt returns the JWT signed with the certificate's private key. +func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { + hasher := sha1.New() + _, err := hasher.Write(secret.Certificate.Raw) + if err != nil { + return "", err + } + + thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + + // The jti (JWT ID) claim provides a unique identifier for the JWT. + jti := make([]byte, 20) + _, err = rand.Read(jti) + if err != nil { + return "", err + } + + token := jwt.New(jwt.SigningMethodRS256) + token.Header["x5t"] = thumbprint + x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)} + token.Header["x5c"] = x5c + token.Claims = jwt.MapClaims{ + "aud": spt.inner.OauthConfig.TokenEndpoint.String(), + "iss": spt.inner.ClientID, + "sub": spt.inner.ClientID, + "jti": base64.URLEncoding.EncodeToString(jti), + "nbf": time.Now().Unix(), + "exp": time.Now().Add(24 * time.Hour).Unix(), + } + + signedString, err := token.SignedString(secret.PrivateKey) + return signedString, err +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. +func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + jwt, err := secret.SignJwt(spt) + if err != nil { + return err + } + + v.Set("client_assertion", jwt) + v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalCertificateSecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalCertificateSecret is not supported") +} + +// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension. +type ServicePrincipalMSISecret struct { +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (msiSecret ServicePrincipalMSISecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalMSISecret is not supported") +} + +// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth. +type ServicePrincipalUsernamePasswordSecret struct { + Username string `json:"username"` + Password string `json:"password"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("username", secret.Username) + v.Set("password", secret.Password) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (secret ServicePrincipalUsernamePasswordSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Username string `json:"username"` + Password string `json:"password"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalUsernamePasswordSecret", + Username: secret.Username, + Password: secret.Password, + }) +} + +// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth. +type ServicePrincipalAuthorizationCodeSecret struct { + ClientSecret string `json:"value"` + AuthorizationCode string `json:"authCode"` + RedirectURI string `json:"redirect"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("code", secret.AuthorizationCode) + v.Set("client_secret", secret.ClientSecret) + v.Set("redirect_uri", secret.RedirectURI) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Value string `json:"value"` + AuthCode string `json:"authCode"` + Redirect string `json:"redirect"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalAuthorizationCodeSecret", + Value: secret.ClientSecret, + AuthCode: secret.AuthorizationCode, + Redirect: secret.RedirectURI, + }) +} + +// ServicePrincipalToken encapsulates a Token created for a Service Principal. +type ServicePrincipalToken struct { + inner servicePrincipalToken + refreshLock *sync.RWMutex + sender Sender + customRefreshFunc TokenRefresh + refreshCallbacks []TokenRefreshCallback + // MaxMSIRefreshAttempts is the maximum number of attempts to refresh an MSI token. + // Settings this to a value less than 1 will use the default value. + MaxMSIRefreshAttempts int +} + +// MarshalTokenJSON returns the marshalled inner token. +func (spt ServicePrincipalToken) MarshalTokenJSON() ([]byte, error) { + return json.Marshal(spt.inner.Token) +} + +// SetRefreshCallbacks replaces any existing refresh callbacks with the specified callbacks. +func (spt *ServicePrincipalToken) SetRefreshCallbacks(callbacks []TokenRefreshCallback) { + spt.refreshCallbacks = callbacks +} + +// SetCustomRefreshFunc sets a custom refresh function used to refresh the token. +func (spt *ServicePrincipalToken) SetCustomRefreshFunc(customRefreshFunc TokenRefresh) { + spt.customRefreshFunc = customRefreshFunc +} + +// MarshalJSON implements the json.Marshaler interface. +func (spt ServicePrincipalToken) MarshalJSON() ([]byte, error) { + return json.Marshal(spt.inner) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
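A cautious sketch of round-tripping a ServicePrincipalToken through JSON; UnmarshalJSON, defined just below, re-selects the concrete secret type from the embedded "secret"/"type" field, and the certificate and MSI secret types deliberately refuse to (un)marshal. Assumes spt came from one of the constructors later in this file, plus the usual encoding/json and adal imports.

func roundTrip(spt *adal.ServicePrincipalToken) (*adal.ServicePrincipalToken, error) {
	data, err := json.Marshal(spt) // fails for certificate- and MSI-backed tokens
	if err != nil {
		return nil, err
	}
	var restored adal.ServicePrincipalToken
	if err := json.Unmarshal(data, &restored); err != nil {
		return nil, err
	}
	return &restored, nil // refreshLock and sender are re-initialized as needed
}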
+func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error { + // need to determine the token type + raw := map[string]interface{}{} + err := json.Unmarshal(data, &raw) + if err != nil { + return err + } + secret := raw["secret"].(map[string]interface{}) + switch secret["type"] { + case "ServicePrincipalNoSecret": + spt.inner.Secret = &ServicePrincipalNoSecret{} + case "ServicePrincipalTokenSecret": + spt.inner.Secret = &ServicePrincipalTokenSecret{} + case "ServicePrincipalCertificateSecret": + return errors.New("unmarshalling ServicePrincipalCertificateSecret is not supported") + case "ServicePrincipalMSISecret": + return errors.New("unmarshalling ServicePrincipalMSISecret is not supported") + case "ServicePrincipalUsernamePasswordSecret": + spt.inner.Secret = &ServicePrincipalUsernamePasswordSecret{} + case "ServicePrincipalAuthorizationCodeSecret": + spt.inner.Secret = &ServicePrincipalAuthorizationCodeSecret{} + default: + return fmt.Errorf("unrecognized token type '%s'", secret["type"]) + } + err = json.Unmarshal(data, &spt.inner) + if err != nil { + return err + } + // Don't override the refreshLock or the sender if those have been already set. + if spt.refreshLock == nil { + spt.refreshLock = &sync.RWMutex{} + } + if spt.sender == nil { + spt.sender = sender() + } + return nil +} + +// internal type used for marshalling/unmarshalling +type servicePrincipalToken struct { + Token Token `json:"token"` + Secret ServicePrincipalSecret `json:"secret"` + OauthConfig OAuthConfig `json:"oauth"` + ClientID string `json:"clientID"` + Resource string `json:"resource"` + AutoRefresh bool `json:"autoRefresh"` + RefreshWithin time.Duration `json:"refreshWithin"` +} + +func validateOAuthConfig(oac OAuthConfig) error { + if oac.IsZero() { + return fmt.Errorf("parameter 'oauthConfig' cannot be zero-initialized") + } + return nil +} + +// NewServicePrincipalTokenWithSecret create a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation. 
+func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(id, "id"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("parameter 'secret' cannot be nil") + } + spt := &ServicePrincipalToken{ + inner: servicePrincipalToken{ + Token: newToken(), + OauthConfig: oauthConfig, + Secret: secret, + ClientID: id, + Resource: resource, + AutoRefresh: true, + RefreshWithin: defaultRefresh, + }, + refreshLock: &sync.RWMutex{}, + sender: sender(), + refreshCallbacks: callbacks, + } + return spt, nil +} + +// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token +func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if token.IsZero() { + return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") + } + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalNoSecret{}, + callbacks...) + if err != nil { + return nil, err + } + + spt.inner.Token = token + + return spt, nil +} + +// NewServicePrincipalTokenFromManualTokenSecret creates a ServicePrincipalToken using the supplied token and secret +func NewServicePrincipalTokenFromManualTokenSecret(oauthConfig OAuthConfig, clientID string, resource string, token Token, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("parameter 'secret' cannot be nil") + } + if token.IsZero() { + return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") + } + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + secret, + callbacks...) + if err != nil { + return nil, err + } + + spt.inner.Token = token + + return spt, nil +} + +// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal +// credentials scoped to the named resource. 
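The most common construction path, using NewServicePrincipalToken as defined just below. All identifiers here are placeholders; the resource is the usual ARM endpoint.

package main

import (
	"log"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	cfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com/", "my-tenant-id")
	if err != nil {
		log.Fatal(err)
	}
	spt, err := adal.NewServicePrincipalToken(
		*cfg, "my-client-id", "my-client-secret", "https://management.azure.com/")
	if err != nil {
		log.Fatal(err)
	}
	// EnsureFresh only hits the token endpoint when the token is inside its
	// RefreshWithin window (five minutes by default).
	if err := spt.EnsureFresh(); err != nil {
		log.Fatal(err)
	}
	log.Println("Authorization: Bearer " + spt.OAuthToken())
}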
+func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(secret, "secret"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalTokenSecret{ + ClientSecret: secret, + }, + callbacks..., + ) +} + +// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied pkcs12 bytes. +func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if certificate == nil { + return nil, fmt.Errorf("parameter 'certificate' cannot be nil") + } + if privateKey == nil { + return nil, fmt.Errorf("parameter 'privateKey' cannot be nil") + } + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalCertificateSecret{ + PrivateKey: privateKey, + Certificate: certificate, + }, + callbacks..., + ) +} + +// NewServicePrincipalTokenFromUsernamePassword creates a ServicePrincipalToken from the username and password. 
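Tying this back to persist.go: a sketch that feeds DecodePfxCertificateData into NewServicePrincipalTokenFromCertificate above. Path, password, client ID, and resource are placeholders; assumes "io/ioutil" plus the adal import.

func certSPT(cfg adal.OAuthConfig, pfxPath, password string) (*adal.ServicePrincipalToken, error) {
	pfx, err := ioutil.ReadFile(pfxPath)
	if err != nil {
		return nil, err
	}
	cert, key, err := adal.DecodePfxCertificateData(pfx, password) // from persist.go above
	if err != nil {
		return nil, err
	}
	return adal.NewServicePrincipalTokenFromCertificate(
		cfg, "my-client-id", cert, key, "https://management.azure.com/")
}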
+func NewServicePrincipalTokenFromUsernamePassword(oauthConfig OAuthConfig, clientID string, username string, password string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(username, "username"); err != nil { + return nil, err + } + if err := validateStringParam(password, "password"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalUsernamePasswordSecret{ + Username: username, + Password: password, + }, + callbacks..., + ) +} + +// NewServicePrincipalTokenFromAuthorizationCode creates a ServicePrincipalToken from the +func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clientID string, clientSecret string, authorizationCode string, redirectURI string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(clientSecret, "clientSecret"); err != nil { + return nil, err + } + if err := validateStringParam(authorizationCode, "authorizationCode"); err != nil { + return nil, err + } + if err := validateStringParam(redirectURI, "redirectURI"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalAuthorizationCodeSecret{ + ClientSecret: clientSecret, + AuthorizationCode: authorizationCode, + RedirectURI: redirectURI, + }, + callbacks..., + ) +} + +// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines. +func GetMSIVMEndpoint() (string, error) { + return msiEndpoint, nil +} + +// NOTE: this only indicates if the ASE environment credentials have been set +// which does not necessarily mean that the caller is authenticating via ASE! +func isAppService() bool { + _, asMSIEndpointEnvExists := os.LookupEnv(asMSIEndpointEnv) + _, asMSISecretEnvExists := os.LookupEnv(asMSISecretEnv) + + return asMSIEndpointEnvExists && asMSISecretEnvExists +} + +// GetMSIAppServiceEndpoint get the MSI endpoint for App Service and Functions +func GetMSIAppServiceEndpoint() (string, error) { + asMSIEndpoint, asMSIEndpointEnvExists := os.LookupEnv(asMSIEndpointEnv) + + if asMSIEndpointEnvExists { + return asMSIEndpoint, nil + } + return "", errors.New("MSI endpoint not found") +} + +// GetMSIEndpoint get the appropriate MSI endpoint depending on the runtime environment +func GetMSIEndpoint() (string, error) { + if isAppService() { + return GetMSIAppServiceEndpoint() + } + return GetMSIVMEndpoint() +} + +// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension. +// It will use the system assigned identity when creating the token. +func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, nil, callbacks...) 
+} + +// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension. +// It will use the clientID of specified user assigned identity when creating the token. +func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, nil, callbacks...) +} + +// NewServicePrincipalTokenFromMSIWithIdentityResourceID creates a ServicePrincipalToken via the MSI VM Extension. +// It will use the azure resource id of user assigned identity when creating the token. +func NewServicePrincipalTokenFromMSIWithIdentityResourceID(msiEndpoint, resource string, identityResourceID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, &identityResourceID, callbacks...) +} + +func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, identityResourceID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateStringParam(msiEndpoint, "msiEndpoint"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if userAssignedID != nil { + if err := validateStringParam(*userAssignedID, "userAssignedID"); err != nil { + return nil, err + } + } + if identityResourceID != nil { + if err := validateStringParam(*identityResourceID, "identityResourceID"); err != nil { + return nil, err + } + } + // We set the oauth config token endpoint to be MSI's endpoint + msiEndpointURL, err := url.Parse(msiEndpoint) + if err != nil { + return nil, err + } + + v := url.Values{} + v.Set("resource", resource) + // App Service MSI currently only supports token API version 2017-09-01 + if isAppService() { + v.Set("api-version", appServiceAPIVersion) + } else { + v.Set("api-version", msiAPIVersion) + } + if userAssignedID != nil { + v.Set("client_id", *userAssignedID) + } + if identityResourceID != nil { + v.Set("mi_res_id", *identityResourceID) + } + msiEndpointURL.RawQuery = v.Encode() + + spt := &ServicePrincipalToken{ + inner: servicePrincipalToken{ + Token: newToken(), + OauthConfig: OAuthConfig{ + TokenEndpoint: *msiEndpointURL, + }, + Secret: &ServicePrincipalMSISecret{}, + Resource: resource, + AutoRefresh: true, + RefreshWithin: defaultRefresh, + }, + refreshLock: &sync.RWMutex{}, + sender: sender(), + refreshCallbacks: callbacks, + MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts, + } + + if userAssignedID != nil { + spt.inner.ClientID = *userAssignedID + } + + return spt, nil +} + +// internal type that implements TokenRefreshError +type tokenRefreshError struct { + message string + resp *http.Response +} + +// Error implements the error interface which is part of the TokenRefreshError interface. +func (tre tokenRefreshError) Error() string { + return tre.message +} + +// Response implements the TokenRefreshError interface, it returns the raw HTTP response from the refresh operation. +func (tre tokenRefreshError) Response() *http.Response { + return tre.resp +} + +func newTokenRefreshError(message string, resp *http.Response) TokenRefreshError { + return tokenRefreshError{message: message, resp: resp} +} + +// EnsureFresh will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. 
This method is safe for concurrent use. +func (spt *ServicePrincipalToken) EnsureFresh() error { + return spt.EnsureFreshWithContext(context.Background()) +} + +// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (spt *ServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { + // must take the read lock when initially checking the token's expiration + if spt.inner.AutoRefresh && spt.Token().WillExpireIn(spt.inner.RefreshWithin) { + // take the write lock then check again to see if the token was already refreshed + spt.refreshLock.Lock() + defer spt.refreshLock.Unlock() + if spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) { + return spt.refreshInternal(ctx, spt.inner.Resource) + } + } + return nil +} + +// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization +func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error { + if spt.refreshCallbacks != nil { + for _, callback := range spt.refreshCallbacks { + err := callback(spt.inner.Token) + if err != nil { + return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err) + } + } + } + return nil +} + +// Refresh obtains a fresh token for the Service Principal. +// This method is safe for concurrent use. +func (spt *ServicePrincipalToken) Refresh() error { + return spt.RefreshWithContext(context.Background()) +} + +// RefreshWithContext obtains a fresh token for the Service Principal. +// This method is safe for concurrent use. +func (spt *ServicePrincipalToken) RefreshWithContext(ctx context.Context) error { + spt.refreshLock.Lock() + defer spt.refreshLock.Unlock() + return spt.refreshInternal(ctx, spt.inner.Resource) +} + +// RefreshExchange refreshes the token, but for a different resource. +// This method is safe for concurrent use. +func (spt *ServicePrincipalToken) RefreshExchange(resource string) error { + return spt.RefreshExchangeWithContext(context.Background(), resource) +} + +// RefreshExchangeWithContext refreshes the token, but for a different resource. +// This method is safe for concurrent use. 
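Circling back to the MSI constructors above: on an Azure VM the endpoint is fixed, so acquiring a token for the system-assigned identity reduces to the sketch below. The resource is a placeholder; assumes the adal import.

func msiToken(resource string) (adal.Token, error) {
	msiEndpoint, _ := adal.GetMSIVMEndpoint() // documented above as unable to fail
	spt, err := adal.NewServicePrincipalTokenFromMSI(msiEndpoint, resource)
	if err != nil {
		return adal.Token{}, err
	}
	// For MSI secrets the refresh request becomes a GET carrying the required
	// "Metadata: true" header, with IMDS-specific retries (see retryForIMDS below).
	if err := spt.Refresh(); err != nil {
		return adal.Token{}, err
	}
	return spt.Token(), nil
}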
+func (spt *ServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { + spt.refreshLock.Lock() + defer spt.refreshLock.Unlock() + return spt.refreshInternal(ctx, resource) +} + +func (spt *ServicePrincipalToken) getGrantType() string { + switch spt.inner.Secret.(type) { + case *ServicePrincipalUsernamePasswordSecret: + return OAuthGrantTypeUserPass + case *ServicePrincipalAuthorizationCodeSecret: + return OAuthGrantTypeAuthorizationCode + default: + return OAuthGrantTypeClientCredentials + } +} + +func isIMDS(u url.URL) bool { + return isMSIEndpoint(u) == true || isASEEndpoint(u) == true +} + +func isMSIEndpoint(endpoint url.URL) bool { + msi, err := url.Parse(msiEndpoint) + if err != nil { + return false + } + return endpoint.Host == msi.Host && endpoint.Path == msi.Path +} + +func isASEEndpoint(endpoint url.URL) bool { + aseEndpoint, err := GetMSIAppServiceEndpoint() + if err != nil { + // app service environment isn't enabled + return false + } + ase, err := url.Parse(aseEndpoint) + if err != nil { + return false + } + return endpoint.Host == ase.Host && endpoint.Path == ase.Path +} + +func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error { + if spt.customRefreshFunc != nil { + token, err := spt.customRefreshFunc(ctx, resource) + if err != nil { + return err + } + spt.inner.Token = *token + return spt.InvokeRefreshCallbacks(spt.inner.Token) + } + + req, err := http.NewRequest(http.MethodPost, spt.inner.OauthConfig.TokenEndpoint.String(), nil) + if err != nil { + return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err) + } + req.Header.Add("User-Agent", UserAgent()) + // Add header when runtime is on App Service or Functions + if isASEEndpoint(spt.inner.OauthConfig.TokenEndpoint) { + asMSISecret, _ := os.LookupEnv(asMSISecretEnv) + req.Header.Add("Secret", asMSISecret) + } + req = req.WithContext(ctx) + if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) { + v := url.Values{} + v.Set("client_id", spt.inner.ClientID) + v.Set("resource", resource) + + if spt.inner.Token.RefreshToken != "" { + v.Set("grant_type", OAuthGrantTypeRefreshToken) + v.Set("refresh_token", spt.inner.Token.RefreshToken) + // web apps must specify client_secret when refreshing tokens + // see https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-protocols-oauth-code#refreshing-the-access-tokens + if spt.getGrantType() == OAuthGrantTypeAuthorizationCode { + err := spt.inner.Secret.SetAuthenticationValues(spt, &v) + if err != nil { + return err + } + } + } else { + v.Set("grant_type", spt.getGrantType()) + err := spt.inner.Secret.SetAuthenticationValues(spt, &v) + if err != nil { + return err + } + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + req.Body = body + } + + if _, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); ok { + req.Method = http.MethodGet + req.Header.Set(metadataHeader, "true") + } + + var resp *http.Response + if isMSIEndpoint(spt.inner.OauthConfig.TokenEndpoint) { + resp, err = getMSIEndpoint(ctx, spt.sender) + if err != nil { + // return a TokenRefreshError here so that we don't keep retrying + return newTokenRefreshError(fmt.Sprintf("the MSI endpoint is not available. 
Failed HTTP request to MSI endpoint: %v", err), nil) + } + resp.Body.Close() + } + if isIMDS(spt.inner.OauthConfig.TokenEndpoint) { + resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts) + } else { + resp, err = spt.sender.Do(req) + } + if err != nil { + // don't return a TokenRefreshError here; this will allow retry logic to apply + return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err) + } + + defer resp.Body.Close() + rb, err := ioutil.ReadAll(resp.Body) + + if resp.StatusCode != http.StatusOK { + if err != nil { + return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body: %v", resp.StatusCode, err), resp) + } + return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)), resp) + } + + // for the following error cases don't return a TokenRefreshError. the operation succeeded + // but some transient failure happened during deserialization. by returning a generic error + // the retry logic will kick in (we don't retry on TokenRefreshError). + + if err != nil { + return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return fmt.Errorf("adal: Empty service principal token received during refresh") + } + var token Token + err = json.Unmarshal(rb, &token) + if err != nil { + return fmt.Errorf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb)) + } + + spt.inner.Token = token + + return spt.InvokeRefreshCallbacks(token) +} + +// retry logic specific to retrieving a token from the IMDS endpoint +func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http.Response, err error) { + // copied from client.go due to circular dependency + retries := []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } + // extra retry status codes specific to IMDS + retries = append(retries, + http.StatusNotFound, + http.StatusGone, + // all remaining 5xx + http.StatusNotImplemented, + http.StatusHTTPVersionNotSupported, + http.StatusVariantAlsoNegotiates, + http.StatusInsufficientStorage, + http.StatusLoopDetected, + http.StatusNotExtended, + http.StatusNetworkAuthenticationRequired) + + // see https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/how-to-use-vm-token#retry-guidance + + const maxDelay time.Duration = 60 * time.Second + + attempt := 0 + delay := time.Duration(0) + + // maxAttempts is user-specified, ensure that its value is greater than zero else no request will be made + if maxAttempts < 1 { + maxAttempts = defaultMaxMSIRefreshAttempts + } + + for attempt < maxAttempts { + if resp != nil && resp.Body != nil { + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + } + resp, err = sender.Do(req) + // we want to retry if err is not nil or the status code is in the list of retry codes + if err == nil && !responseHasStatusCode(resp, retries...) { + return + } + + // perform exponential backoff with a cap. + // must increment attempt before calculating delay. 
+ attempt++ + // the base value of 2 is the "delta backoff" as specified in the guidance doc + delay += (time.Duration(math.Pow(2, float64(attempt))) * time.Second) + if delay > maxDelay { + delay = maxDelay + } + + select { + case <-time.After(delay): + // intentionally left blank + case <-req.Context().Done(): + err = req.Context().Err() + return + } + } + return +} + +func responseHasStatusCode(resp *http.Response, codes ...int) bool { + if resp != nil { + for _, i := range codes { + if i == resp.StatusCode { + return true + } + } + } + return false +} + +// SetAutoRefresh enables or disables automatic refreshing of stale tokens. +func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) { + spt.inner.AutoRefresh = autoRefresh +} + +// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will +// refresh the token. +func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { + spt.inner.RefreshWithin = d + return +} + +// SetSender sets the http.Client used when obtaining the Service Principal token. An +// undecorated http.Client is used by default. +func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s } + +// OAuthToken implements the OAuthTokenProvider interface. It returns the current access token. +func (spt *ServicePrincipalToken) OAuthToken() string { + spt.refreshLock.RLock() + defer spt.refreshLock.RUnlock() + return spt.inner.Token.OAuthToken() +} + +// Token returns a copy of the current token. +func (spt *ServicePrincipalToken) Token() Token { + spt.refreshLock.RLock() + defer spt.refreshLock.RUnlock() + return spt.inner.Token +} + +// MultiTenantServicePrincipalToken contains tokens for multi-tenant authorization. +type MultiTenantServicePrincipalToken struct { + PrimaryToken *ServicePrincipalToken + AuxiliaryTokens []*ServicePrincipalToken +} + +// PrimaryOAuthToken returns the primary authorization token. +func (mt *MultiTenantServicePrincipalToken) PrimaryOAuthToken() string { + return mt.PrimaryToken.OAuthToken() +} + +// AuxiliaryOAuthTokens returns one to three auxiliary authorization tokens. +func (mt *MultiTenantServicePrincipalToken) AuxiliaryOAuthTokens() []string { + tokens := make([]string, len(mt.AuxiliaryTokens)) + for i := range mt.AuxiliaryTokens { + tokens[i] = mt.AuxiliaryTokens[i].OAuthToken() + } + return tokens +} + +// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh primary token: %v", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.EnsureFreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %v", err) + } + } + return nil +} + +// RefreshWithContext obtains a fresh token for the Service Principal. +func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh primary token: %v", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %v", err) + } + } + return nil +} + +// RefreshExchangeWithContext refreshes the token, but for a different resource. 
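A sketch pairing the multi-tenant config from the top of this file with NewMultiTenantServicePrincipalToken, defined just below. IDs and resource are placeholders; the auxiliary tokens are typically sent in the x-ms-authorization-auxiliary header, though that wiring lives outside this package. Assumes context and adal imports.

func multiTenantTokens(ctx context.Context) error {
	cfg, err := adal.NewMultiTenantOAuthConfig(
		"https://login.microsoftonline.com/", "primary-tenant",
		[]string{"aux-tenant"}, adal.OAuthOptions{})
	if err != nil {
		return err
	}
	mt, err := adal.NewMultiTenantServicePrincipalToken(
		cfg, "my-client-id", "my-client-secret", "https://management.azure.com/")
	if err != nil {
		return err
	}
	if err := mt.EnsureFreshWithContext(ctx); err != nil {
		return err
	}
	_ = mt.PrimaryOAuthToken()    // goes in the Authorization header
	_ = mt.AuxiliaryOAuthTokens() // one token per auxiliary tenant
	return nil
}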
+func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { + if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil { + return fmt.Errorf("failed to refresh primary token: %v", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %v", err) + } + } + return nil +} + +// NewMultiTenantServicePrincipalToken creates a new MultiTenantServicePrincipalToken with the specified credentials and resource. +func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig, clientID string, secret string, resource string) (*MultiTenantServicePrincipalToken, error) { + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(secret, "secret"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + auxTenants := multiTenantCfg.AuxiliaryTenants() + m := MultiTenantServicePrincipalToken{ + AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)), + } + primary, err := NewServicePrincipalToken(*multiTenantCfg.PrimaryTenant(), clientID, secret, resource) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err) + } + m.PrimaryToken = primary + for i := range auxTenants { + aux, err := NewServicePrincipalToken(*auxTenants[i], clientID, secret, resource) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err) + } + m.AuxiliaryTokens[i] = aux + } + return &m, nil +} + +// MSIAvailable returns true if the MSI endpoint is available for authentication. +func MSIAvailable(ctx context.Context, sender Sender) bool { + resp, err := getMSIEndpoint(ctx, sender) + if err == nil { + resp.Body.Close() + } + return err == nil +} diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go new file mode 100644 index 000000000..45e01a7ee --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go @@ -0,0 +1,36 @@ +// +build go1.13 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
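A note on the retry schedule implemented by retryForIMDS above: the delay grows cumulatively by 2^attempt seconds per attempt (the "delta backoff" from the guidance doc), capped at 60 seconds. A minimal standalone sketch, not part of the vendored code, that prints the resulting schedule:

    package main

    import (
        "fmt"
        "math"
        "time"
    )

    func main() {
        const maxDelay = 60 * time.Second
        delay := time.Duration(0)
        for attempt := 1; attempt <= 6; attempt++ {
            // same arithmetic as retryForIMDS: cumulative 2^attempt seconds, capped at maxDelay
            delay += time.Duration(math.Pow(2, float64(attempt))) * time.Second
            if delay > maxDelay {
                delay = maxDelay
            }
            fmt.Printf("attempt %d: wait %v\n", attempt, delay)
        }
        // prints 2s, 6s, 14s, 30s, 1m0s, 1m0s
    }

So a full cycle through the default attempt count waits at most a few minutes before the refresh gives up.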
+ +package adal + +import ( + "context" + "net/http" + "time" +) + +func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) { + // this cannot fail, the return sig is due to legacy reasons + msiEndpoint, _ := GetMSIVMEndpoint() + tempCtx, cancel := context.WithTimeout(ctx, 500*time.Millisecond) + defer cancel() + // http.NewRequestWithContext() was added in Go 1.13 + req, _ := http.NewRequestWithContext(tempCtx, http.MethodGet, msiEndpoint, nil) + q := req.URL.Query() + q.Add("api-version", msiAPIVersion) + req.URL.RawQuery = q.Encode() + return sender.Do(req) +} diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go new file mode 100644 index 000000000..6f7ad8078 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go @@ -0,0 +1,36 @@ +// +build !go1.13 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adal + +import ( + "context" + "net/http" + "time" +) + +func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) { + // this cannot fail, the return sig is due to legacy reasons + msiEndpoint, _ := GetMSIVMEndpoint() + tempCtx, cancel := context.WithTimeout(ctx, 500*time.Millisecond) + defer cancel() + req, _ := http.NewRequest(http.MethodGet, msiEndpoint, nil) + req = req.WithContext(tempCtx) + q := req.URL.Query() + q.Add("api-version", msiAPIVersion) + req.URL.RawQuery = q.Encode() + return sender.Do(req) +} diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/version.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/version.go new file mode 100644 index 000000000..c867b3484 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/adal/version.go @@ -0,0 +1,45 @@ +package adal + +import ( + "fmt" + "runtime" +) + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const number = "v1.0.0" + +var ( + ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + number, + ) +) + +// UserAgent returns a string containing the Go version, system architecture and OS, and the adal version. 
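For illustration, the user agent assembled in version.go below renders roughly like this (the Go version and platform vary by toolchain; the values shown are examples):

    fmt.Println(adal.UserAgent())
    // e.g. Go/go1.17.6 (amd64-linux) go-autorest/adal/v1.0.0

AddToUserAgent, defined next, appends extensions after this base string.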
+func UserAgent() string { + return ua +} + +// AddToUserAgent adds an extension to the current user agent +func AddToUserAgent(extension string) error { + if extension != "" { + ua = fmt.Sprintf("%s %s", ua, extension) + return nil + } + return fmt.Errorf("Extension was empty, User Agent remained as '%s'", ua) +} diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/authorization.go new file mode 100644 index 000000000..f43e1a6ed --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/authorization.go @@ -0,0 +1,337 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "crypto/tls" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/Azure/go-autorest/autorest/adal" +) + +const ( + bearerChallengeHeader = "Www-Authenticate" + bearer = "Bearer" + tenantID = "tenantID" + apiKeyAuthorizerHeader = "Ocp-Apim-Subscription-Key" + bingAPISdkHeader = "X-BingApis-SDK-Client" + golangBingAPISdkHeaderValue = "Go-SDK" + authorization = "Authorization" + basic = "Basic" +) + +// Authorizer is the interface that provides a PrepareDecorator used to supply request +// authorization. Most often, the Authorizer decorator runs last so it has access to the full +// state of the formed HTTP request. +type Authorizer interface { + WithAuthorization() PrepareDecorator +} + +// NullAuthorizer implements a default, "do nothing" Authorizer. +type NullAuthorizer struct{} + +// WithAuthorization returns a PrepareDecorator that does nothing. +func (na NullAuthorizer) WithAuthorization() PrepareDecorator { + return WithNothing() +} + +// APIKeyAuthorizer implements API Key authorization. +type APIKeyAuthorizer struct { + headers map[string]interface{} + queryParameters map[string]interface{} +} + +// NewAPIKeyAuthorizerWithHeaders creates an APIKeyAuthorizer with headers. +func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer { + return NewAPIKeyAuthorizer(headers, nil) +} + +// NewAPIKeyAuthorizerWithQueryParameters creates an APIKeyAuthorizer with query parameters. +func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer { + return NewAPIKeyAuthorizer(nil, queryParameters) +} + +// NewAPIKeyAuthorizer creates an APIKeyAuthorizer with the given headers and query parameters. +func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer { + return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters} +} + +// WithAuthorization returns a PrepareDecorator that adds the configured HTTP headers and query parameters.
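Before the implementation that follows, here is how a hypothetical caller might wire the API-key decorator into the Prepare pipeline (the header name matches the constant above; the key and URL are invented):

    auth := autorest.NewAPIKeyAuthorizerWithHeaders(map[string]interface{}{
        "Ocp-Apim-Subscription-Key": "00000000000000000000000000000000",
    })
    req, err := autorest.Prepare(&http.Request{},
        autorest.WithBaseURL("https://example.invalid"),
        auth.WithAuthorization())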
+func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters)) + } +} + +// CognitiveServicesAuthorizer implements authorization for Cognitive Services. +type CognitiveServicesAuthorizer struct { + subscriptionKey string +} + +// NewCognitiveServicesAuthorizer creates a CognitiveServicesAuthorizer using the provided subscription key. +func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer { + return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey} +} + +// WithAuthorization returns a PrepareDecorator that adds the subscription key and Bing SDK headers. +func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator { + headers := make(map[string]interface{}) + headers[apiKeyAuthorizerHeader] = csa.subscriptionKey + headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue + + return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() +} + +// BearerAuthorizer implements the bearer authorization scheme. +type BearerAuthorizer struct { + tokenProvider adal.OAuthTokenProvider +} + +// NewBearerAuthorizer creates a BearerAuthorizer using the given token provider +func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer { + return &BearerAuthorizer{tokenProvider: tp} +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the token. +// +// By default, the token will be automatically refreshed through the Refresher interface. +func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + // the ordering is important here, prefer RefresherWithContext if available + if refresher, ok := ba.tokenProvider.(adal.RefresherWithContext); ok { + err = refresher.EnsureFreshWithContext(r.Context()) + } else if refresher, ok := ba.tokenProvider.(adal.Refresher); ok { + err = refresher.EnsureFresh() + } + if err != nil { + var resp *http.Response + if tokError, ok := err.(adal.TokenRefreshError); ok { + resp = tokError.Response() + } + return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp, + "Failed to refresh the Token for request to %s", r.URL) + } + return Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken()))) + } + return r, err + }) + } +} + +// BearerAuthorizerCallbackFunc is the authentication callback signature. +type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error) + +// BearerAuthorizerCallback implements bearer authorization via a callback. +type BearerAuthorizerCallback struct { + sender Sender + callback BearerAuthorizerCallbackFunc +} + +// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback +// is invoked when the HTTP request is submitted. +func NewBearerAuthorizerCallback(s Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { + if s == nil { + s = sender(tls.RenegotiateNever) + } + return &BearerAuthorizerCallback{sender: s, callback: callback} +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value +// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback. +// +// By default, the token will be automatically refreshed through the Refresher interface.
+func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + // make a copy of the request and remove the body as it's not + // required and avoids us having to create a copy of it. + rCopy := *r + removeRequestBody(&rCopy) + + resp, err := bacb.sender.Do(&rCopy) + if err != nil { + return r, err + } + DrainResponseBody(resp) + if resp.StatusCode == 401 && hasBearerChallenge(resp.Header) { + bc, err := newBearerChallenge(resp.Header) + if err != nil { + return r, err + } + if bacb.callback != nil { + ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"]) + if err != nil { + return r, err + } + return Prepare(r, ba.WithAuthorization()) + } + } + } + return r, err + }) + } +} + +// returns true if the HTTP response contains a bearer challenge +func hasBearerChallenge(header http.Header) bool { + authHeader := header.Get(bearerChallengeHeader) + if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 { + return false + } + return true +} + +type bearerChallenge struct { + values map[string]string +} + +func newBearerChallenge(header http.Header) (bc bearerChallenge, err error) { + challenge := strings.TrimSpace(header.Get(bearerChallengeHeader)) + trimmedChallenge := challenge[len(bearer)+1:] + + // challenge is a set of key=value pairs that are comma delimited + pairs := strings.Split(trimmedChallenge, ",") + if len(pairs) < 1 { + err = fmt.Errorf("challenge '%s' contains no pairs", challenge) + return bc, err + } + + bc.values = make(map[string]string) + for i := range pairs { + trimmedPair := strings.TrimSpace(pairs[i]) + pair := strings.Split(trimmedPair, "=") + if len(pair) == 2 { + // remove the enclosing quotes + key := strings.Trim(pair[0], "\"") + value := strings.Trim(pair[1], "\"") + + switch key { + case "authorization", "authorization_uri": + // strip the tenant ID from the authorization URL + asURL, err := url.Parse(value) + if err != nil { + return bc, err + } + bc.values[tenantID] = asURL.Path[1:] + default: + bc.values[key] = value + } + } + } + + return bc, err +} + +// EventGridKeyAuthorizer implements authorization for event grid using key authentication. +type EventGridKeyAuthorizer struct { + topicKey string +} + +// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer +// with the specified topic key. +func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer { + return EventGridKeyAuthorizer{topicKey: topicKey} +} + +// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header. +func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator { + headers := map[string]interface{}{ + "aeg-sas-key": egta.topicKey, + } + return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() +} + +// BasicAuthorizer implements basic HTTP authorization by adding the Authorization HTTP header +// with the value "Basic <token>", where <token> is a base64-encoded username:password tuple. +type BasicAuthorizer struct { + userName string + password string +} + +// NewBasicAuthorizer creates a new BasicAuthorizer with the specified username and password.
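To make the base64 step below concrete, the header value produced for user "scott" with password "tiger" would be the following (a sketch, not vendored code):

    val := "Basic " + base64.StdEncoding.EncodeToString([]byte("scott:tiger"))
    // val == "Basic c2NvdHQ6dGlnZXI="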
+func NewBasicAuthorizer(userName, password string) *BasicAuthorizer { + return &BasicAuthorizer{ + userName: userName, + password: password, + } +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Basic " followed by the base64-encoded username:password tuple. +func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator { + headers := make(map[string]interface{}) + headers[authorization] = basic + " " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", ba.userName, ba.password))) + + return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() +} + +// MultiTenantServicePrincipalTokenAuthorizer provides authentication across tenants. +type MultiTenantServicePrincipalTokenAuthorizer interface { + WithAuthorization() PrepareDecorator +} + +// NewMultiTenantServicePrincipalTokenAuthorizer creates a MultiTenantServicePrincipalTokenAuthorizer using the given token provider +func NewMultiTenantServicePrincipalTokenAuthorizer(tp adal.MultitenantOAuthTokenProvider) MultiTenantServicePrincipalTokenAuthorizer { + return &multiTenantSPTAuthorizer{tp: tp} +} + +type multiTenantSPTAuthorizer struct { + tp adal.MultitenantOAuthTokenProvider +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header using the +// primary token along with the auxiliary authorization header using the auxiliary tokens. +// +// By default, the token will be automatically refreshed through the Refresher interface. +func (mt multiTenantSPTAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + if refresher, ok := mt.tp.(adal.RefresherWithContext); ok { + err = refresher.EnsureFreshWithContext(r.Context()) + if err != nil { + var resp *http.Response + if tokError, ok := err.(adal.TokenRefreshError); ok { + resp = tokError.Response() + } + return r, NewErrorWithError(err, "azure.multiTenantSPTAuthorizer", "WithAuthorization", resp, + "Failed to refresh one or more Tokens for request to %s", r.URL) + } + } + r, err = Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", mt.tp.PrimaryOAuthToken()))) + if err != nil { + return r, err + } + auxTokens := mt.tp.AuxiliaryOAuthTokens() + for i := range auxTokens { + auxTokens[i] = fmt.Sprintf("Bearer %s", auxTokens[i]) + } + return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, "; "))) + }) + } +} diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go new file mode 100644 index 000000000..89a659cb6 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go @@ -0,0 +1,67 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
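Before moving into the SAS token authorizer, a reference note on the multi-tenant decorator above: it emits one primary header plus a single auxiliary header whose value is the "; "-joined auxiliary tokens, roughly as follows (token values invented; headerAuxAuthorization is expected to resolve to x-ms-authorization-auxiliary, though that constant is defined elsewhere in the package):

    Authorization: Bearer <primary-token>
    x-ms-authorization-auxiliary: Bearer <aux-token-1>; Bearer <aux-token-2>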
+ +import ( + "fmt" + "net/http" + "strings" +) + +// SASTokenAuthorizer implements an authorization for SAS Token Authentication +// this can be used for interaction with Blob Storage Endpoints +type SASTokenAuthorizer struct { + sasToken string +} + +// NewSASTokenAuthorizer creates a SASTokenAuthorizer using the given credentials +func NewSASTokenAuthorizer(sasToken string) (*SASTokenAuthorizer, error) { + if strings.TrimSpace(sasToken) == "" { + return nil, fmt.Errorf("sasToken cannot be empty") + } + + token := sasToken + if strings.HasPrefix(sasToken, "?") { + token = strings.TrimPrefix(sasToken, "?") + } + + return &SASTokenAuthorizer{ + sasToken: token, + }, nil +} + +// WithAuthorization returns a PrepareDecorator that adds a shared access signature token to the +// URI's query parameters. This can be used for the Blob, Queue, and File Services. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/delegate-access-with-shared-access-signature +func (sas *SASTokenAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + + if r.URL.RawQuery != "" { + r.URL.RawQuery = fmt.Sprintf("%s&%s", r.URL.RawQuery, sas.sasToken) + } else { + r.URL.RawQuery = sas.sasToken + } + + r.RequestURI = r.URL.String() + return Prepare(r) + }) + } +} diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go new file mode 100644 index 000000000..b844a3df4 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go @@ -0,0 +1,304 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "sort" + "strings" + "time" +) + +// SharedKeyType defines the enumeration for the various shared key types. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key for details on the shared key types. +type SharedKeyType string + +const ( + // SharedKey is used to authorize against blobs, files and queues services. + SharedKey SharedKeyType = "sharedKey" + + // SharedKeyForTable is used to authorize against the table service. + SharedKeyForTable SharedKeyType = "sharedKeyTable" + + // SharedKeyLite is used to authorize against blobs, files and queues services. It's provided for + // backwards compatibility with API versions before 2009-09-19. Prefer SharedKey instead. + SharedKeyLite SharedKeyType = "sharedKeyLite" + + // SharedKeyLiteForTable is used to authorize against the table service. It's provided for + // backwards compatibility with older table API versions. Prefer SharedKeyForTable instead. 
+ SharedKeyLiteForTable SharedKeyType = "sharedKeyLiteTable" +) + +const ( + headerAccept = "Accept" + headerAcceptCharset = "Accept-Charset" + headerContentEncoding = "Content-Encoding" + headerContentLength = "Content-Length" + headerContentMD5 = "Content-MD5" + headerContentLanguage = "Content-Language" + headerIfModifiedSince = "If-Modified-Since" + headerIfMatch = "If-Match" + headerIfNoneMatch = "If-None-Match" + headerIfUnmodifiedSince = "If-Unmodified-Since" + headerDate = "Date" + headerXMSDate = "X-Ms-Date" + headerXMSVersion = "x-ms-version" + headerRange = "Range" +) + +const storageEmulatorAccountName = "devstoreaccount1" + +// SharedKeyAuthorizer implements an authorization for Shared Key +// this can be used for interaction with Blob, File and Queue Storage Endpoints +type SharedKeyAuthorizer struct { + accountName string + accountKey []byte + keyType SharedKeyType +} + +// NewSharedKeyAuthorizer creates a SharedKeyAuthorizer using the provided credentials and shared key type. +func NewSharedKeyAuthorizer(accountName, accountKey string, keyType SharedKeyType) (*SharedKeyAuthorizer, error) { + key, err := base64.StdEncoding.DecodeString(accountKey) + if err != nil { + return nil, fmt.Errorf("malformed storage account key: %v", err) + } + return &SharedKeyAuthorizer{ + accountName: accountName, + accountKey: key, + keyType: keyType, + }, nil +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "<SharedKeyType> " followed by the computed key. +// This can be used for the Blob, Queue, and File Services +// +// from: https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key +// You may use Shared Key authorization to authorize a request made against the +// 2009-09-19 version and later of the Blob and Queue services, +// and version 2014-02-14 and later of the File services.
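The decorator implemented below reduces to: canonicalize the request, HMAC-SHA256 it with the base64-decoded account key, and emit the scheme plus account:signature. A minimal standalone sketch (the account name, key, and canonicalized string are all invented stand-ins):

    package main

    import (
        "crypto/hmac"
        "crypto/sha256"
        "encoding/base64"
        "fmt"
    )

    func main() {
        key, _ := base64.StdEncoding.DecodeString("c3VwZXItc2VjcmV0LWtleQ==") // "super-secret-key"
        h := hmac.New(sha256.New, key)
        h.Write([]byte("canonicalized-request-string")) // stands in for buildCanonicalizedString output
        sig := base64.StdEncoding.EncodeToString(h.Sum(nil))
        fmt.Printf("SharedKey %s:%s\n", "myaccount", sig) // what lands in the Authorization header
    }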
+func (sk *SharedKeyAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + + sk, err := buildSharedKey(sk.accountName, sk.accountKey, r, sk.keyType) + if err != nil { + return r, err + } + return Prepare(r, WithHeader(headerAuthorization, sk)) + }) + } +} + +func buildSharedKey(accName string, accKey []byte, req *http.Request, keyType SharedKeyType) (string, error) { + canRes, err := buildCanonicalizedResource(accName, req.URL.String(), keyType) + if err != nil { + return "", err + } + + if req.Header == nil { + req.Header = http.Header{} + } + + // ensure date is set + if req.Header.Get(headerDate) == "" && req.Header.Get(headerXMSDate) == "" { + date := time.Now().UTC().Format(http.TimeFormat) + req.Header.Set(headerXMSDate, date) + } + canString, err := buildCanonicalizedString(req.Method, req.Header, canRes, keyType) + if err != nil { + return "", err + } + return createAuthorizationHeader(accName, accKey, canString, keyType), nil +} + +func buildCanonicalizedResource(accountName, uri string, keyType SharedKeyType) (string, error) { + errMsg := "buildCanonicalizedResource error: %s" + u, err := url.Parse(uri) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + cr := bytes.NewBufferString("") + if accountName != storageEmulatorAccountName { + cr.WriteString("/") + cr.WriteString(getCanonicalizedAccountName(accountName)) + } + + if len(u.Path) > 0 { + // Any portion of the CanonicalizedResource string that is derived from + // the resource's URI should be encoded exactly as it is in the URI. + // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx + cr.WriteString(u.EscapedPath()) + } + + params, err := url.ParseQuery(u.RawQuery) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + // See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277 + if keyType == SharedKey { + if len(params) > 0 { + cr.WriteString("\n") + + keys := []string{} + for key := range params { + keys = append(keys, key) + } + sort.Strings(keys) + + completeParams := []string{} + for _, key := range keys { + if len(params[key]) > 1 { + sort.Strings(params[key]) + } + + completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ","))) + } + cr.WriteString(strings.Join(completeParams, "\n")) + } + } else { + // search for "comp" parameter, if exists then add it to canonicalizedresource + if v, ok := params["comp"]; ok { + cr.WriteString("?comp=" + v[0]) + } + } + + return string(cr.Bytes()), nil +} + +func getCanonicalizedAccountName(accountName string) string { + // since we may be trying to access a secondary storage account, we need to + // remove the -secondary part of the storage name + return strings.TrimSuffix(accountName, "-secondary") +} + +func buildCanonicalizedString(verb string, headers http.Header, canonicalizedResource string, keyType SharedKeyType) (string, error) { + contentLength := headers.Get(headerContentLength) + if contentLength == "0" { + contentLength = "" + } + date := headers.Get(headerDate) + if v := headers.Get(headerXMSDate); v != "" { + if keyType == SharedKey || keyType == SharedKeyLite { + date = "" + } else { + date = v + } + } + var canString string + switch keyType { + case SharedKey: + canString = strings.Join([]string{ + verb, + headers.Get(headerContentEncoding), + 
headers.Get(headerContentLanguage), + contentLength, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + date, + headers.Get(headerIfModifiedSince), + headers.Get(headerIfMatch), + headers.Get(headerIfNoneMatch), + headers.Get(headerIfUnmodifiedSince), + headers.Get(headerRange), + buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + case SharedKeyForTable: + canString = strings.Join([]string{ + verb, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + date, + canonicalizedResource, + }, "\n") + case SharedKeyLite: + canString = strings.Join([]string{ + verb, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + date, + buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + case SharedKeyLiteForTable: + canString = strings.Join([]string{ + date, + canonicalizedResource, + }, "\n") + default: + return "", fmt.Errorf("key type '%s' is not supported", keyType) + } + return canString, nil +} + +func buildCanonicalizedHeader(headers http.Header) string { + cm := make(map[string]string) + + for k := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + if strings.HasPrefix(headerName, "x-ms-") { + cm[headerName] = headers.Get(k) + } + } + + if len(cm) == 0 { + return "" + } + + keys := []string{} + for key := range cm { + keys = append(keys, key) + } + + sort.Strings(keys) + + ch := bytes.NewBufferString("") + + for _, key := range keys { + ch.WriteString(key) + ch.WriteRune(':') + ch.WriteString(cm[key]) + ch.WriteRune('\n') + } + + return strings.TrimSuffix(string(ch.Bytes()), "\n") +} + +func createAuthorizationHeader(accountName string, accountKey []byte, canonicalizedString string, keyType SharedKeyType) string { + h := hmac.New(sha256.New, accountKey) + h.Write([]byte(canonicalizedString)) + signature := base64.StdEncoding.EncodeToString(h.Sum(nil)) + var key string + switch keyType { + case SharedKey, SharedKeyForTable: + key = "SharedKey" + case SharedKeyLite, SharedKeyLiteForTable: + key = "SharedKeyLite" + } + return fmt.Sprintf("%s %s:%s", key, getCanonicalizedAccountName(accountName), signature) +} diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/autorest.go new file mode 100644 index 000000000..aafdf021f --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/autorest.go @@ -0,0 +1,150 @@ +/* +Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines +and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) +generated Go code. + +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. A typical pattern is: + + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) + + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) + + err = Respond(resp, + ByDiscardingBody(), + ByClosing()) + +Each phase relies on decorators to modify and / or manage processing. Decorators may first modify +and then pass the data along, pass the data first and then modify the result, or wrap themselves +around passing the data (such as a logger might do). Decorators run in the order provided. 
For +example, the following: + + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) + +will set the URL to: + + https://microsoft.com/a/b/c + +Preparers and Responders may be shared and re-used (assuming the underlying decorators support +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, +all bound together by means of input / output channels. + +Decorators hold their passed state within a closure (such as the path components in the example +above). Be careful to share Preparers and Responders only in a context where such held state +applies. For example, it may not make sense to share a Preparer that applies a query string from a +fixed set of values. Similarly, sharing a Responder that reads the response body into a passed +struct (e.g., ByUnmarshallingJson) is likely incorrect. + +Lastly, the Swagger specification (https://swagger.io) that drives AutoRest +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure +correct parsing and formatting. + +Errors raised by autorest objects and methods will conform to the autorest.Error interface. + +See the included examples for more detail. For details on the suggested use of this package by +generated clients, see the Client described below. +*/ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "net/http" + "time" +) + +const ( + // HeaderLocation specifies the HTTP Location header. + HeaderLocation = "Location" + + // HeaderRetryAfter specifies the HTTP Retry-After header. + HeaderRetryAfter = "Retry-After" +) + +// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set +// and false otherwise. +func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { + if resp == nil { + return false + } + return containsInt(codes, resp.StatusCode) +} + +// GetLocation retrieves the URL from the Location header of the passed response. +func GetLocation(resp *http.Response) string { + return resp.Header.Get(HeaderLocation) +} + +// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If +// the header is absent or is malformed, it will return the supplied default delay time.Duration. +func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration { + retry := resp.Header.Get(HeaderRetryAfter) + if retry == "" { + return defaultDelay + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + return defaultDelay + } + + return d +} + +// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. 
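An aside before the polling helpers below: one nuance in GetRetryAfter above is that it only understands delta-seconds (it appends "s" and calls time.ParseDuration), so an HTTP-date Retry-After value silently falls back to the supplied default. A small demonstration:

    package main

    import (
        "fmt"
        "net/http"
        "time"

        "github.com/Azure/go-autorest/autorest"
    )

    func main() {
        resp := &http.Response{Header: http.Header{"Retry-After": []string{"30"}}}
        fmt.Println(autorest.GetRetryAfter(resp, 5*time.Second)) // 30s
        resp.Header.Set("Retry-After", "Fri, 31 Dec 1999 23:59:59 GMT")
        fmt.Println(autorest.GetRetryAfter(resp, 5*time.Second)) // 5s (date form is not parsed)
    }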
+func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare(&http.Request{Cancel: cancel}, + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} + +// NewPollingRequestWithContext allocates and returns a new http.Request with the specified context to poll for the passed response. +func NewPollingRequestWithContext(ctx context.Context, resp *http.Response) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequestWithContext", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare((&http.Request{}).WithContext(ctx), + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequestWithContext", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/azure/async.go new file mode 100644 index 000000000..c5fc511f6 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -0,0 +1,934 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/tracing" +) + +const ( + headerAsyncOperation = "Azure-AsyncOperation" +) + +const ( + operationInProgress string = "InProgress" + operationCanceled string = "Canceled" + operationFailed string = "Failed" + operationSucceeded string = "Succeeded" +) + +var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK} + +// Future provides a mechanism to access the status and results of an asynchronous request. +// Since futures are stateful they should be passed by value to avoid race conditions. +type Future struct { + pt pollingTracker +} + +// NewFutureFromResponse returns a new Future object initialized +// with the initial response from an asynchronous operation. +func NewFutureFromResponse(resp *http.Response) (Future, error) { + pt, err := createPollingTracker(resp) + return Future{pt: pt}, err +} + +// Response returns the last HTTP response. +func (f Future) Response() *http.Response { + if f.pt == nil { + return nil + } + return f.pt.latestResponse() +} + +// Status returns the last status message of the operation. 
+func (f Future) Status() string { + if f.pt == nil { + return "" + } + return f.pt.pollingStatus() +} + +// PollingMethod returns the method used to monitor the status of the asynchronous operation. +func (f Future) PollingMethod() PollingMethodType { + if f.pt == nil { + return PollingUnknown + } + return f.pt.pollingMethod() +} + +// DoneWithContext queries the service to see if the operation has completed. +func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) { + ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext") + defer func() { + sc := -1 + resp := f.Response() + if resp != nil { + sc = resp.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + + if f.pt == nil { + return false, autorest.NewError("Future", "Done", "future is not initialized") + } + if f.pt.hasTerminated() { + return true, f.pt.pollingError() + } + if err := f.pt.pollForStatus(ctx, sender); err != nil { + return false, err + } + if err := f.pt.checkForErrors(); err != nil { + return f.pt.hasTerminated(), err + } + if err := f.pt.updatePollingState(f.pt.provisioningStateApplicable()); err != nil { + return false, err + } + if err := f.pt.initPollingMethod(); err != nil { + return false, err + } + if err := f.pt.updatePollingMethod(); err != nil { + return false, err + } + return f.pt.hasTerminated(), f.pt.pollingError() +} + +// GetPollingDelay returns a duration the application should wait before checking +// the status of the asynchronous request and true; this value is returned from +// the service via the Retry-After response header. If the header wasn't returned +// then the function returns the zero-value time.Duration and false. +func (f Future) GetPollingDelay() (time.Duration, bool) { + if f.pt == nil { + return 0, false + } + resp := f.pt.latestResponse() + if resp == nil { + return 0, false + } + + retry := resp.Header.Get(autorest.HeaderRetryAfter) + if retry == "" { + return 0, false + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + panic(err) + } + + return d, true +} + +// WaitForCompletionRef will return when one of the following conditions is met: the long +// running operation has completed, the provided context is cancelled, or the client's +// polling duration has been exceeded. It will retry failed polling attempts based on +// the retry value defined in the client up to the maximum retry attempts. +// If no deadline is specified in the context then the client.PollingDuration will be +// used to determine if a default deadline should be used. +// If PollingDuration is greater than zero the value will be used as the context's timeout. +// If PollingDuration is zero then no default deadline will be used. 
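Before the implementation below, the typical caller flow for a Future, sketched inside some function returning error (client configuration is elided; resp is assumed to be the initial response of the long-running operation):

    future, err := azure.NewFutureFromResponse(resp)
    if err != nil {
        return err
    }
    // polls until a terminal state, honoring Retry-After and the client's retry settings
    if err := future.WaitForCompletionRef(ctx, client); err != nil {
        return err
    }
    // final GET to retrieve the resultant payload
    resp, err = future.GetResult(client)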
+func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) (err error) { + ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.WaitForCompletionRef") + defer func() { + sc := -1 + resp := f.Response() + if resp != nil { + sc = resp.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + cancelCtx := ctx + // if the provided context already has a deadline don't override it + _, hasDeadline := ctx.Deadline() + if d := client.PollingDuration; !hasDeadline && d != 0 { + var cancel context.CancelFunc + cancelCtx, cancel = context.WithTimeout(ctx, d) + defer cancel() + } + + done, err := f.DoneWithContext(ctx, client) + for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) { + if attempts >= client.RetryAttempts { + return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded") + } + // we want delayAttempt to be zero in the non-error case so + // that DelayForBackoff doesn't perform exponential back-off + var delayAttempt int + var delay time.Duration + if err == nil { + // check for Retry-After delay, if not present use the client's polling delay + var ok bool + delay, ok = f.GetPollingDelay() + if !ok { + delay = client.PollingDelay + } + } else { + // there was an error polling for status so perform exponential + // back-off based on the number of attempts using the client's retry + // duration. update attempts after delayAttempt to avoid off-by-one. + delayAttempt = attempts + delay = client.RetryDuration + attempts++ + } + // wait until the delay elapses or the context is cancelled + delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, cancelCtx.Done()) + if !delayElapsed { + return autorest.NewErrorWithError(cancelCtx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled") + } + } + return +} + +// MarshalJSON implements the json.Marshaler interface. +func (f Future) MarshalJSON() ([]byte, error) { + return json.Marshal(f.pt) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (f *Future) UnmarshalJSON(data []byte) error { + // unmarshal into JSON object to determine the tracker type + obj := map[string]interface{}{} + err := json.Unmarshal(data, &obj) + if err != nil { + return err + } + if obj["method"] == nil { + return autorest.NewError("Future", "UnmarshalJSON", "missing 'method' property") + } + method := obj["method"].(string) + switch strings.ToUpper(method) { + case http.MethodDelete: + f.pt = &pollingTrackerDelete{} + case http.MethodPatch: + f.pt = &pollingTrackerPatch{} + case http.MethodPost: + f.pt = &pollingTrackerPost{} + case http.MethodPut: + f.pt = &pollingTrackerPut{} + default: + return autorest.NewError("Future", "UnmarshalJSON", "unsupported method '%s'", method) + } + // now unmarshal into the tracker + return json.Unmarshal(data, &f.pt) +} + +// PollingURL returns the URL used for retrieving the status of the long-running operation. +func (f Future) PollingURL() string { + if f.pt == nil { + return "" + } + return f.pt.pollingURL() +} + +// GetResult should be called once polling has completed successfully. +// It makes the final GET call to retrieve the resultant payload. +func (f Future) GetResult(sender autorest.Sender) (*http.Response, error) { + if f.pt.finalGetURL() == "" { + // we can end up in this situation if the async operation returns a 200 + // with no polling URLs.
in that case return the response which should + // contain the JSON payload (only do this for successful terminal cases). + if lr := f.pt.latestResponse(); lr != nil && f.pt.hasSucceeded() { + return lr, nil + } + return nil, autorest.NewError("Future", "GetResult", "missing URL for retrieving result") + } + req, err := http.NewRequest(http.MethodGet, f.pt.finalGetURL(), nil) + if err != nil { + return nil, err + } + resp, err := sender.Do(req) + if err == nil && resp.Body != nil { + // copy the body and close it so callers don't have to + defer resp.Body.Close() + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return resp, err + } + resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + return resp, err +} + +type pollingTracker interface { + // these methods can differ per tracker + + // checks the response headers and status code to determine the polling mechanism + updatePollingMethod() error + + // checks the response for tracker-specific error conditions + checkForErrors() error + + // returns true if provisioning state should be checked + provisioningStateApplicable() bool + + // methods common to all trackers + + // initializes a tracker's polling URL and method, called for each iteration. + // these values can be overridden by each polling tracker as required. + initPollingMethod() error + + // initializes the tracker's internal state, call this when the tracker is created + initializeState() error + + // makes an HTTP request to check the status of the LRO + pollForStatus(ctx context.Context, sender autorest.Sender) error + + // updates internal tracker state, call this after each call to pollForStatus + updatePollingState(provStateApl bool) error + + // returns the error response from the service, can be nil + pollingError() error + + // returns the polling method being used + pollingMethod() PollingMethodType + + // returns the state of the LRO as returned from the service + pollingStatus() string + + // returns the URL used for polling status + pollingURL() string + + // returns the URL used for the final GET to retrieve the resource + finalGetURL() string + + // returns true if the LRO is in a terminal state + hasTerminated() bool + + // returns true if the LRO is in a failed terminal state + hasFailed() bool + + // returns true if the LRO is in a successful terminal state + hasSucceeded() bool + + // returns the cached HTTP response after a call to pollForStatus(), can be nil + latestResponse() *http.Response +} + +type pollingTrackerBase struct { + // resp is the last response, either from the submission of the LRO or from polling + resp *http.Response + + // method is the HTTP verb, this is needed for deserialization + Method string `json:"method"` + + // rawBody is the raw JSON response body + rawBody map[string]interface{} + + // denotes if polling is using async-operation or location header + Pm PollingMethodType `json:"pollingMethod"` + + // the URL to poll for status + URI string `json:"pollingURI"` + + // the state of the LRO as returned from the service + State string `json:"lroState"` + + // the URL to GET for the final result + FinalGetURI string `json:"resultURI"` + + // used to hold an error object returned from the service + Err *ServiceError `json:"error,omitempty"` +} + +func (pt *pollingTrackerBase) initializeState() error { + // determine the initial polling state based on response body and/or HTTP status + // code. this is applicable to the initial LRO response, not polling responses! 
+ pt.Method = pt.resp.Request.Method + if err := pt.updateRawBody(); err != nil { + return err + } + switch pt.resp.StatusCode { + case http.StatusOK: + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + if pt.hasFailed() { + pt.updateErrorFromResponse() + return pt.pollingError() + } + } else { + pt.State = operationSucceeded + } + case http.StatusCreated: + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + } else { + pt.State = operationInProgress + } + case http.StatusAccepted: + pt.State = operationInProgress + case http.StatusNoContent: + pt.State = operationSucceeded + default: + pt.State = operationFailed + pt.updateErrorFromResponse() + return pt.pollingError() + } + return pt.initPollingMethod() +} + +func (pt pollingTrackerBase) getProvisioningState() *string { + if pt.rawBody != nil && pt.rawBody["properties"] != nil { + p := pt.rawBody["properties"].(map[string]interface{}) + if ps := p["provisioningState"]; ps != nil { + s := ps.(string) + return &s + } + } + return nil +} + +func (pt *pollingTrackerBase) updateRawBody() error { + pt.rawBody = map[string]interface{}{} + if pt.resp.ContentLength != 0 { + defer pt.resp.Body.Close() + b, err := ioutil.ReadAll(pt.resp.Body) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body") + } + // observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty + if len(b) == 0 { + return nil + } + // put the body back so it's available to other callers + pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + if err = json.Unmarshal(b, &pt.rawBody); err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to unmarshal response body") + } + } + return nil +} + +func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest.Sender) error { + req, err := http.NewRequest(http.MethodGet, pt.URI, nil) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request") + } + + req = req.WithContext(ctx) + preparer := autorest.CreatePreparer(autorest.GetPrepareDecorators(ctx)...) + req, err = preparer.Prepare(req) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed preparing HTTP request") + } + pt.resp, err = sender.Do(req) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request") + } + if autorest.ResponseHasStatusCode(pt.resp, pollingCodes[:]...) { + // reset the service error on success case + pt.Err = nil + err = pt.updateRawBody() + } else { + // check response body for error content + pt.updateErrorFromResponse() + err = pt.pollingError() + } + return err +} + +// attempts to unmarshal a ServiceError type from the response body. +// if that fails then make a best attempt at creating something meaningful. +// NOTE: this assumes that the async operation has failed. 
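Before the error-unmarshalling helper below, here is the body shape that getProvisioningState above expects, for reference (a minimal sketch with an invented payload):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        // a 200/201 body like this drives the state machine in initializeState
        rawBody := map[string]interface{}{}
        payload := `{"properties":{"provisioningState":"Succeeded"}}`
        if err := json.Unmarshal([]byte(payload), &rawBody); err == nil {
            props := rawBody["properties"].(map[string]interface{})
            fmt.Println(props["provisioningState"]) // Succeeded
        }
    }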
+func (pt *pollingTrackerBase) updateErrorFromResponse() { + var err error + if pt.resp.ContentLength != 0 { + type respErr struct { + ServiceError *ServiceError `json:"error"` + } + re := respErr{} + defer pt.resp.Body.Close() + var b []byte + if b, err = ioutil.ReadAll(pt.resp.Body); err != nil || len(b) == 0 { + goto Default + } + if err = json.Unmarshal(b, &re); err != nil { + goto Default + } + // unmarshalling the error didn't yield anything, try unwrapped error + if re.ServiceError == nil { + err = json.Unmarshal(b, &re.ServiceError) + if err != nil { + goto Default + } + } + // the unmarshaller will ensure re.ServiceError is non-nil + // even if there was no content unmarshalled so check the code. + if re.ServiceError.Code != "" { + pt.Err = re.ServiceError + return + } + } +Default: + se := &ServiceError{ + Code: pt.pollingStatus(), + Message: "The async operation failed.", + } + if err != nil { + se.InnerError = make(map[string]interface{}) + se.InnerError["unmarshalError"] = err.Error() + } + // stick the response body into the error object in hopes + // it contains something useful to help diagnose the failure. + if len(pt.rawBody) > 0 { + se.AdditionalInfo = []map[string]interface{}{ + pt.rawBody, + } + } + pt.Err = se +} + +func (pt *pollingTrackerBase) updatePollingState(provStateApl bool) error { + if pt.Pm == PollingAsyncOperation && pt.rawBody["status"] != nil { + pt.State = pt.rawBody["status"].(string) + } else { + if pt.resp.StatusCode == http.StatusAccepted { + pt.State = operationInProgress + } else if provStateApl { + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + } else { + pt.State = operationSucceeded + } + } else { + return autorest.NewError("pollingTrackerBase", "updatePollingState", "the response from the async operation has an invalid status code") + } + } + // if the operation has failed update the error state + if pt.hasFailed() { + pt.updateErrorFromResponse() + } + return nil +} + +func (pt pollingTrackerBase) pollingError() error { + if pt.Err == nil { + return nil + } + return pt.Err +} + +func (pt pollingTrackerBase) pollingMethod() PollingMethodType { + return pt.Pm +} + +func (pt pollingTrackerBase) pollingStatus() string { + return pt.State +} + +func (pt pollingTrackerBase) pollingURL() string { + return pt.URI +} + +func (pt pollingTrackerBase) finalGetURL() string { + return pt.FinalGetURI +} + +func (pt pollingTrackerBase) hasTerminated() bool { + return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) || strings.EqualFold(pt.State, operationSucceeded) +} + +func (pt pollingTrackerBase) hasFailed() bool { + return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) +} + +func (pt pollingTrackerBase) hasSucceeded() bool { + return strings.EqualFold(pt.State, operationSucceeded) +} + +func (pt pollingTrackerBase) latestResponse() *http.Response { + return pt.resp +} + +// error checking common to all trackers +func (pt pollingTrackerBase) baseCheckForErrors() error { + // for Azure-AsyncOperations the response body cannot be nil or empty + if pt.Pm == PollingAsyncOperation { + if pt.resp.Body == nil || pt.resp.ContentLength == 0 { + return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "for Azure-AsyncOperation response body cannot be nil") + } + if pt.rawBody["status"] == nil { + return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "missing status property in Azure-AsyncOperation response body") + } + } + 
return nil +} + +// default initialization of polling URL/method. each verb tracker will update this as required. +func (pt *pollingTrackerBase) initPollingMethod() error { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + return nil + } + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh != "" { + pt.URI = lh + pt.Pm = PollingLocation + return nil + } + // it's ok if we didn't find a polling header, this will be handled elsewhere + return nil +} + +// DELETE + +type pollingTrackerDelete struct { + pollingTrackerBase +} + +func (pt *pollingTrackerDelete) updatePollingMethod() error { + // for 201 the Location header is required + if pt.resp.StatusCode == http.StatusCreated { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh == "" { + return autorest.NewError("pollingTrackerDelete", "updateHeaders", "missing Location header in 201 response") + } else { + pt.URI = lh + } + pt.Pm = PollingLocation + pt.FinalGetURI = pt.URI + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. + if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation + } + // when both headers are returned we use the value in the Location header for the final GET + pt.FinalGetURI = lh + } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerDelete", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } + } + return nil +} + +func (pt pollingTrackerDelete) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerDelete) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent +} + +// PATCH + +type pollingTrackerPatch struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPatch) updatePollingMethod() error { + // by default we can use the original URL for polling and final GET + if pt.URI == "" { + pt.URI = pt.resp.Request.URL.String() + } + if pt.FinalGetURI == "" { + pt.FinalGetURI = pt.resp.Request.URL.String() + } + if pt.Pm == PollingUnknown { + pt.Pm = PollingRequestURI + } + // for 201 it's permissible for no headers to be returned + if pt.resp.StatusCode == http.StatusCreated { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + // note the absence of the "final GET" mechanism for PATCH + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + if ao == "" { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh == "" { + return autorest.NewError("pollingTrackerPatch", "updateHeaders", "didn't get any suitable polling URLs
in 202 response") + } else { + pt.URI = lh + pt.Pm = PollingLocation + } + } + } + return nil +} + +func (pt pollingTrackerPatch) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerPatch) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated +} + +// POST + +type pollingTrackerPost struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPost) updatePollingMethod() error { + // 201 requires Location header + if pt.resp.StatusCode == http.StatusCreated { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "missing Location header in 201 response") + } else { + pt.URI = lh + pt.FinalGetURI = lh + pt.Pm = PollingLocation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. + if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation + } + // when both headers are returned we use the value in the Location header for the final GET + pt.FinalGetURI = lh + } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } + } + return nil +} + +func (pt pollingTrackerPost) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerPost) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent +} + +// PUT + +type pollingTrackerPut struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPut) updatePollingMethod() error { + // by default we can use the original URL for polling and final GET + if pt.URI == "" { + pt.URI = pt.resp.Request.URL.String() + } + if pt.FinalGetURI == "" { + pt.FinalGetURI = pt.resp.Request.URL.String() + } + if pt.Pm == PollingUnknown { + pt.Pm = PollingRequestURI + } + // for 201 it's permissible for no headers to be returned + if pt.resp.StatusCode == http.StatusCreated { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. 
+ if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" {
+ return err
+ } else if lh != "" {
+ if ao == "" {
+ pt.URI = lh
+ pt.Pm = PollingLocation
+ }
+ }
+ // make sure a polling URL was found
+ if pt.URI == "" {
+ return autorest.NewError("pollingTrackerPut", "updateHeaders", "didn't get any suitable polling URLs in 202 response")
+ }
+ }
+ return nil
+}
+
+func (pt pollingTrackerPut) checkForErrors() error {
+ err := pt.baseCheckForErrors()
+ if err != nil {
+ return err
+ }
+ // if there are no LRO headers then the body cannot be empty
+ ao, err := getURLFromAsyncOpHeader(pt.resp)
+ if err != nil {
+ return err
+ }
+ lh, err := getURLFromLocationHeader(pt.resp)
+ if err != nil {
+ return err
+ }
+ if ao == "" && lh == "" && len(pt.rawBody) == 0 {
+ return autorest.NewError("pollingTrackerPut", "checkForErrors", "the response did not contain a body")
+ }
+ return nil
+}
+
+func (pt pollingTrackerPut) provisioningStateApplicable() bool {
+ return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated
+}
+
+// creates a polling tracker based on the verb of the original request
+func createPollingTracker(resp *http.Response) (pollingTracker, error) {
+ var pt pollingTracker
+ switch strings.ToUpper(resp.Request.Method) {
+ case http.MethodDelete:
+ pt = &pollingTrackerDelete{pollingTrackerBase: pollingTrackerBase{resp: resp}}
+ case http.MethodPatch:
+ pt = &pollingTrackerPatch{pollingTrackerBase: pollingTrackerBase{resp: resp}}
+ case http.MethodPost:
+ pt = &pollingTrackerPost{pollingTrackerBase: pollingTrackerBase{resp: resp}}
+ case http.MethodPut:
+ pt = &pollingTrackerPut{pollingTrackerBase: pollingTrackerBase{resp: resp}}
+ default:
+ return nil, autorest.NewError("azure", "createPollingTracker", "unsupported HTTP method %s", resp.Request.Method)
+ }
+ if err := pt.initializeState(); err != nil {
+ return pt, err
+ }
+ // this initializes the polling header values, we do this during creation in case the
+ // initial response sends us invalid values; this way the API call will return a non-nil
+ // error (not doing this means the error shows up in Future.Done)
+ return pt, pt.updatePollingMethod()
+}
+
+// gets the polling URL from the Azure-AsyncOperation header.
+// ensures the URL is well-formed and absolute.
+func getURLFromAsyncOpHeader(resp *http.Response) (string, error) {
+ s := resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation))
+ if s == "" {
+ return "", nil
+ }
+ if !isValidURL(s) {
+ return "", autorest.NewError("azure", "getURLFromAsyncOpHeader", "invalid polling URL '%s'", s)
+ }
+ return s, nil
+}
+
+// gets the polling URL from the Location header.
+// ensures the URL is well-formed and absolute.
+func getURLFromLocationHeader(resp *http.Response) (string, error) {
+ s := resp.Header.Get(http.CanonicalHeaderKey(autorest.HeaderLocation))
+ if s == "" {
+ return "", nil
+ }
+ if !isValidURL(s) {
+ return "", autorest.NewError("azure", "getURLFromLocationHeader", "invalid polling URL '%s'", s)
+ }
+ return s, nil
+}
+
+// verify that the URL is valid and absolute
+func isValidURL(s string) bool {
+ u, err := url.Parse(s)
+ return err == nil && u.IsAbs()
+}
+
+// PollingMethodType defines a type used for enumerating polling mechanisms.
+type PollingMethodType string
+
+const (
+ // PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header.
+ PollingAsyncOperation PollingMethodType = "AsyncOperation"
+
+ // PollingLocation indicates the polling method uses the Location header.
+ PollingLocation PollingMethodType = "Location"
+
+ // PollingRequestURI indicates the polling method uses the original request URI.
+ PollingRequestURI PollingMethodType = "RequestURI"
+
+ // PollingUnknown indicates an unknown polling method and is the default value.
+ PollingUnknown PollingMethodType = ""
+)
+
+// AsyncOpIncompleteError is the type that's returned from a future that has not completed.
+type AsyncOpIncompleteError struct {
+ // FutureType is the name of the type composed of an azure.Future.
+ FutureType string
+}
+
+// Error returns an error message including the originating type name of the error.
+func (e AsyncOpIncompleteError) Error() string {
+ return fmt.Sprintf("%s: asynchronous operation has not completed", e.FutureType)
+}
+
+// NewAsyncOpIncompleteError creates a new AsyncOpIncompleteError with the specified parameters.
+func NewAsyncOpIncompleteError(futureType string) AsyncOpIncompleteError {
+ return AsyncOpIncompleteError{
+ FutureType: futureType,
+ }
+} diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go new file mode 100644 index 000000000..26be936b7 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go @@ -0,0 +1,335 @@
+// Package azure provides Azure-specific implementations used with AutoRest.
+// See the included examples for more detail.
+package azure
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/Azure/go-autorest/autorest"
+)
+
+const (
+ // HeaderClientID is the Azure extension header to set a user-specified request ID.
+ HeaderClientID = "x-ms-client-request-id"
+
+ // HeaderReturnClientID is the Azure extension header to set if the user-specified request ID
+ // should be included in the response.
+ HeaderReturnClientID = "x-ms-return-client-request-id"
+
+ // HeaderRequestID is the Azure extension header of the service generated request ID returned
+ // in the response.
+ HeaderRequestID = "x-ms-request-id"
+)
+
+// ServiceError encapsulates the error response from an Azure service.
+// It adheres to the OData v4 specification for error responses.
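+//
+// An illustrative sketch (not upstream code; the payload values are hypothetical) of the
+// OData v4 wire format this type models. ServiceError maps the inner "error" object:
+//
+//   {
+//     "error": {
+//       "code": "ResourceNotFound",
+//       "message": "The requested resource was not found.",
+//       "target": "example"
+//     }
+//   }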
+type ServiceError struct {
+ Code string `json:"code"`
+ Message string `json:"message"`
+ Target *string `json:"target"`
+ Details []map[string]interface{} `json:"details"`
+ InnerError map[string]interface{} `json:"innererror"`
+ AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
+}
+
+func (se ServiceError) Error() string {
+ result := fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message)
+
+ if se.Target != nil {
+ result += fmt.Sprintf(" Target=%q", *se.Target)
+ }
+
+ if se.Details != nil {
+ if d, err := json.Marshal(se.Details); err != nil {
+ result += fmt.Sprintf(" Details=%v", se.Details)
+ } else {
+ result += fmt.Sprintf(" Details=%v", string(d))
+ }
+ }
+
+ if se.InnerError != nil {
+ if d, err := json.Marshal(se.InnerError); err != nil {
+ result += fmt.Sprintf(" InnerError=%v", se.InnerError)
+ } else {
+ result += fmt.Sprintf(" InnerError=%v", string(d))
+ }
+ }
+
+ if se.AdditionalInfo != nil {
+ if d, err := json.Marshal(se.AdditionalInfo); err != nil {
+ result += fmt.Sprintf(" AdditionalInfo=%v", se.AdditionalInfo)
+ } else {
+ result += fmt.Sprintf(" AdditionalInfo=%v", string(d))
+ }
+ }
+
+ return result
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type.
+func (se *ServiceError) UnmarshalJSON(b []byte) error {
+ // per the OData v4 spec the details field must be an array of JSON objects.
+ // unfortunately not all services adhere to the spec and just return a single
+ // object instead of an array with one object. so we have to perform some
+ // shenanigans to accommodate both cases.
+ // http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091
+
+ type serviceError1 struct {
+ Code string `json:"code"`
+ Message string `json:"message"`
+ Target *string `json:"target"`
+ Details []map[string]interface{} `json:"details"`
+ InnerError map[string]interface{} `json:"innererror"`
+ AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
+ }
+
+ type serviceError2 struct {
+ Code string `json:"code"`
+ Message string `json:"message"`
+ Target *string `json:"target"`
+ Details map[string]interface{} `json:"details"`
+ InnerError map[string]interface{} `json:"innererror"`
+ AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
+ }
+
+ se1 := serviceError1{}
+ err := json.Unmarshal(b, &se1)
+ if err == nil {
+ se.populate(se1.Code, se1.Message, se1.Target, se1.Details, se1.InnerError, se1.AdditionalInfo)
+ return nil
+ }
+
+ se2 := serviceError2{}
+ err = json.Unmarshal(b, &se2)
+ if err == nil {
+ se.populate(se2.Code, se2.Message, se2.Target, nil, se2.InnerError, se2.AdditionalInfo)
+ se.Details = append(se.Details, se2.Details)
+ return nil
+ }
+ return err
+}
+
+func (se *ServiceError) populate(code, message string, target *string, details []map[string]interface{}, inner map[string]interface{}, additional []map[string]interface{}) {
+ se.Code = code
+ se.Message = message
+ se.Target = target
+ se.Details = details
+ se.InnerError = inner
+ se.AdditionalInfo = additional
+}
+
+// RequestError describes an error response returned by an Azure service.
+type RequestError struct {
+ autorest.DetailedError
+
+ // The error returned by the Azure service.
+ ServiceError *ServiceError `json:"error" xml:"Error"`
+
+ // The request id (from the x-ms-request-id header) of the request.
+ RequestID string
+}
+
+// Error returns a human-friendly error message from the service error.
+func (e RequestError) Error() string {
+ return fmt.Sprintf("autorest/azure: Service returned an error.
Status=%v %v", + e.StatusCode, e.ServiceError) +} + +// IsAzureError returns true if the passed error is an Azure Service error; false otherwise. +func IsAzureError(e error) bool { + _, ok := e.(*RequestError) + return ok +} + +// Resource contains details about an Azure resource. +type Resource struct { + SubscriptionID string + ResourceGroup string + Provider string + ResourceType string + ResourceName string +} + +// ParseResourceID parses a resource ID into a ResourceDetails struct. +// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-template-functions-resource#return-value-4. +func ParseResourceID(resourceID string) (Resource, error) { + + const resourceIDPatternText = `(?i)subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)` + resourceIDPattern := regexp.MustCompile(resourceIDPatternText) + match := resourceIDPattern.FindStringSubmatch(resourceID) + + if len(match) == 0 { + return Resource{}, fmt.Errorf("parsing failed for %s. Invalid resource Id format", resourceID) + } + + v := strings.Split(match[5], "/") + resourceName := v[len(v)-1] + + result := Resource{ + SubscriptionID: match[1], + ResourceGroup: match[2], + Provider: match[3], + ResourceType: match[4], + ResourceName: resourceName, + } + + return result, nil +} + +// NewErrorWithError creates a new Error conforming object from the +// passed packageType, method, statusCode of the given resp (UndefinedStatusCode +// if resp is nil), message, and original error. message is treated as a format +// string to which the optional args apply. +func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError { + if v, ok := original.(*RequestError); ok { + return *v + } + + statusCode := autorest.UndefinedStatusCode + if resp != nil { + statusCode = resp.StatusCode + } + return RequestError{ + DetailedError: autorest.DetailedError{ + Original: original, + PackageType: packageType, + Method: method, + StatusCode: statusCode, + Message: fmt.Sprintf(message, args...), + }, + } +} + +// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id +// header to true such that UUID accompanies the http.Response. +func WithReturningClientID(uuid string) autorest.PrepareDecorator { + preparer := autorest.CreatePreparer( + WithClientID(uuid), + WithReturnClientID(true)) + + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + return preparer.Prepare(r) + }) + } +} + +// WithClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). +func WithClientID(uuid string) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderClientID, uuid) +} + +// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-return-client-request-id whose boolean value indicates if the value of the +// x-ms-client-request-id header should be included in the http.Response. 
+func WithReturnClientID(b bool) autorest.PrepareDecorator {
+ return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b))
+}
+
+// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the
+// http.Request sent to the service (and returned in the http.Response).
+func ExtractClientID(resp *http.Response) string {
+ return autorest.ExtractHeaderValue(HeaderClientID, resp)
+}
+
+// ExtractRequestID extracts the Azure server generated request identifier from the
+// x-ms-request-id header.
+func ExtractRequestID(resp *http.Response) string {
+ return autorest.ExtractHeaderValue(HeaderRequestID, resp)
+}
+
+// WithErrorUnlessStatusCode returns a RespondDecorator that emits an
+// azure.RequestError by reading the response body unless the response HTTP status code
+// is among the set passed.
+//
+// If there is a chance the service may return responses other than the Azure error
+// format and the response cannot be parsed into an error, a decoding error will
+// be returned containing the response body. In any case, the Responder will
+// return an error if the status code is not satisfied.
+//
+// If this Responder returns an error, the response body will be replaced with
+// an in-memory reader, which needs no further closing.
+func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator {
+ return func(r autorest.Responder) autorest.Responder {
+ return autorest.ResponderFunc(func(resp *http.Response) error {
+ err := r.Respond(resp)
+ if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) {
+ var e RequestError
+ defer resp.Body.Close()
+
+ encodedAs := autorest.EncodedAsJSON
+ if strings.Contains(resp.Header.Get("Content-Type"), "xml") {
+ encodedAs = autorest.EncodedAsXML
+ }
+
+ // Copy and replace the Body in case it does not contain an error object.
+ // This will leave the Body available to the caller.
+ b, decodeErr := autorest.CopyAndDecode(encodedAs, resp.Body, &e)
+ resp.Body = ioutil.NopCloser(&b)
+ if decodeErr != nil {
+ return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr)
+ }
+ if e.ServiceError == nil {
+ // Check if error is unwrapped ServiceError
+ decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes()))
+ if err := decoder.Decode(&e.ServiceError); err != nil {
+ return err
+ }
+ }
+ if e.ServiceError.Message == "" {
+ // if we're here it means the returned error wasn't OData v4 compliant.
+ // try to unmarshal the body in hopes of getting something.
+ rawBody := map[string]interface{}{} + decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes())) + if err := decoder.Decode(&rawBody); err != nil { + return err + } + + e.ServiceError = &ServiceError{ + Code: "Unknown", + Message: "Unknown service error", + } + if len(rawBody) > 0 { + e.ServiceError.Details = []map[string]interface{}{rawBody} + } + } + e.Response = resp + e.RequestID = ExtractRequestID(resp) + if e.StatusCode == nil { + e.StatusCode = resp.StatusCode + } + err = &e + } + return err + }) + } +} diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go new file mode 100644 index 000000000..6c20b8179 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -0,0 +1,244 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "strings" +) + +const ( + // EnvironmentFilepathName captures the name of the environment variable containing the path to the file + // to be used while populating the Azure Environment. + EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH" + + // NotAvailable is used for endpoints and resource IDs that are not available for a given cloud. + NotAvailable = "N/A" +) + +var environments = map[string]Environment{ + "AZURECHINACLOUD": ChinaCloud, + "AZUREGERMANCLOUD": GermanCloud, + "AZUREPUBLICCLOUD": PublicCloud, + "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, +} + +// ResourceIdentifier contains a set of Azure resource IDs. +type ResourceIdentifier struct { + Graph string `json:"graph"` + KeyVault string `json:"keyVault"` + Datalake string `json:"datalake"` + Batch string `json:"batch"` + OperationalInsights string `json:"operationalInsights"` + Storage string `json:"storage"` +} + +// Environment represents a set of endpoints for each of Azure's Clouds. 
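+//
+// An illustrative usage sketch (not upstream code): looking up a well-known cloud by
+// name and reading one of its endpoints:
+//
+//   env, err := azure.EnvironmentFromName("AzurePublicCloud")
+//   if err != nil {
+//       // handle the unknown-environment error
+//   }
+//   _ = env.ResourceManagerEndpoint // "https://management.azure.com/"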
+type Environment struct { + Name string `json:"name"` + ManagementPortalURL string `json:"managementPortalURL"` + PublishSettingsURL string `json:"publishSettingsURL"` + ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` + ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` + ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` + GalleryEndpoint string `json:"galleryEndpoint"` + KeyVaultEndpoint string `json:"keyVaultEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + ServiceBusEndpoint string `json:"serviceBusEndpoint"` + BatchManagementEndpoint string `json:"batchManagementEndpoint"` + StorageEndpointSuffix string `json:"storageEndpointSuffix"` + SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` + TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` + KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` + ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` + ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` + ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` + ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` + CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"` + TokenAudience string `json:"tokenAudience"` + ResourceIdentifiers ResourceIdentifier `json:"resourceIdentifiers"` +} + +var ( + // PublicCloud is the default public Azure cloud environment + PublicCloud = Environment{ + Name: "AzurePublicCloud", + ManagementPortalURL: "https://manage.windowsazure.com/", + PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.windows.net/", + ResourceManagerEndpoint: "https://management.azure.com/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + GalleryEndpoint: "https://gallery.azure.com/", + KeyVaultEndpoint: "https://vault.azure.net/", + GraphEndpoint: "https://graph.windows.net/", + ServiceBusEndpoint: "https://servicebus.windows.net/", + BatchManagementEndpoint: "https://batch.core.windows.net/", + StorageEndpointSuffix: "core.windows.net", + SQLDatabaseDNSSuffix: "database.windows.net", + TrafficManagerDNSSuffix: "trafficmanager.net", + KeyVaultDNSSuffix: "vault.azure.net", + ServiceBusEndpointSuffix: "servicebus.windows.net", + ServiceManagementVMDNSSuffix: "cloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.azure.com", + ContainerRegistryDNSSuffix: "azurecr.io", + CosmosDBDNSSuffix: "documents.azure.com", + TokenAudience: "https://management.azure.com/", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.windows.net/", + KeyVault: "https://vault.azure.net", + Datalake: "https://datalake.azure.net/", + Batch: "https://batch.core.windows.net/", + OperationalInsights: "https://api.loganalytics.io", + Storage: "https://storage.azure.com/", + }, + } + + // USGovernmentCloud is the cloud environment for the US Government + USGovernmentCloud = Environment{ + Name: "AzureUSGovernmentCloud", + ManagementPortalURL: "https://manage.windowsazure.us/", + PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", + ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.us/", + GalleryEndpoint: "https://gallery.usgovcloudapi.net/", + KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", + GraphEndpoint: "https://graph.windows.net/", + ServiceBusEndpoint: 
"https://servicebus.usgovcloudapi.net/", + BatchManagementEndpoint: "https://batch.core.usgovcloudapi.net/", + StorageEndpointSuffix: "core.usgovcloudapi.net", + SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", + TrafficManagerDNSSuffix: "usgovtrafficmanager.net", + KeyVaultDNSSuffix: "vault.usgovcloudapi.net", + ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", + ServiceManagementVMDNSSuffix: "usgovcloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us", + ContainerRegistryDNSSuffix: "azurecr.us", + CosmosDBDNSSuffix: "documents.azure.us", + TokenAudience: "https://management.usgovcloudapi.net/", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.windows.net/", + KeyVault: "https://vault.usgovcloudapi.net", + Datalake: NotAvailable, + Batch: "https://batch.core.usgovcloudapi.net/", + OperationalInsights: "https://api.loganalytics.us", + Storage: "https://storage.azure.com/", + }, + } + + // ChinaCloud is the cloud environment operated in China + ChinaCloud = Environment{ + Name: "AzureChinaCloud", + ManagementPortalURL: "https://manage.chinacloudapi.com/", + PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", + ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", + ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/", + GalleryEndpoint: "https://gallery.chinacloudapi.cn/", + KeyVaultEndpoint: "https://vault.azure.cn/", + GraphEndpoint: "https://graph.chinacloudapi.cn/", + ServiceBusEndpoint: "https://servicebus.chinacloudapi.cn/", + BatchManagementEndpoint: "https://batch.chinacloudapi.cn/", + StorageEndpointSuffix: "core.chinacloudapi.cn", + SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", + TrafficManagerDNSSuffix: "trafficmanager.cn", + KeyVaultDNSSuffix: "vault.azure.cn", + ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn", + ServiceManagementVMDNSSuffix: "chinacloudapp.cn", + ResourceManagerVMDNSSuffix: "cloudapp.azure.cn", + ContainerRegistryDNSSuffix: "azurecr.cn", + CosmosDBDNSSuffix: "documents.azure.cn", + TokenAudience: "https://management.chinacloudapi.cn/", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.chinacloudapi.cn/", + KeyVault: "https://vault.azure.cn", + Datalake: NotAvailable, + Batch: "https://batch.chinacloudapi.cn/", + OperationalInsights: NotAvailable, + Storage: "https://storage.azure.com/", + }, + } + + // GermanCloud is the cloud environment operated in Germany + GermanCloud = Environment{ + Name: "AzureGermanCloud", + ManagementPortalURL: "http://portal.microsoftazure.de/", + PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.cloudapi.de/", + ResourceManagerEndpoint: "https://management.microsoftazure.de/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", + GalleryEndpoint: "https://gallery.cloudapi.de/", + KeyVaultEndpoint: "https://vault.microsoftazure.de/", + GraphEndpoint: "https://graph.cloudapi.de/", + ServiceBusEndpoint: "https://servicebus.cloudapi.de/", + BatchManagementEndpoint: "https://batch.cloudapi.de/", + StorageEndpointSuffix: "core.cloudapi.de", + SQLDatabaseDNSSuffix: "database.cloudapi.de", + TrafficManagerDNSSuffix: "azuretrafficmanager.de", + KeyVaultDNSSuffix: "vault.microsoftazure.de", + ServiceBusEndpointSuffix: "servicebus.cloudapi.de", + ServiceManagementVMDNSSuffix: "azurecloudapp.de", + ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", + 
ContainerRegistryDNSSuffix: NotAvailable,
+ CosmosDBDNSSuffix: "documents.microsoftazure.de",
+ TokenAudience: "https://management.microsoftazure.de/",
+ ResourceIdentifiers: ResourceIdentifier{
+ Graph: "https://graph.cloudapi.de/",
+ KeyVault: "https://vault.microsoftazure.de",
+ Datalake: NotAvailable,
+ Batch: "https://batch.cloudapi.de/",
+ OperationalInsights: NotAvailable,
+ Storage: "https://storage.azure.com/",
+ },
+ }
+)
+
+// EnvironmentFromName returns an Environment based on the common name specified.
+func EnvironmentFromName(name string) (Environment, error) {
+ // IMPORTANT
+ // As per @radhikagupta5:
+ // This is technical debt, fundamentally here because Kubernetes is not currently accepting
+ // contributions to the providers. Once that is an option, the provider should be updated to
+ // directly call `EnvironmentFromFile`. Until then, we rely on dispatching Azure Stack environment creation
+ // from this method based on the name that is provided to us.
+ if strings.EqualFold(name, "AZURESTACKCLOUD") {
+ return EnvironmentFromFile(os.Getenv(EnvironmentFilepathName))
+ }
+
+ name = strings.ToUpper(name)
+ env, ok := environments[name]
+ if !ok {
+ return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name)
+ }
+
+ return env, nil
+}
+
+// EnvironmentFromFile loads an Environment from a configuration file available on disk.
+// This function is particularly useful in the Hybrid Cloud model, where one must define their own
+// endpoints.
+func EnvironmentFromFile(location string) (unmarshaled Environment, err error) {
+ fileContents, err := ioutil.ReadFile(location)
+ if err != nil {
+ return
+ }
+
+ err = json.Unmarshal(fileContents, &unmarshaled)
+
+ return
+} diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go new file mode 100644 index 000000000..507f9e95c --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go @@ -0,0 +1,245 @@
+package azure
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "strings"
+
+ "github.com/Azure/go-autorest/autorest"
+)
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+type audience []string
+
+type authentication struct {
+ LoginEndpoint string `json:"loginEndpoint"`
+ Audiences audience `json:"audiences"`
+}
+
+type environmentMetadataInfo struct {
+ GalleryEndpoint string `json:"galleryEndpoint"`
+ GraphEndpoint string `json:"graphEndpoint"`
+ PortalEndpoint string `json:"portalEndpoint"`
+ Authentication authentication `json:"authentication"`
+}
+
+// EnvironmentProperty represents property names that clients can override
+type EnvironmentProperty string
+
+const (
+ // EnvironmentName ...
+ EnvironmentName EnvironmentProperty = "name"
+ // EnvironmentManagementPortalURL ...
+ EnvironmentManagementPortalURL EnvironmentProperty = "managementPortalURL"
+ // EnvironmentPublishSettingsURL ...
+ EnvironmentPublishSettingsURL EnvironmentProperty = "publishSettingsURL"
+ // EnvironmentServiceManagementEndpoint ...
+ EnvironmentServiceManagementEndpoint EnvironmentProperty = "serviceManagementEndpoint"
+ // EnvironmentResourceManagerEndpoint ...
+ EnvironmentResourceManagerEndpoint EnvironmentProperty = "resourceManagerEndpoint"
+ // EnvironmentActiveDirectoryEndpoint ...
+ EnvironmentActiveDirectoryEndpoint EnvironmentProperty = "activeDirectoryEndpoint"
+ // EnvironmentGalleryEndpoint ...
+ EnvironmentGalleryEndpoint EnvironmentProperty = "galleryEndpoint"
+ // EnvironmentKeyVaultEndpoint ...
+ EnvironmentKeyVaultEndpoint EnvironmentProperty = "keyVaultEndpoint"
+ // EnvironmentGraphEndpoint ...
+ EnvironmentGraphEndpoint EnvironmentProperty = "graphEndpoint"
+ // EnvironmentServiceBusEndpoint ...
+ EnvironmentServiceBusEndpoint EnvironmentProperty = "serviceBusEndpoint"
+ // EnvironmentBatchManagementEndpoint ...
+ EnvironmentBatchManagementEndpoint EnvironmentProperty = "batchManagementEndpoint"
+ // EnvironmentStorageEndpointSuffix ...
+ EnvironmentStorageEndpointSuffix EnvironmentProperty = "storageEndpointSuffix"
+ // EnvironmentSQLDatabaseDNSSuffix ...
+ EnvironmentSQLDatabaseDNSSuffix EnvironmentProperty = "sqlDatabaseDNSSuffix"
+ // EnvironmentTrafficManagerDNSSuffix ...
+ EnvironmentTrafficManagerDNSSuffix EnvironmentProperty = "trafficManagerDNSSuffix"
+ // EnvironmentKeyVaultDNSSuffix ...
+ EnvironmentKeyVaultDNSSuffix EnvironmentProperty = "keyVaultDNSSuffix"
+ // EnvironmentServiceBusEndpointSuffix ...
+ EnvironmentServiceBusEndpointSuffix EnvironmentProperty = "serviceBusEndpointSuffix"
+ // EnvironmentServiceManagementVMDNSSuffix ...
+ EnvironmentServiceManagementVMDNSSuffix EnvironmentProperty = "serviceManagementVMDNSSuffix"
+ // EnvironmentResourceManagerVMDNSSuffix ...
+ EnvironmentResourceManagerVMDNSSuffix EnvironmentProperty = "resourceManagerVMDNSSuffix"
+ // EnvironmentContainerRegistryDNSSuffix ...
+ EnvironmentContainerRegistryDNSSuffix EnvironmentProperty = "containerRegistryDNSSuffix"
+ // EnvironmentTokenAudience ...
+ EnvironmentTokenAudience EnvironmentProperty = "tokenAudience"
+)
+
+// OverrideProperty represents property name and value that clients can override
+type OverrideProperty struct {
+ Key EnvironmentProperty
+ Value string
+}
+
+// EnvironmentFromURL loads an Environment from a URL.
+// This function is particularly useful in the Hybrid Cloud model, where one may define their own
+// endpoints.
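+//
+// An illustrative usage sketch (not upstream code; the endpoint and override value are
+// hypothetical):
+//
+//   env, err := azure.EnvironmentFromURL(
+//       "https://management.myhybrid.example/",
+//       azure.OverrideProperty{Key: azure.EnvironmentName, Value: "MyHybridCloud"},
+//   )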
+func EnvironmentFromURL(resourceManagerEndpoint string, properties ...OverrideProperty) (environment Environment, err error) { + var metadataEnvProperties environmentMetadataInfo + + if resourceManagerEndpoint == "" { + return environment, fmt.Errorf("Metadata resource manager endpoint is empty") + } + + if metadataEnvProperties, err = retrieveMetadataEnvironment(resourceManagerEndpoint); err != nil { + return environment, err + } + + // Give priority to user's override values + overrideProperties(&environment, properties) + + if environment.Name == "" { + environment.Name = "HybridEnvironment" + } + stampDNSSuffix := environment.StorageEndpointSuffix + if stampDNSSuffix == "" { + stampDNSSuffix = strings.TrimSuffix(strings.TrimPrefix(strings.Replace(resourceManagerEndpoint, strings.Split(resourceManagerEndpoint, ".")[0], "", 1), "."), "/") + environment.StorageEndpointSuffix = stampDNSSuffix + } + if environment.KeyVaultDNSSuffix == "" { + environment.KeyVaultDNSSuffix = fmt.Sprintf("%s.%s", "vault", stampDNSSuffix) + } + if environment.KeyVaultEndpoint == "" { + environment.KeyVaultEndpoint = fmt.Sprintf("%s%s", "https://", environment.KeyVaultDNSSuffix) + } + if environment.TokenAudience == "" { + environment.TokenAudience = metadataEnvProperties.Authentication.Audiences[0] + } + if environment.ActiveDirectoryEndpoint == "" { + environment.ActiveDirectoryEndpoint = metadataEnvProperties.Authentication.LoginEndpoint + } + if environment.ResourceManagerEndpoint == "" { + environment.ResourceManagerEndpoint = resourceManagerEndpoint + } + if environment.GalleryEndpoint == "" { + environment.GalleryEndpoint = metadataEnvProperties.GalleryEndpoint + } + if environment.GraphEndpoint == "" { + environment.GraphEndpoint = metadataEnvProperties.GraphEndpoint + } + + return environment, nil +} + +func overrideProperties(environment *Environment, properties []OverrideProperty) { + for _, property := range properties { + switch property.Key { + case EnvironmentName: + { + environment.Name = property.Value + } + case EnvironmentManagementPortalURL: + { + environment.ManagementPortalURL = property.Value + } + case EnvironmentPublishSettingsURL: + { + environment.PublishSettingsURL = property.Value + } + case EnvironmentServiceManagementEndpoint: + { + environment.ServiceManagementEndpoint = property.Value + } + case EnvironmentResourceManagerEndpoint: + { + environment.ResourceManagerEndpoint = property.Value + } + case EnvironmentActiveDirectoryEndpoint: + { + environment.ActiveDirectoryEndpoint = property.Value + } + case EnvironmentGalleryEndpoint: + { + environment.GalleryEndpoint = property.Value + } + case EnvironmentKeyVaultEndpoint: + { + environment.KeyVaultEndpoint = property.Value + } + case EnvironmentGraphEndpoint: + { + environment.GraphEndpoint = property.Value + } + case EnvironmentServiceBusEndpoint: + { + environment.ServiceBusEndpoint = property.Value + } + case EnvironmentBatchManagementEndpoint: + { + environment.BatchManagementEndpoint = property.Value + } + case EnvironmentStorageEndpointSuffix: + { + environment.StorageEndpointSuffix = property.Value + } + case EnvironmentSQLDatabaseDNSSuffix: + { + environment.SQLDatabaseDNSSuffix = property.Value + } + case EnvironmentTrafficManagerDNSSuffix: + { + environment.TrafficManagerDNSSuffix = property.Value + } + case EnvironmentKeyVaultDNSSuffix: + { + environment.KeyVaultDNSSuffix = property.Value + } + case EnvironmentServiceBusEndpointSuffix: + { + environment.ServiceBusEndpointSuffix = property.Value + } + case 
EnvironmentServiceManagementVMDNSSuffix: + { + environment.ServiceManagementVMDNSSuffix = property.Value + } + case EnvironmentResourceManagerVMDNSSuffix: + { + environment.ResourceManagerVMDNSSuffix = property.Value + } + case EnvironmentContainerRegistryDNSSuffix: + { + environment.ContainerRegistryDNSSuffix = property.Value + } + case EnvironmentTokenAudience: + { + environment.TokenAudience = property.Value + } + } + } +} + +func retrieveMetadataEnvironment(endpoint string) (environment environmentMetadataInfo, err error) { + client := autorest.NewClientWithUserAgent("") + managementEndpoint := fmt.Sprintf("%s%s", strings.TrimSuffix(endpoint, "/"), "/metadata/endpoints?api-version=1.0") + req, _ := http.NewRequest("GET", managementEndpoint, nil) + response, err := client.Do(req) + if err != nil { + return environment, err + } + defer response.Body.Close() + jsonResponse, err := ioutil.ReadAll(response.Body) + if err != nil { + return environment, err + } + err = json.Unmarshal(jsonResponse, &environment) + return environment, err +} diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go new file mode 100644 index 000000000..c6d39f686 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go @@ -0,0 +1,204 @@ +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package azure + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" +) + +// DoRetryWithRegistration tries to register the resource provider in case it is unregistered. +// It also handles request retries +func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := autorest.NewRetriableRequest(r) + for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + + resp, err = autorest.SendWithSender(s, rr.Request(), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), + ) + if err != nil { + return resp, err + } + + if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration { + return resp, err + } + + var re RequestError + if strings.Contains(r.Header.Get("Content-Type"), "xml") { + // XML errors (e.g. 
Storage Data Plane) only return the inner object
+ err = autorest.Respond(resp, autorest.ByUnmarshallingXML(&re.ServiceError))
+ } else {
+ err = autorest.Respond(resp, autorest.ByUnmarshallingJSON(&re))
+ }
+
+ if err != nil {
+ return resp, err
+ }
+ err = re
+
+ if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" {
+ regErr := register(client, r, re)
+ if regErr != nil {
+ return resp, fmt.Errorf("failed auto registering Resource Provider: %s. Original error: %s", regErr, err)
+ }
+ }
+ }
+ return resp, err
+ })
+ }
+}
+
+func getProvider(re RequestError) (string, error) {
+ if re.ServiceError != nil && len(re.ServiceError.Details) > 0 {
+ return re.ServiceError.Details[0]["target"].(string), nil
+ }
+ return "", errors.New("provider was not found in the response")
+}
+
+func register(client autorest.Client, originalReq *http.Request, re RequestError) error {
+ subID := getSubscription(originalReq.URL.Path)
+ if subID == "" {
+ return errors.New("missing parameter subscriptionID to register resource provider")
+ }
+ providerName, err := getProvider(re)
+ if err != nil {
+ return fmt.Errorf("missing parameter provider to register resource provider: %s", err)
+ }
+ newURL := url.URL{
+ Scheme: originalReq.URL.Scheme,
+ Host: originalReq.URL.Host,
+ }
+
+ // taken from the resources SDK
+ // with almost identical code, these sections are easier to maintain
+ // It is also not a good idea to import the SDK here
+ // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252
+ pathParameters := map[string]interface{}{
+ "resourceProviderNamespace": autorest.Encode("path", providerName),
+ "subscriptionId": autorest.Encode("path", subID),
+ }
+
+ const APIVersion = "2016-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(newURL.String()),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters),
+ autorest.WithQueryParameters(queryParameters),
+ )
+
+ req, err := preparer.Prepare(&http.Request{})
+ if err != nil {
+ return err
+ }
+ req = req.WithContext(originalReq.Context())
+
+ resp, err := autorest.SendWithSender(client, req,
+ autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
+ )
+ if err != nil {
+ return err
+ }
+
+ type Provider struct {
+ RegistrationState *string `json:"registrationState,omitempty"`
+ }
+ var provider Provider
+
+ err = autorest.Respond(
+ resp,
+ WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&provider),
+ autorest.ByClosing(),
+ )
+ if err != nil {
+ return err
+ }
+
+ // poll for registered provisioning state
+ registrationStartTime := time.Now()
+ for err == nil && (client.PollingDuration == 0 || (client.PollingDuration != 0 && time.Since(registrationStartTime) < client.PollingDuration)) {
+ // taken from the resources SDK
+ // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(newURL.String()),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters),
+ autorest.WithQueryParameters(queryParameters),
+ )
+ req, err = preparer.Prepare(&http.Request{})
+ if err != nil {
return err
+ }
+ req = req.WithContext(originalReq.Context())
+
+ resp, err := autorest.SendWithSender(client, req,
+ autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
+ )
+ if err != nil {
+ return err
+ }
+
+ err = autorest.Respond(
+ resp,
+ WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&provider),
+ autorest.ByClosing(),
+ )
+ if err != nil {
+ return err
+ }
+
+ if provider.RegistrationState != nil &&
+ *provider.RegistrationState == "Registered" {
+ break
+ }
+
+ delayed := autorest.DelayWithRetryAfter(resp, originalReq.Context().Done())
+ if !delayed && !autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Context().Done()) {
+ return originalReq.Context().Err()
+ }
+ }
+ if client.PollingDuration != 0 && !(time.Since(registrationStartTime) < client.PollingDuration) {
+ return errors.New("polling for resource provider registration has exceeded the polling duration")
+ }
+ return err
+}
+
+func getSubscription(path string) string {
+ parts := strings.Split(path, "/")
+ for i, v := range parts {
+ if v == "subscriptions" && (i+1) < len(parts) {
+ return parts[i+1]
+ }
+ }
+ return ""
+} diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/client.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/client.go new file mode 100644 index 000000000..e04f9fd4e --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/client.go @@ -0,0 +1,323 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "bytes"
+ "crypto/tls"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/Azure/go-autorest/logger"
+)
+
+const (
+ // DefaultPollingDelay is a reasonable delay between polling requests.
+ DefaultPollingDelay = 60 * time.Second
+
+ // DefaultPollingDuration is a reasonable total polling duration.
+ DefaultPollingDuration = 15 * time.Minute
+
+ // DefaultRetryAttempts is the number of attempts for retry status codes (5xx).
+ DefaultRetryAttempts = 3
+
+ // DefaultRetryDuration is the duration to wait between retries.
+ DefaultRetryDuration = 30 * time.Second
+)
+
+var (
+ // StatusCodesForRetry are a defined group of status codes for which the client will retry
+ StatusCodesForRetry = []int{
+ http.StatusRequestTimeout, // 408
+ http.StatusTooManyRequests, // 429
+ http.StatusInternalServerError, // 500
+ http.StatusBadGateway, // 502
+ http.StatusServiceUnavailable, // 503
+ http.StatusGatewayTimeout, // 504
+ }
+)
+
+const (
+ requestFormat = `HTTP Request Begin ===================================================
+%s
+===================================================== HTTP Request End
+`
+ responseFormat = `HTTP Response Begin ===================================================
+%s
+===================================================== HTTP Response End
+`
+)
+
+// Response serves as the base for all responses from generated clients.
It provides access to the
+// last http.Response.
+type Response struct {
+ *http.Response `json:"-"`
+}
+
+// IsHTTPStatus returns true if the returned HTTP status code matches the provided status code.
+// If there was no response (i.e. the underlying http.Response is nil) the return value is false.
+func (r Response) IsHTTPStatus(statusCode int) bool {
+ if r.Response == nil {
+ return false
+ }
+ return r.Response.StatusCode == statusCode
+}
+
+// HasHTTPStatus returns true if the returned HTTP status code matches one of the provided status codes.
+// If there was no response (i.e. the underlying http.Response is nil) or no status codes are provided
+// the return value is false.
+func (r Response) HasHTTPStatus(statusCodes ...int) bool {
+ return ResponseHasStatusCode(r.Response, statusCodes...)
+}
+
+// LoggingInspector implements request and response inspectors that log the full request and
+// response to a supplied log.
+type LoggingInspector struct {
+ Logger *log.Logger
+}
+
+// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The
+// body is restored after being emitted.
+//
+// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
+// important. It is best used to trace JSON or similar body values.
+func (li LoggingInspector) WithInspection() PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ var body, b bytes.Buffer
+
+ defer r.Body.Close()
+
+ r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body))
+ if err := r.Write(&b); err != nil {
+ return nil, fmt.Errorf("Failed to write request: %v", err)
+ }
+
+ li.Logger.Printf(requestFormat, b.String())
+
+ r.Body = ioutil.NopCloser(&body)
+ return p.Prepare(r)
+ })
+ }
+}
+
+// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The
+// body is restored after being emitted.
+//
+// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
+// important. It is best used to trace JSON or similar body values.
+func (li LoggingInspector) ByInspecting() RespondDecorator {
+ return func(r Responder) Responder {
+ return ResponderFunc(func(resp *http.Response) error {
+ var body, b bytes.Buffer
+ defer resp.Body.Close()
+ resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body))
+ if err := resp.Write(&b); err != nil {
+ return fmt.Errorf("Failed to write response: %v", err)
+ }
+
+ li.Logger.Printf(responseFormat, b.String())
+
+ resp.Body = ioutil.NopCloser(&body)
+ return r.Respond(resp)
+ })
+ }
+}
+
+// Client is the base for autorest generated clients. It provides default, "do nothing"
+// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the
+// standard, undecorated http.Client as a default Sender.
+//
+// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and
+// return responses that compose with Response.
+//
+// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom
+// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit
+// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence
+// sending the request by providing a decorated Sender.
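+//
+// An illustrative usage sketch (not upstream code; the user agent string and req are
+// placeholders):
+//
+//   c := autorest.NewClientWithUserAgent("myapp/1.0")
+//   c.RetryAttempts = 5
+//   resp, err := c.Do(req) // req is a previously prepared *http.Request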
+type Client struct {
+ Authorizer Authorizer
+ Sender Sender
+ RequestInspector PrepareDecorator
+ ResponseInspector RespondDecorator
+
+ // PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header
+ PollingDelay time.Duration
+
+ // PollingDuration sets the maximum polling time after which an error is returned.
+ // Setting this to zero will use the provided context to control the duration.
+ PollingDuration time.Duration
+
+ // RetryAttempts sets the default number of retry attempts for client.
+ RetryAttempts int
+
+ // RetryDuration sets the delay duration for retries.
+ RetryDuration time.Duration
+
+ // UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent
+ // through the Do method.
+ UserAgent string
+
+ Jar http.CookieJar
+
+ // Set to true to skip attempted registration of resource providers (false by default).
+ SkipResourceProviderRegistration bool
+
+ // SendDecorators can be used to override the default chain of SendDecorators.
+ // This can be used to specify things like a custom retry SendDecorator.
+ // Set this to an empty slice to use no SendDecorators.
+ SendDecorators []SendDecorator
+}
+
+// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed
+// string.
+func NewClientWithUserAgent(ua string) Client {
+ return newClient(ua, tls.RenegotiateNever)
+}
+
+// ClientOptions contains various Client configuration options.
+type ClientOptions struct {
+ // UserAgent is an optional user-agent string to append to the default user agent.
+ UserAgent string
+
+ // Renegotiation is an optional setting to control client-side TLS renegotiation.
+ Renegotiation tls.RenegotiationSupport
+}
+
+// NewClientWithOptions returns an instance of a Client with the specified values.
+func NewClientWithOptions(options ClientOptions) Client {
+ return newClient(options.UserAgent, options.Renegotiation)
+}
+
+func newClient(ua string, renegotiation tls.RenegotiationSupport) Client {
+ c := Client{
+ PollingDelay: DefaultPollingDelay,
+ PollingDuration: DefaultPollingDuration,
+ RetryAttempts: DefaultRetryAttempts,
+ RetryDuration: DefaultRetryDuration,
+ UserAgent: UserAgent(),
+ }
+ c.Sender = c.sender(renegotiation)
+ c.AddToUserAgent(ua)
+ return c
+}
+
+// AddToUserAgent adds an extension to the current user agent
+func (c *Client) AddToUserAgent(extension string) error {
+ if extension != "" {
+ c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension)
+ return nil
+ }
+ return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent)
+}
+
+// Do implements the Sender interface by invoking the active Sender after applying authorization.
+// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent
+// is set, set the User-Agent header.
+func (c Client) Do(r *http.Request) (*http.Response, error) {
+ if r.UserAgent() == "" {
+ r, _ = Prepare(r,
+ WithUserAgent(c.UserAgent))
+ }
+ // NOTE: c.WithInspection() must be last in the list so that it can inspect all preceding operations
+ r, err := Prepare(r,
+ c.WithAuthorization(),
+ c.WithInspection())
+ if err != nil {
+ var resp *http.Response
+ if detErr, ok := err.(DetailedError); ok {
+ // if the authorization failed (e.g. invalid credentials) there will
+ // be a response associated with the error, be sure to return it.
+ resp = detErr.Response
+ }
+ return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed")
+ }
+ logger.Instance.WriteRequest(r, logger.Filter{
+ Header: func(k string, v []string) (bool, []string) {
+ // remove the auth token from the log
+ if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "Ocp-Apim-Subscription-Key") {
+ v = []string{"**REDACTED**"}
+ }
+ return true, v
+ },
+ })
+ resp, err := SendWithSender(c.sender(tls.RenegotiateNever), r)
+ logger.Instance.WriteResponse(resp, logger.Filter{})
+ Respond(resp, c.ByInspecting())
+ return resp, err
+}
+
+// sender returns the Sender to which to send requests.
+func (c Client) sender(renegotiation tls.RenegotiationSupport) Sender {
+ if c.Sender == nil {
+ return sender(renegotiation)
+ }
+ return c.Sender
+}
+
+// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator
+// from the current Authorizer. If no Authorizer is set, it uses the NullAuthorizer.
+func (c Client) WithAuthorization() PrepareDecorator {
+ return c.authorizer().WithAuthorization()
+}
+
+// authorizer returns the Authorizer to use.
+func (c Client) authorizer() Authorizer {
+ if c.Authorizer == nil {
+ return NullAuthorizer{}
+ }
+ return c.Authorizer
+}
+
+// WithInspection is a convenience method that passes the request to the supplied RequestInspector,
+// if present, or returns the WithNothing PrepareDecorator otherwise.
+func (c Client) WithInspection() PrepareDecorator {
+ if c.RequestInspector == nil {
+ return WithNothing()
+ }
+ return c.RequestInspector
+}
+
+// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector,
+// if present, or returns the ByIgnoring RespondDecorator otherwise.
+func (c Client) ByInspecting() RespondDecorator {
+ if c.ResponseInspector == nil {
+ return ByIgnoring()
+ }
+ return c.ResponseInspector
+}
+
+// Send sends the provided http.Request using the client's Sender or the default sender.
+// It returns the http.Response and possible error. It also accepts a, possibly empty,
+// default set of SendDecorators used when sending the request.
+// SendDecorators have the following precedence:
+// 1. In a request's context via WithSendDecorators()
+// 2. Specified on the client in SendDecorators
+// 3. The default values specified in this method
+func (c Client) Send(req *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+ if c.SendDecorators != nil {
+ decorators = c.SendDecorators
+ }
+ inCtx := req.Context().Value(ctxSendDecorators{})
+ if sd, ok := inCtx.([]SendDecorator); ok {
+ decorators = sd
+ }
+ return SendWithSender(c, req, decorators...)
+} diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE b/extender/code/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE new file mode 100644 index 000000000..b9d6a27ea --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE @@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/date/date.go new file mode 100644 index 000000000..c45710656 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/date/date.go @@ -0,0 +1,96 @@ +/* +Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/) +defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of +time.Time types. And both convert to time.Time through a ToTime method. 
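+
+An illustrative round trip (editor's sketch, not part of the vendored source):
+
+    d, err := ParseDate("2001-02-03")
+    if err == nil {
+        fmt.Println(d) // 2001-02-03
+    }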
+*/
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "fmt"
+ "time"
+)
+
+const (
+ fullDate = "2006-01-02"
+ fullDateJSON = `"2006-01-02"`
+ dateFormat = "%04d-%02d-%02d"
+ jsonFormat = `"%04d-%02d-%02d"`
+)
+
+// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e.,
+// 2006-01-02).
+type Date struct {
+ time.Time
+}
+
+// ParseDate creates a new Date from the passed string.
+func ParseDate(date string) (d Date, err error) {
+ return parseDate(date, fullDate)
+}
+
+func parseDate(date string, format string) (Date, error) {
+ d, err := time.Parse(format, date)
+ return Date{Time: d}, err
+}
+
+// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d Date) MarshalBinary() ([]byte, error) {
+ return d.MarshalText()
+}
+
+// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d *Date) UnmarshalBinary(data []byte) error {
+ return d.UnmarshalText(data)
+}
+
+// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d Date) MarshalJSON() (json []byte, err error) {
+ return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil
+}
+
+// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d *Date) UnmarshalJSON(data []byte) (err error) {
+ d.Time, err = time.Parse(fullDateJSON, string(data))
+ return err
+}
+
+// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d Date) MarshalText() (text []byte, err error) {
+ return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil
+}
+
+// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d *Date) UnmarshalText(data []byte) (err error) {
+ d.Time, err = time.Parse(fullDate, string(data))
+ return err
+}
+
+// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02).
+func (d Date) String() string { + return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) +} + +// ToTime returns a Date as a time.Time +func (d Date) ToTime() time.Time { + return d.Time +} diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/date/go.mod b/extender/code/vendor/github.com/Azure/go-autorest/autorest/date/go.mod new file mode 100644 index 000000000..f88ecc402 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/date/go.mod @@ -0,0 +1,5 @@ +module github.com/Azure/go-autorest/autorest/date + +go 1.12 + +require github.com/Azure/go-autorest v14.2.0+incompatible diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/date/go.sum b/extender/code/vendor/github.com/Azure/go-autorest/autorest/date/go.sum new file mode 100644 index 000000000..1fc56a962 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/date/go.sum @@ -0,0 +1,2 @@ +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go new file mode 100644 index 000000000..4e0543207 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/date/time.go new file mode 100644 index 000000000..b453fad04 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/date/time.go @@ -0,0 +1,103 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "regexp" + "time" +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. 
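+// For example (editor's note): "2017-04-06T01:02:03.000000000" carries no zone
+// suffix and is parsed with azureUtcFormat below, while "2017-04-06T01:02:03Z"
+// matches tzOffsetRegex and is parsed as RFC3339.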
+const ( + azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"` + azureUtcFormat = "2006-01-02T15:04:05.999999999" + rfc3339JSON = `"` + time.RFC3339Nano + `"` + rfc3339 = time.RFC3339Nano + tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$` +) + +// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +type Time struct { + time.Time +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalBinary() ([]byte, error) { + return t.Time.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalJSON() (json []byte, err error) { + return t.Time.MarshalJSON() +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalJSON(data []byte) (err error) { + timeFormat := azureUtcFormatJSON + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339JSON + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalText() (text []byte, err error) { + return t.Time.MarshalText() +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalText(data []byte) (err error) { + timeFormat := azureUtcFormat + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339 + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// String returns the Time formatted as an RFC3339 date-time string (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) String() string { + // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. + b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} + +// ToTime returns a Time as a time.Time +func (t Time) ToTime() time.Time { + return t.Time +} diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go new file mode 100644 index 000000000..48fb39ba9 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go @@ -0,0 +1,100 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
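+
+// Editor's note: TimeRFC1123 below uses the layout "Mon, 02 Jan 2006 15:04:05 MST";
+// for instance, time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC) marshals to
+// "Mon, 02 Jan 2006 15:04:05 UTC".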
+ +import ( + "errors" + "time" +) + +const ( + rfc1123JSON = `"` + time.RFC1123 + `"` + rfc1123 = time.RFC1123 +) + +// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +type TimeRFC1123 struct { + time.Time +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123JSON, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalJSON() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") + } + b := []byte(t.Format(rfc1123JSON)) + return b, nil +} + +// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalText() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") + } + + b := []byte(t.Format(rfc1123)) + return b, nil +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalBinary() ([]byte, error) { + return t.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// ToTime returns a Time as a time.Time +func (t TimeRFC1123) ToTime() time.Time { + return t.Time +} + +// String returns the Time formatted as an RFC1123 date-time string (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) String() string { + // Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does. + b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go new file mode 100644 index 000000000..7073959b2 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go @@ -0,0 +1,123 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
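+
+// Editor's sketch of the JSON round trip implemented below (assumes encoding/json):
+//
+//    var t UnixTime
+//    if err := json.Unmarshal([]byte("1257894000"), &t); err == nil {
+//        // time.Time(t) is 2009-11-10 23:00:00 +0000 UTC
+//    }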
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "time"
+)
+
+// unixEpoch is the moment in time that should be treated as timestamp 0.
+var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC)
+
+// UnixTime marshals and unmarshals a time that is represented as the number
+// of seconds (ignoring leap seconds) since the Unix Epoch.
+type UnixTime time.Time
+
+// Duration returns the time as a Duration since the UnixEpoch.
+func (t UnixTime) Duration() time.Duration {
+ return time.Time(t).Sub(unixEpoch)
+}
+
+// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch.
+func NewUnixTimeFromSeconds(seconds float64) UnixTime {
+ return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second)))
+}
+
+// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch.
+func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime {
+ return NewUnixTimeFromDuration(time.Duration(nanoseconds))
+}
+
+// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch.
+func NewUnixTimeFromDuration(dur time.Duration) UnixTime {
+ return UnixTime(unixEpoch.Add(dur))
+}
+
+// UnixEpoch retrieves the moment considered the Unix Epoch, i.e. the time represented by '0'.
+func UnixEpoch() time.Time {
+ return unixEpoch
+}
+
+// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements.
+// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.)
+func (t UnixTime) MarshalJSON() ([]byte, error) {
+ buffer := &bytes.Buffer{}
+ enc := json.NewEncoder(buffer)
+ err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9)
+ if err != nil {
+ return nil, err
+ }
+ return buffer.Bytes(), nil
+}
+
+// UnmarshalJSON reconstitutes a UnixTime saved as a JSON number of the number of seconds since
+// midnight January 1st, 1970.
+func (t *UnixTime) UnmarshalJSON(text []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(text))
+
+ var secondsSinceEpoch float64
+ if err := dec.Decode(&secondsSinceEpoch); err != nil {
+ return err
+ }
+
+ *t = NewUnixTimeFromSeconds(secondsSinceEpoch)
+
+ return nil
+}
+
+// MarshalText encodes the UnixTime as text by delegating to time.Time.MarshalText, which
+// produces an RFC3339 representation.
+func (t UnixTime) MarshalText() ([]byte, error) {
+ cast := time.Time(t)
+ return cast.MarshalText()
+}
+
+// UnmarshalText populates a UnixTime from text by delegating to time.Time.UnmarshalText,
+// which expects an RFC3339 representation.
+func (t *UnixTime) UnmarshalText(raw []byte) error {
+ var unmarshaled time.Time
+
+ if err := unmarshaled.UnmarshalText(raw); err != nil {
+ return err
+ }
+
+ *t = UnixTime(unmarshaled)
+ return nil
+}
+
+// MarshalBinary converts a UnixTime into a binary.LittleEndian int64 of nanoseconds since the epoch.
+func (t UnixTime) MarshalBinary() ([]byte, error) {
+ buf := &bytes.Buffer{}
+
+ payload := int64(t.Duration())
+
+ if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil {
+ return nil, err
+ }
+
+ return buf.Bytes(), nil
+}
+
+// UnmarshalBinary converts a binary.LittleEndian int64 of nanoseconds since the epoch back into a UnixTime.
+func (t *UnixTime) UnmarshalBinary(raw []byte) error {
+ var nanosecondsSinceEpoch int64
+
+ if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil {
+ return err
+ }
+ *t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch)
+ return nil
+}
diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/date/utility.go
new file mode 100644
index 000000000..12addf0eb
--- /dev/null
+++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/date/utility.go
@@ -0,0 +1,25 @@
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "strings"
+ "time"
+)
+
+// ParseTime parses the given time string using the specified format.
+func ParseTime(format string, t string) (d time.Time, err error) {
+ return time.Parse(format, strings.ToUpper(t))
+}
diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/error.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/error.go
new file mode 100644
index 000000000..f724f3332
--- /dev/null
+++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/error.go
@@ -0,0 +1,98 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "fmt"
+ "net/http"
+)
+
+const (
+ // UndefinedStatusCode is used when HTTP status code is not available for an error.
+ UndefinedStatusCode = 0
+)
+
+// DetailedError encloses an error with details of the package, method, and associated HTTP
+// status code (if any).
+type DetailedError struct {
+ Original error
+
+ // PackageType is the package type of the object emitting the error. For types, the value
+ // matches that produced by the '%T' format specifier of the fmt package. For other elements,
+ // such as functions, it is just the package name (e.g., "autorest").
+ PackageType string
+
+ // Method is the name of the method raising the error.
+ Method string
+
+ // StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error.
+ StatusCode interface{}
+
+ // Message is the error message.
+ Message string
+
+ // ServiceError is the response body of the failed API call, in bytes.
+ ServiceError []byte
+
+ // Response is the response object that was returned during failure if applicable.
+ Response *http.Response
+}
+
+// NewError creates a new Error conforming object from the passed packageType, method, and
message is treated as a format string to which the optional args apply. +func NewError(packageType string, method string, message string, args ...interface{}) DetailedError { + return NewErrorWithError(nil, packageType, method, nil, message, args...) +} + +// NewErrorWithResponse creates a new Error conforming object from the passed +// packageType, method, statusCode of the given resp (UndefinedStatusCode if +// resp is nil), and message. message is treated as a format string to which the +// optional args apply. +func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { + return NewErrorWithError(nil, packageType, method, resp, message, args...) +} + +// NewErrorWithError creates a new Error conforming object from the +// passed packageType, method, statusCode of the given resp (UndefinedStatusCode +// if resp is nil), message, and original error. message is treated as a format +// string to which the optional args apply. +func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { + if v, ok := original.(DetailedError); ok { + return v + } + + statusCode := UndefinedStatusCode + if resp != nil { + statusCode = resp.StatusCode + } + + return DetailedError{ + Original: original, + PackageType: packageType, + Method: method, + StatusCode: statusCode, + Message: fmt.Sprintf(message, args...), + Response: resp, + } +} + +// Error returns a formatted containing all available details (i.e., PackageType, Method, +// StatusCode, Message, and original error (if any)). +func (e DetailedError) Error() string { + if e.Original == nil { + return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode) + } + return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original) +} diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/go.mod b/extender/code/vendor/github.com/Azure/go-autorest/autorest/go.mod new file mode 100644 index 000000000..499c56de4 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/go.mod @@ -0,0 +1,11 @@ +module github.com/Azure/go-autorest/autorest + +go 1.12 + +require ( + github.com/Azure/go-autorest/autorest/adal v0.8.2 + github.com/Azure/go-autorest/autorest/mocks v0.3.0 + github.com/Azure/go-autorest/logger v0.1.0 + github.com/Azure/go-autorest/tracing v0.5.0 + golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 +) diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/go.sum b/extender/code/vendor/github.com/Azure/go-autorest/autorest/go.sum new file mode 100644 index 000000000..37398d1d4 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/go.sum @@ -0,0 +1,30 @@ +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.2 h1:O1X4oexUxnZCaEUGsvMnr8ZGj8HI37tNezwY4npRqA0= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod 
h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/preparer.go new file mode 100644 index 000000000..6e8ed64eb --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/preparer.go @@ -0,0 +1,550 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime/multipart"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+const (
+ mimeTypeJSON = "application/json"
+ mimeTypeOctetStream = "application/octet-stream"
+ mimeTypeFormPost = "application/x-www-form-urlencoded"
+
+ headerAuthorization = "Authorization"
+ headerAuxAuthorization = "x-ms-authorization-auxiliary"
+ headerContentType = "Content-Type"
+ headerUserAgent = "User-Agent"
+)
+
+// used as a key type in context.WithValue()
+type ctxPrepareDecorators struct{}
+
+// WithPrepareDecorators adds the specified PrepareDecorators to the provided context.
+// If no PrepareDecorators are provided the context is unchanged.
+func WithPrepareDecorators(ctx context.Context, prepareDecorator []PrepareDecorator) context.Context {
+ if len(prepareDecorator) == 0 {
+ return ctx
+ }
+ return context.WithValue(ctx, ctxPrepareDecorators{}, prepareDecorator)
+}
+
+// GetPrepareDecorators returns the PrepareDecorators in the provided context or the provided default PrepareDecorators.
+func GetPrepareDecorators(ctx context.Context, defaultPrepareDecorators ...PrepareDecorator) []PrepareDecorator {
+ inCtx := ctx.Value(ctxPrepareDecorators{})
+ if pd, ok := inCtx.([]PrepareDecorator); ok {
+ return pd
+ }
+ return defaultPrepareDecorators
+}
+
+// Preparer is the interface that wraps the Prepare method.
+//
+// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations
+// must ensure to not share or hold per-invocation state since Preparers may be shared and re-used.
+type Preparer interface {
+ Prepare(*http.Request) (*http.Request, error)
+}
+
+// PreparerFunc is a method that implements the Preparer interface.
+type PreparerFunc func(*http.Request) (*http.Request, error)
+
+// Prepare implements the Preparer interface on PreparerFunc.
+func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) {
+ return pf(r)
+}
+
+// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then affect the result.
+type PrepareDecorator func(Preparer) Preparer
+
+// CreatePreparer creates, decorates, and returns a Preparer.
+// Without decorators, the returned Preparer returns the passed http.Request unmodified.
+// Preparers are safe to share and re-use.
+func CreatePreparer(decorators ...PrepareDecorator) Preparer {
+ return DecoratePreparer(
+ Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })),
+ decorators...)
+}
+
+// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it
+// applies to the Preparer. Decorators are applied in the order received, but their effect upon the
+// request depends on whether they are a pre-decorator (change the http.Request and then pass it
+// along) or a post-decorator (pass the http.Request along and alter it on return).
+func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer {
+ for _, decorate := range decorators {
+ p = decorate(p)
+ }
+ return p
+}
+
+// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators.
+// It creates a Preparer from the decorators which it then applies to the passed http.Request.
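+//
+// Illustrative use (editor's sketch; these decorators are defined in this file):
+//
+//    req, _ := http.NewRequest(http.MethodGet, "https://example.com", nil)
+//    req, err := Prepare(req, AsJSON(), WithUserAgent("my-agent/1.0"))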
+func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) {
+ if r == nil {
+ return nil, NewError("autorest", "Prepare", "Invoked without an http.Request")
+ }
+ return CreatePreparer(decorators...).Prepare(r)
+}
+
+// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed
+// http.Request.
+func WithNothing() PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ return p.Prepare(r)
+ })
+ }
+}
+
+// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to
+// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before
+// adding the header.
+func WithHeader(header string, value string) PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ if r.Header == nil {
+ r.Header = make(http.Header)
+ }
+ r.Header.Set(http.CanonicalHeaderKey(header), value)
+ }
+ return r, err
+ })
+ }
+}
+
+// WithHeaders returns a PrepareDecorator that sets the specified HTTP headers of the http.Request to
+// the passed values. It canonicalizes the passed header names (via http.CanonicalHeaderKey) before
+// adding them.
+func WithHeaders(headers map[string]interface{}) PrepareDecorator {
+ h := ensureValueStrings(headers)
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ if r.Header == nil {
+ r.Header = make(http.Header)
+ }
+
+ for name, value := range h {
+ r.Header.Set(http.CanonicalHeaderKey(name), value)
+ }
+ }
+ return r, err
+ })
+ }
+}
+
+// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "Bearer " followed by the supplied token.
+func WithBearerAuthorization(token string) PrepareDecorator {
+ return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token))
+}
+
+// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value
+// is the passed contentType.
+func AsContentType(contentType string) PrepareDecorator {
+ return WithHeader(headerContentType, contentType)
+}
+
+// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the
+// passed string.
+func WithUserAgent(ua string) PrepareDecorator {
+ return WithHeader(headerUserAgent, ua)
+}
+
+// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is
+// "application/x-www-form-urlencoded".
+func AsFormURLEncoded() PrepareDecorator {
+ return AsContentType(mimeTypeFormPost)
+}
+
+// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is
+// "application/json".
+func AsJSON() PrepareDecorator {
+ return AsContentType(mimeTypeJSON)
+}
+
+// AsOctetStream returns a PrepareDecorator that adds the "application/octet-stream" Content-Type header.
+func AsOctetStream() PrepareDecorator {
+ return AsContentType(mimeTypeOctetStream)
+}
+
+// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The
+// decorator does not validate that the passed method string is a known HTTP method.
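+// For example (editor's sketch):
+//
+//    r, err := Prepare(&http.Request{}, WithMethod("MERGE"))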
+func WithMethod(method string) PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r.Method = method
+ return p.Prepare(r)
+ })
+ }
+}
+
+// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE.
+func AsDelete() PrepareDecorator { return WithMethod("DELETE") }
+
+// AsGet returns a PrepareDecorator that sets the HTTP method to GET.
+func AsGet() PrepareDecorator { return WithMethod("GET") }
+
+// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD.
+func AsHead() PrepareDecorator { return WithMethod("HEAD") }
+
+// AsMerge returns a PrepareDecorator that sets the HTTP method to MERGE.
+func AsMerge() PrepareDecorator { return WithMethod("MERGE") }
+
+// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS.
+func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") }
+
+// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH.
+func AsPatch() PrepareDecorator { return WithMethod("PATCH") }
+
+// AsPost returns a PrepareDecorator that sets the HTTP method to POST.
+func AsPost() PrepareDecorator { return WithMethod("POST") }
+
+// AsPut returns a PrepareDecorator that sets the HTTP method to PUT.
+func AsPut() PrepareDecorator { return WithMethod("PUT") }
+
+// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed
+// from the supplied baseURL.
+func WithBaseURL(baseURL string) PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ var u *url.URL
+ if u, err = url.Parse(baseURL); err != nil {
+ return r, err
+ }
+ if u.Scheme == "" {
+ err = fmt.Errorf("autorest: No scheme detected in URL %s", baseURL)
+ }
+ if err == nil {
+ r.URL = u
+ }
+ }
+ return r, err
+ })
+ }
+}
+
+// WithBytes returns a PrepareDecorator that takes a slice of bytes and writes it directly to
+// the request body.
+func WithBytes(input *[]byte) PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ if input == nil {
+ return r, fmt.Errorf("Input Bytes was nil")
+ }
+
+ r.ContentLength = int64(len(*input))
+ r.Body = ioutil.NopCloser(bytes.NewReader(*input))
+ }
+ return r, err
+ })
+ }
+}
+
+// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the
+// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map.
+func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator {
+ parameters := ensureValueStrings(urlParameters)
+ for key, value := range parameters {
+ baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1)
+ }
+ return WithBaseURL(baseURL)
+}
+
+// WithFormData returns a PrepareDecorator that "URL encodes" (e.g., bar=baz&foo=quux) the form
+// values into the http.Request body.
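+// For example (editor's sketch):
+//
+//    v := url.Values{}
+//    v.Set("grant_type", "client_credentials")
+//    r, err := Prepare(&http.Request{}, WithFormData(v))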
+func WithFormData(v url.Values) PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ s := v.Encode()
+
+ if r.Header == nil {
+ r.Header = make(http.Header)
+ }
+ r.Header.Set(http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost)
+ r.ContentLength = int64(len(s))
+ r.Body = ioutil.NopCloser(strings.NewReader(s))
+ }
+ return r, err
+ })
+ }
+}
+
+// WithMultiPartFormData returns a PrepareDecorator that encodes the given form parameters as
+// multipart/form-data in the http.Request body.
+func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ var body bytes.Buffer
+ writer := multipart.NewWriter(&body)
+ for key, value := range formDataParameters {
+ if rc, ok := value.(io.ReadCloser); ok {
+ var fd io.Writer
+ if fd, err = writer.CreateFormFile(key, key); err != nil {
+ return r, err
+ }
+ if _, err = io.Copy(fd, rc); err != nil {
+ return r, err
+ }
+ } else {
+ if err = writer.WriteField(key, ensureValueString(value)); err != nil {
+ return r, err
+ }
+ }
+ }
+ if err = writer.Close(); err != nil {
+ return r, err
+ }
+ if r.Header == nil {
+ r.Header = make(http.Header)
+ }
+ r.Header.Set(http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType())
+ r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
+ r.ContentLength = int64(body.Len())
+ return r, err
+ }
+ return r, err
+ })
+ }
+}
+
+// WithFile returns a PrepareDecorator that sends the file in the request body.
+func WithFile(f io.ReadCloser) PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ b, err := ioutil.ReadAll(f)
+ if err != nil {
+ return r, err
+ }
+ r.Body = ioutil.NopCloser(bytes.NewReader(b))
+ r.ContentLength = int64(len(b))
+ }
+ return r, err
+ })
+ }
+}
+
+// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request
+// and sets the Content-Length header.
+func WithBool(v bool) PrepareDecorator {
+ return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the
+// request and sets the Content-Length header.
+func WithFloat32(v float32) PrepareDecorator {
+ return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the
+// request and sets the Content-Length header.
+func WithFloat64(v float64) PrepareDecorator {
+ return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request
+// and sets the Content-Length header.
+func WithInt32(v int32) PrepareDecorator {
+ return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request
+// and sets the Content-Length header.
+func WithInt64(v int64) PrepareDecorator {
+ return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithString returns a PrepareDecorator that encodes the passed string into the body of the request
+// and sets the Content-Length header.
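+// For example (editor's sketch):
+//
+//    r, err := Prepare(&http.Request{}, WithString(`{"ok":true}`))
+//    // r.ContentLength is 11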
+func WithString(v string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + r.ContentLength = int64(len(v)) + r.Body = ioutil.NopCloser(strings.NewReader(v)) + } + return r, err + }) + } +} + +// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the +// request and sets the Content-Length header. +func WithJSON(v interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := json.Marshal(v) + if err == nil { + r.ContentLength = int64(len(b)) + r.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + } + return r, err + }) + } +} + +// WithXML returns a PrepareDecorator that encodes the data passed as XML into the body of the +// request and sets the Content-Length header. +func WithXML(v interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := xml.Marshal(v) + if err == nil { + // we have to tack on an XML header + withHeader := xml.Header + string(b) + bytesWithHeader := []byte(withHeader) + + r.ContentLength = int64(len(bytesWithHeader)) + r.Body = ioutil.NopCloser(bytes.NewReader(bytesWithHeader)) + } + } + return r, err + }) + } +} + +// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path +// is absolute (that is, it begins with a "/"), it replaces the existing path. +func WithPath(path string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithPath", "Invoked with a nil URL") + } + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The +// values will be escaped (aka URL encoded) before insertion into the path. +func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { + parameters := escapeValueStrings(ensureValueStrings(pathParameters)) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL") + } + for key, value := range parameters { + path = strings.Replace(path, "{"+key+"}", value, -1) + } + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. 
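+// For example (editor's sketch):
+//
+//    r, err := Prepare(&http.Request{},
+//        WithBaseURL("https://management.azure.com"),
+//        WithPathParameters("/subscriptions/{subscriptionId}",
+//            map[string]interface{}{"subscriptionId": "sub-1234"}))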
+func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator {
+ parameters := ensureValueStrings(pathParameters)
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ if r.URL == nil {
+ return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL")
+ }
+ for key, value := range parameters {
+ path = strings.Replace(path, "{"+key+"}", value, -1)
+ }
+
+ if r.URL, err = parseURL(r.URL, path); err != nil {
+ return r, err
+ }
+ }
+ return r, err
+ })
+ }
+}
+
+func parseURL(u *url.URL, path string) (*url.URL, error) {
+ p := strings.TrimRight(u.String(), "/")
+ if !strings.HasPrefix(path, "/") {
+ path = "/" + path
+ }
+ return url.Parse(p + path)
+}
+
+// WithQueryParameters returns a PrepareDecorator that encodes and applies the query parameters
+// given in the supplied map (i.e., key=value).
+func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator {
+ parameters := MapToValues(queryParameters)
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ if r.URL == nil {
+ return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL")
+ }
+ v := r.URL.Query()
+ for key, value := range parameters {
+ for i := range value {
+ d, err := url.QueryUnescape(value[i])
+ if err != nil {
+ return r, err
+ }
+ value[i] = d
+ }
+ v[key] = value
+ }
+ r.URL.RawQuery = v.Encode()
+ }
+ return r, err
+ })
+ }
+}
diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/responder.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/responder.go
new file mode 100644
index 000000000..349e1963a
--- /dev/null
+++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/responder.go
@@ -0,0 +1,269 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "bytes"
+ "encoding/json"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strings"
+)
+
+// Responder is the interface that wraps the Respond method.
+//
+// Respond accepts and reacts to an http.Response. Implementations must ensure to not share or hold
+// state since Responders may be shared and re-used.
+type Responder interface {
+ Respond(*http.Response) error
+}
+
+// ResponderFunc is a method that implements the Responder interface.
+type ResponderFunc func(*http.Response) error
+
+// Respond implements the Responder interface on ResponderFunc.
+func (rf ResponderFunc) Respond(r *http.Response) error {
+ return rf(r)
+}
+
+// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to
+// the http.Response and pass it along or, first, pass the http.Response along then react.
+type RespondDecorator func(Responder) Responder
+
+// CreateResponder creates, decorates, and returns a Responder. 
Without decorators, the returned
+// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share
+// and re-use; it depends on the applied decorators. For example, a standard decorator that closes
+// the response body is fine to share whereas a decorator that reads the body into a passed struct
+// is not.
+//
+// To prevent memory leaks, ensure that at least one Responder closes the response body.
+func CreateResponder(decorators ...RespondDecorator) Responder {
+ return DecorateResponder(
+ Responder(ResponderFunc(func(r *http.Response) error { return nil })),
+ decorators...)
+}
+
+// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it
+// applies to the Responder. Decorators are applied in the order received, but their effect upon the
+// response depends on whether they are a pre-decorator (react to the http.Response and then pass it
+// along) or a post-decorator (pass the http.Response along and then react).
+func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder {
+ for _, decorate := range decorators {
+ r = decorate(r)
+ }
+ return r
+}
+
+// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators.
+// It creates a Responder from the decorators it then applies to the passed http.Response.
+func Respond(r *http.Response, decorators ...RespondDecorator) error {
+ if r == nil {
+ return nil
+ }
+ return CreateResponder(decorators...).Respond(r)
+}
+
+// ByIgnoring returns a RespondDecorator that ignores the passed http.Response passing it unexamined
+// to the next RespondDecorator.
+func ByIgnoring() RespondDecorator {
+ return func(r Responder) Responder {
+ return ResponderFunc(func(resp *http.Response) error {
+ return r.Respond(resp)
+ })
+ }
+}
+
+// ByCopying copies the contents of the http.Response Body into the passed bytes.Buffer as
+// the Body is read.
+func ByCopying(b *bytes.Buffer) RespondDecorator {
+ return func(r Responder) Responder {
+ return ResponderFunc(func(resp *http.Response) error {
+ err := r.Respond(resp)
+ if err == nil && resp != nil && resp.Body != nil {
+ resp.Body = TeeReadCloser(resp.Body, b)
+ }
+ return err
+ })
+ }
+}
+
+// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which
+// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed
+// Responder is invoked prior to discarding the response body, the decorator may occur anywhere
+// within the set.
+func ByDiscardingBody() RespondDecorator {
+ return func(r Responder) Responder {
+ return ResponderFunc(func(resp *http.Response) error {
+ err := r.Respond(resp)
+ if err == nil && resp != nil && resp.Body != nil {
+ if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil {
+ return fmt.Errorf("Error discarding the response body: %v", err)
+ }
+ }
+ return err
+ })
+ }
+}
+
+// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it
+// closes the response body. Since the passed Responder is invoked prior to closing the response
+// body, the decorator may occur anywhere within the set.
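+// A typical respond chain (editor's sketch):
+//
+//    var result map[string]interface{}
+//    err := Respond(resp,
+//        ByUnmarshallingJSON(&result),
+//        ByClosing())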
+func ByClosing() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if resp != nil && resp.Body != nil {
+				if err := resp.Body.Close(); err != nil {
+					return fmt.Errorf("Error closing the response body: %v", err)
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which
+// it closes the response if the passed Responder returns an error and the response body exists.
+func ByClosingIfError() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err != nil && resp != nil && resp.Body != nil {
+				if err := resp.Body.Close(); err != nil {
+					return fmt.Errorf("Error closing the response body: %v", err)
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByUnmarshallingBytes returns a RespondDecorator that copies the bytes returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingBytes(v *[]byte) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil {
+				bytes, errInner := ioutil.ReadAll(resp.Body)
+				if errInner != nil {
+					err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+				} else {
+					*v = bytes
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingJSON(v interface{}) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil {
+				b, errInner := ioutil.ReadAll(resp.Body)
+				// Some responses might include a BOM, remove for successful unmarshalling
+				b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
+				if errInner != nil {
+					err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+				} else if len(strings.Trim(string(b), " ")) > 0 {
+					errInner = json.Unmarshal(b, v)
+					if errInner != nil {
+						err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b))
+					}
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByUnmarshallingXML returns a RespondDecorator that decodes an XML document returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingXML(v interface{}) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil {
+				b, errInner := ioutil.ReadAll(resp.Body)
+				if errInner != nil {
+					err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+				} else {
+					errInner = xml.Unmarshal(b, v)
+					if errInner != nil {
+						err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b))
+					}
+				}
+			}
+			return err
+		})
+	}
+}
+
+// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response
+// StatusCode is among the set passed. On error, the response body is fully read into a buffer and
+// presented in the returned error, as well as in the response body.
+func WithErrorUnlessStatusCode(codes ...int) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && !ResponseHasStatusCode(resp, codes...) {
+				derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s",
+					resp.Request.Method,
+					resp.Request.URL,
+					resp.Status)
+				if resp.Body != nil {
+					defer resp.Body.Close()
+					b, _ := ioutil.ReadAll(resp.Body)
+					derr.ServiceError = b
+					resp.Body = ioutil.NopCloser(bytes.NewReader(b))
+				}
+				err = derr
+			}
+			return err
+		})
+	}
+}
+
+// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is
+// anything other than HTTP 200.
+func WithErrorUnlessOK() RespondDecorator {
+	return WithErrorUnlessStatusCode(http.StatusOK)
+}
+
+// ExtractHeader extracts all values of the specified header from the http.Response. It returns an
+// empty string slice if the passed http.Response is nil or the header does not exist.
+func ExtractHeader(header string, resp *http.Response) []string {
+	if resp != nil && resp.Header != nil {
+		return resp.Header[http.CanonicalHeaderKey(header)]
+	}
+	return nil
+}
+
+// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It
+// returns an empty string if the passed http.Response is nil or the header does not exist.
+func ExtractHeaderValue(header string, resp *http.Response) string {
+	h := ExtractHeader(header, resp)
+	if len(h) > 0 {
+		return h[0]
+	}
+	return ""
+}
diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
new file mode 100644
index 000000000..fa11dbed7
--- /dev/null
+++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
@@ -0,0 +1,52 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"net/http"
+)
+
+// NewRetriableRequest returns a wrapper around an HTTP request that supports retry logic.
+func NewRetriableRequest(req *http.Request) *RetriableRequest {
+	return &RetriableRequest{req: req}
+}
+
+// Request returns the wrapped HTTP request.
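+// Callers pair it with Prepare: each attempt first calls Prepare to rewind or
+// re-create the body, then hands Request to a Sender (an illustrative sketch of
+// the loop used by the retry decorators in this package; s, req, and attempts
+// are assumed):
+//
+//	rr := NewRetriableRequest(req)
+//	for attempt := 0; attempt < attempts; attempt++ {
+//		if err := rr.Prepare(); err != nil {
+//			return nil, err
+//		}
+//		if resp, err := s.Do(rr.Request()); err == nil {
+//			return resp, nil
+//		}
+//	}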
+func (rr *RetriableRequest) Request() *http.Request {
+	return rr.req
+}
+
+func (rr *RetriableRequest) prepareFromByteReader() (err error) {
+	// fall back to making a copy (only do this once)
+	b := []byte{}
+	if rr.req.ContentLength > 0 {
+		b = make([]byte, rr.req.ContentLength)
+		_, err = io.ReadFull(rr.req.Body, b)
+		if err != nil {
+			return err
+		}
+	} else {
+		b, err = ioutil.ReadAll(rr.req.Body)
+		if err != nil {
+			return err
+		}
+	}
+	rr.br = bytes.NewReader(b)
+	rr.req.Body = ioutil.NopCloser(rr.br)
+	return err
+}
diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
new file mode 100644
index 000000000..7143cc61b
--- /dev/null
+++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
@@ -0,0 +1,54 @@
+// +build !go1.8
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package autorest
+
+import (
+	"bytes"
+	"io/ioutil"
+	"net/http"
+)
+
+// RetriableRequest provides facilities for retrying an HTTP request.
+type RetriableRequest struct {
+	req *http.Request
+	br  *bytes.Reader
+}
+
+// Prepare signals that the request is about to be sent.
+func (rr *RetriableRequest) Prepare() (err error) {
+	// preserve the request body; this is to support retry logic as
+	// the underlying transport will always close the request body
+	if rr.req.Body != nil {
+		if rr.br != nil {
+			_, err = rr.br.Seek(0, 0 /*io.SeekStart*/)
+			rr.req.Body = ioutil.NopCloser(rr.br)
+		}
+		if err != nil {
+			return err
+		}
+		if rr.br == nil {
+			// fall back to making a copy (only do this once)
+			err = rr.prepareFromByteReader()
+		}
+	}
+	return err
+}
+
+func removeRequestBody(req *http.Request) {
+	req.Body = nil
+	req.ContentLength = 0
+}
diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
new file mode 100644
index 000000000..ae15c6bf9
--- /dev/null
+++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
@@ -0,0 +1,66 @@
+// +build go1.8
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package autorest
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"net/http"
+)
+
+// RetriableRequest provides facilities for retrying an HTTP request.
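+// On Go 1.8 and later it prefers http.Request.GetBody, which yields a fresh body
+// reader for every attempt; when GetBody is unset it falls back to buffering the
+// body once into a bytes.Reader and rewinding it before each retry.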
+type RetriableRequest struct {
+	req *http.Request
+	rc  io.ReadCloser
+	br  *bytes.Reader
+}
+
+// Prepare signals that the request is about to be sent.
+func (rr *RetriableRequest) Prepare() (err error) {
+	// preserve the request body; this is to support retry logic as
+	// the underlying transport will always close the request body
+	if rr.req.Body != nil {
+		if rr.rc != nil {
+			rr.req.Body = rr.rc
+		} else if rr.br != nil {
+			_, err = rr.br.Seek(0, io.SeekStart)
+			rr.req.Body = ioutil.NopCloser(rr.br)
+		}
+		if err != nil {
+			return err
+		}
+		if rr.req.GetBody != nil {
+			// this will allow us to preserve the body without having to
+			// make a copy. note we need to do this on each iteration
+			rr.rc, err = rr.req.GetBody()
+			if err != nil {
+				return err
+			}
+		} else if rr.br == nil {
+			// fall back to making a copy (only do this once)
+			err = rr.prepareFromByteReader()
+		}
+	}
+	return err
+}
+
+func removeRequestBody(req *http.Request) {
+	req.Body = nil
+	req.GetBody = nil
+	req.ContentLength = 0
+}
diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/sender.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/sender.go
new file mode 100644
index 000000000..a07670b8c
--- /dev/null
+++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/sender.go
@@ -0,0 +1,419 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+	"log"
+	"math"
+	"net/http"
+	"net/http/cookiejar"
+	"strconv"
+	"time"
+
+	"github.com/Azure/go-autorest/tracing"
+)
+
+// used as a key type in context.WithValue()
+type ctxSendDecorators struct{}
+
+// WithSendDecorators adds the specified SendDecorators to the provided context.
+// If no SendDecorators are provided the context is unchanged.
+func WithSendDecorators(ctx context.Context, sendDecorator []SendDecorator) context.Context {
+	if len(sendDecorator) == 0 {
+		return ctx
+	}
+	return context.WithValue(ctx, ctxSendDecorators{}, sendDecorator)
+}
+
+// GetSendDecorators returns the SendDecorators in the provided context or the provided default SendDecorators.
+func GetSendDecorators(ctx context.Context, defaultSendDecorators ...SendDecorator) []SendDecorator {
+	inCtx := ctx.Value(ctxSendDecorators{})
+	if sd, ok := inCtx.([]SendDecorator); ok {
+		return sd
+	}
+	return defaultSendDecorators
+}
+
+// Sender is the interface that wraps the Do method to send HTTP requests.
+//
+// The standard http.Client conforms to this interface.
+type Sender interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// SenderFunc is a method that implements the Sender interface.
+type SenderFunc func(*http.Request) (*http.Response, error)
+
+// Do implements the Sender interface on SenderFunc.
+func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
+	return sf(r)
+}
+
+// SendDecorator takes and possibly decorates, by wrapping, a Sender.
+// Decorators may affect the http.Request and pass it along or, first, pass the http.Request along
+// then react to the http.Response result.
+type SendDecorator func(Sender) Sender
+
+// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
+func CreateSender(decorators ...SendDecorator) Sender {
+	return DecorateSender(sender(tls.RenegotiateNever), decorators...)
+}
+
+// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
+// the Sender. Decorators are applied in the order received, but their effect upon the request
+// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
+// post-decorator (pass the http.Request along and react to the results in http.Response).
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
+	for _, decorate := range decorators {
+		s = decorate(s)
+	}
+	return s
+}
+
+// Send sends, by means of the default http.Client, the passed http.Request, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the http.Client before invoking the Do method.
+//
+// Send is a convenience method and not recommended for production. Advanced users should use
+// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client).
+//
+// Send will not poll or retry requests.
+func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+	return SendWithSender(sender(tls.RenegotiateNever), r, decorators...)
+}
+
+// SendWithSender sends the passed http.Request, through the provided Sender, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the http.Client before invoking the Do method.
+//
+// SendWithSender will not poll or retry requests.
+func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+	return DecorateSender(s, decorators...).Do(r)
+}
+
+func sender(renegotiation tls.RenegotiationSupport) Sender {
+	// Use behaviour compatible with DefaultTransport, but require TLS minimum version.
+	defaultTransport := http.DefaultTransport.(*http.Transport)
+	transport := &http.Transport{
+		Proxy:                 defaultTransport.Proxy,
+		DialContext:           defaultTransport.DialContext,
+		MaxIdleConns:          defaultTransport.MaxIdleConns,
+		IdleConnTimeout:       defaultTransport.IdleConnTimeout,
+		TLSHandshakeTimeout:   defaultTransport.TLSHandshakeTimeout,
+		ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
+		TLSClientConfig: &tls.Config{
+			MinVersion:    tls.VersionTLS12,
+			Renegotiation: renegotiation,
+		},
+	}
+	var roundTripper http.RoundTripper = transport
+	if tracing.IsEnabled() {
+		roundTripper = tracing.NewTransport(transport)
+	}
+	j, _ := cookiejar.New(nil)
+	return &http.Client{Jar: j, Transport: roundTripper}
+}
+
+// AfterDelay returns a SendDecorator that delays for the passed time.Duration before
+// invoking the Sender. The delay may be terminated by canceling the request's context;
+// if canceled, no further Senders are invoked.
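+//
+// For example, to wait 30 seconds before sending (illustrative; s and r are assumed):
+//
+//	resp, err := SendWithSender(s, r, AfterDelay(30*time.Second))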
+func AfterDelay(d time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + if !DelayForBackoff(d, 0, r.Context().Done()) { + return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay") + } + return s.Do(r) + }) + } +} + +// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request. +func AsIs() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return s.Do(r) + }) + } +} + +// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which +// it closes the response if the passed Sender returns an error and the response body exists. +func DoCloseIfError() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err != nil { + Respond(resp, ByDiscardingBody(), ByClosing()) + } + return resp, err + }) + } +} + +// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is +// among the set passed. Since these are artificial errors, the response body may still require +// closing. +func DoErrorIfStatusCode(codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil && ResponseHasStatusCode(resp, codes...) { + err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return resp, err + }) + } +} + +// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response +// StatusCode is among the set passed. Since these are artificial errors, the response body +// may still require closing. +func DoErrorUnlessStatusCode(codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil && !ResponseHasStatusCode(resp, codes...) { + err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return resp, err + }) + } +} + +// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the +// passed status codes. It expects the http.Response to contain a Location header providing the +// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than +// the supplied duration. It will delay between requests for the duration specified in the +// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by +// closing the optional channel on the http.Request. +func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + resp, err = s.Do(r) + + if err == nil && ResponseHasStatusCode(resp, codes...) { + r, err = NewPollingRequestWithContext(r.Context(), resp) + + for err == nil && ResponseHasStatusCode(resp, codes...) 
{ + Respond(resp, + ByDiscardingBody(), + ByClosing()) + resp, err = SendWithSender(s, r, + AfterDelay(GetRetryAfter(resp, delay))) + } + } + + return resp, err + }) + } +} + +// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified +// number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on +// the http.Request. +func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + for attempt := 0; attempt < attempts; attempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + DrainResponseBody(resp) + resp, err = s.Do(rr.Request()) + if err == nil { + return resp, err + } + if !DelayForBackoff(backoff, attempt, r.Context().Done()) { + return nil, r.Context().Err() + } + } + return resp, err + }) + } +} + +// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified +// number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). Retrying may be canceled by cancelling the context on the http.Request. +// NOTE: Code http.StatusTooManyRequests (429) will *not* be counted against the number of attempts. +func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return doRetryForStatusCodesImpl(s, r, false, attempts, backoff, 0, codes...) + }) + } +} + +// DoRetryForStatusCodesWithCap returns a SendDecorator that retries for specified statusCodes for up to the +// specified number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). To cap the maximum possible delay between iterations specify a value greater +// than zero for cap. Retrying may be canceled by cancelling the context on the http.Request. +func DoRetryForStatusCodesWithCap(attempts int, backoff, cap time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return doRetryForStatusCodesImpl(s, r, true, attempts, backoff, cap, codes...) + }) + } +} + +func doRetryForStatusCodesImpl(s Sender, r *http.Request, count429 bool, attempts int, backoff, cap time.Duration, codes ...int) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + // Increment to add the first call (attempts denotes number of retries) + for attempt, delayCount := 0, 0; attempt < attempts+1; { + err = rr.Prepare() + if err != nil { + return + } + DrainResponseBody(resp) + resp, err = s.Do(rr.Request()) + // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication + // resp and err will both have a value, so in this case we don't want to retry as it will never succeed. + if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) { + return resp, err + } + delayed := DelayWithRetryAfter(resp, r.Context().Done()) + // enforce a 2 minute cap between requests when 429 status codes are + // not going to be counted as an attempt and when the cap is 0. + // this should only happen in the absence of a retry-after header. 
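+	// (Without the cap, a run of 429 responses that lack a Retry-After header
+	// would not consume attempts while delayCount keeps growing, so the
+	// exponential backoff below could otherwise wait arbitrarily long.)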
+ if !count429 && cap == 0 { + cap = 2 * time.Minute + } + if !delayed && !DelayForBackoffWithCap(backoff, cap, delayCount, r.Context().Done()) { + return resp, r.Context().Err() + } + // when count429 == false don't count a 429 against the number + // of attempts so that we continue to retry until it succeeds + if count429 || (resp == nil || resp.StatusCode != http.StatusTooManyRequests) { + attempt++ + } + // delay count is tracked separately from attempts to + // ensure that 429 participates in exponential back-off + delayCount++ + } + return resp, err +} + +// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header. +// The value of Retry-After can be either the number of seconds or a date in RFC1123 format. +// The function returns true after successfully waiting for the specified duration. If there is +// no Retry-After header or the wait is cancelled the return value is false. +func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool { + if resp == nil { + return false + } + var dur time.Duration + ra := resp.Header.Get("Retry-After") + if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 { + dur = time.Duration(retryAfter) * time.Second + } else if t, err := time.Parse(time.RFC1123, ra); err == nil { + dur = t.Sub(time.Now()) + } + if dur > 0 { + select { + case <-time.After(dur): + return true + case <-cancel: + return false + } + } + return false +} + +// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal +// to or greater than the specified duration, exponentially backing off between requests using the +// supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the +// optional channel on the http.Request. +func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + end := time.Now().Add(d) + for attempt := 0; time.Now().Before(end); attempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + DrainResponseBody(resp) + resp, err = s.Do(rr.Request()) + if err == nil { + return resp, err + } + if !DelayForBackoff(backoff, attempt, r.Context().Done()) { + return nil, r.Context().Err() + } + } + return resp, err + }) + } +} + +// WithLogging returns a SendDecorator that implements simple before and after logging of the +// request. +func WithLogging(logger *log.Logger) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + logger.Printf("Sending %s %s", r.Method, r.URL) + resp, err := s.Do(r) + if err != nil { + logger.Printf("%s %s received error '%v'", r.Method, r.URL, err) + } else { + logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status) + } + return resp, err + }) + } +} + +// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of +// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set +// to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early, +// returns false. +// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt +// count. 
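+//
+// For example, backoff = 2s yields delays of 2s, 4s, 8s, and 16s for attempts
+// 0 through 3 (2s * 2^attempt).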
+func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool { + return DelayForBackoffWithCap(backoff, 0, attempt, cancel) +} + +// DelayForBackoffWithCap invokes time.After for the supplied backoff duration raised to the power of +// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set +// to zero for no delay. To cap the maximum possible delay specify a value greater than zero for cap. +// The delay may be canceled by closing the passed channel. If terminated early, returns false. +// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt +// count. +func DelayForBackoffWithCap(backoff, cap time.Duration, attempt int, cancel <-chan struct{}) bool { + d := time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second + if cap > 0 && d > cap { + d = cap + } + select { + case <-time.After(d): + return true + case <-cancel: + return false + } +} diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE b/extender/code/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE new file mode 100644 index 000000000..b9d6a27ea --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/to/convert.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/to/convert.go new file mode 100644 index 000000000..86694bd25 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/to/convert.go @@ -0,0 +1,152 @@ +/* +Package to provides helpers to ease working with pointer values of marshalled structures. +*/ +package to + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// String returns a string value for the passed string pointer. It returns the empty string if the +// pointer is nil. +func String(s *string) string { + if s != nil { + return *s + } + return "" +} + +// StringPtr returns a pointer to the passed string. +func StringPtr(s string) *string { + return &s +} + +// StringSlice returns a string slice value for the passed string slice pointer. It returns a nil +// slice if the pointer is nil. 
+func StringSlice(s *[]string) []string {
+	if s != nil {
+		return *s
+	}
+	return nil
+}
+
+// StringSlicePtr returns a pointer to the passed string slice.
+func StringSlicePtr(s []string) *[]string {
+	return &s
+}
+
+// StringMap returns a map of strings built from the map of string pointers. The empty string is
+// used for nil pointers.
+func StringMap(msp map[string]*string) map[string]string {
+	ms := make(map[string]string, len(msp))
+	for k, sp := range msp {
+		if sp != nil {
+			ms[k] = *sp
+		} else {
+			ms[k] = ""
+		}
+	}
+	return ms
+}
+
+// StringMapPtr returns a pointer to a map of string pointers built from the passed map of strings.
+func StringMapPtr(ms map[string]string) *map[string]*string {
+	msp := make(map[string]*string, len(ms))
+	for k, s := range ms {
+		msp[k] = StringPtr(s)
+	}
+	return &msp
+}
+
+// Bool returns a bool value for the passed bool pointer. It returns false if the pointer is nil.
+func Bool(b *bool) bool {
+	if b != nil {
+		return *b
+	}
+	return false
+}
+
+// BoolPtr returns a pointer to the passed bool.
+func BoolPtr(b bool) *bool {
+	return &b
+}
+
+// Int returns an int value for the passed int pointer. It returns 0 if the pointer is nil.
+func Int(i *int) int {
+	if i != nil {
+		return *i
+	}
+	return 0
+}
+
+// IntPtr returns a pointer to the passed int.
+func IntPtr(i int) *int {
+	return &i
+}
+
+// Int32 returns an int32 value for the passed int32 pointer. It returns 0 if the pointer is nil.
+func Int32(i *int32) int32 {
+	if i != nil {
+		return *i
+	}
+	return 0
+}
+
+// Int32Ptr returns a pointer to the passed int32.
+func Int32Ptr(i int32) *int32 {
+	return &i
+}
+
+// Int64 returns an int64 value for the passed int64 pointer. It returns 0 if the pointer is nil.
+func Int64(i *int64) int64 {
+	if i != nil {
+		return *i
+	}
+	return 0
+}
+
+// Int64Ptr returns a pointer to the passed int64.
+func Int64Ptr(i int64) *int64 {
+	return &i
+}
+
+// Float32 returns a float32 value for the passed float32 pointer. It returns 0.0 if the pointer is nil.
+func Float32(i *float32) float32 {
+	if i != nil {
+		return *i
+	}
+	return 0.0
+}
+
+// Float32Ptr returns a pointer to the passed float32.
+func Float32Ptr(i float32) *float32 {
+	return &i
+}
+
+// Float64 returns a float64 value for the passed float64 pointer. It returns 0.0 if the pointer is nil.
+func Float64(i *float64) float64 {
+	if i != nil {
+		return *i
+	}
+	return 0.0
+}
+
+// Float64Ptr returns a pointer to the passed float64.
+func Float64Ptr(i float64) *float64 {
+	return &i
+}
+
+// ByteSlicePtr returns a pointer to the passed byte slice.
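+//
+// Like the other Ptr helpers here, it exists because Go cannot take the address of a
+// literal directly; a typical round trip looks like this (illustrative only):
+//
+//	location := to.StringPtr("westus") // *string for an SDK model field
+//	s := to.String(location)           // "westus"; "" if location were nil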
+func ByteSlicePtr(b []byte) *[]byte { + return &b +} diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/to/go.mod b/extender/code/vendor/github.com/Azure/go-autorest/autorest/to/go.mod new file mode 100644 index 000000000..48fd8c6e5 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/to/go.mod @@ -0,0 +1,5 @@ +module github.com/Azure/go-autorest/autorest/to + +go 1.12 + +require github.com/Azure/go-autorest/autorest v0.9.0 diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/to/go.sum b/extender/code/vendor/github.com/Azure/go-autorest/autorest/to/go.sum new file mode 100644 index 000000000..d7ee6b462 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/to/go.sum @@ -0,0 +1,17 @@ +github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go new file mode 100644 index 000000000..8e8292107 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package to + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest/autorest" diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/utility.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/utility.go new file mode 100644 index 000000000..67baab2ce --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/utility.go @@ -0,0 +1,239 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "reflect" + "strings" + + "github.com/Azure/go-autorest/autorest/adal" +) + +// EncodedAs is a series of constants specifying various data encodings +type EncodedAs string + +const ( + // EncodedAsJSON states that data is encoded as JSON + EncodedAsJSON EncodedAs = "JSON" + + // EncodedAsXML states that data is encoded as Xml + EncodedAsXML EncodedAs = "XML" +) + +// Decoder defines the decoding method json.Decoder and xml.Decoder share +type Decoder interface { + Decode(v interface{}) error +} + +// NewDecoder creates a new decoder appropriate to the passed encoding. +// encodedAs specifies the type of encoding and r supplies the io.Reader containing the +// encoded data. +func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder { + if encodedAs == EncodedAsJSON { + return json.NewDecoder(r) + } else if encodedAs == EncodedAsXML { + return xml.NewDecoder(r) + } + return nil +} + +// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy +// is especially useful if there is a chance the data will fail to decode. +// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v +// is the decoding destination. +func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) { + b := bytes.Buffer{} + return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) +} + +// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc. +// It utilizes io.TeeReader to copy the data read and has the same behavior when reading. +// Further, when it is closed, it ensures that rc is closed as well. 
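+//
+// ByCopying in this package relies on it to mirror a response body into a buffer
+// as the body is consumed, e.g. (illustrative):
+//
+//	var buf bytes.Buffer
+//	resp.Body = TeeReadCloser(resp.Body, &buf)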
+func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser { + return &teeReadCloser{rc, io.TeeReader(rc, w)} +} + +type teeReadCloser struct { + rc io.ReadCloser + r io.Reader +} + +func (t *teeReadCloser) Read(p []byte) (int, error) { + return t.r.Read(p) +} + +func (t *teeReadCloser) Close() error { + return t.rc.Close() +} + +func containsInt(ints []int, n int) bool { + for _, i := range ints { + if i == n { + return true + } + } + return false +} + +func escapeValueStrings(m map[string]string) map[string]string { + for key, value := range m { + m[key] = url.QueryEscape(value) + } + return m +} + +func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string { + mapOfStrings := make(map[string]string) + for key, value := range mapOfInterface { + mapOfStrings[key] = ensureValueString(value) + } + return mapOfStrings +} + +func ensureValueString(value interface{}) string { + if value == nil { + return "" + } + switch v := value.(type) { + case string: + return v + case []byte: + return string(v) + default: + return fmt.Sprintf("%v", v) + } +} + +// MapToValues method converts map[string]interface{} to url.Values. +func MapToValues(m map[string]interface{}) url.Values { + v := url.Values{} + for key, value := range m { + x := reflect.ValueOf(value) + if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { + for i := 0; i < x.Len(); i++ { + v.Add(key, ensureValueString(x.Index(i))) + } + } else { + v.Add(key, ensureValueString(value)) + } + } + return v +} + +// AsStringSlice method converts interface{} to []string. +// s must be of type slice or array or an error is returned. +// Each element of s will be converted to its string representation. +func AsStringSlice(s interface{}) ([]string, error) { + v := reflect.ValueOf(s) + if v.Kind() != reflect.Slice && v.Kind() != reflect.Array { + return nil, NewError("autorest", "AsStringSlice", "the value's type is not a slice or array.") + } + stringSlice := make([]string, 0, v.Len()) + + for i := 0; i < v.Len(); i++ { + stringSlice = append(stringSlice, fmt.Sprintf("%v", v.Index(i))) + } + return stringSlice, nil +} + +// String method converts interface v to string. If interface is a list, it +// joins list elements using the separator. Note that only sep[0] will be used for +// joining if any separator is specified. +func String(v interface{}, sep ...string) string { + if len(sep) == 0 { + return ensureValueString(v) + } + stringSlice, ok := v.([]string) + if ok == false { + var err error + stringSlice, err = AsStringSlice(v) + if err != nil { + panic(fmt.Sprintf("autorest: Couldn't convert value to a string %s.", err)) + } + } + return ensureValueString(strings.Join(stringSlice, sep[0])) +} + +// Encode method encodes url path and query parameters. +func Encode(location string, v interface{}, sep ...string) string { + s := String(v, sep...) + switch strings.ToLower(location) { + case "path": + return pathEscape(s) + case "query": + return queryEscape(s) + default: + return s + } +} + +func pathEscape(s string) string { + return strings.Replace(url.QueryEscape(s), "+", "%20", -1) +} + +func queryEscape(s string) string { + return url.QueryEscape(s) +} + +// ChangeToGet turns the specified http.Request into a GET (it assumes it wasn't). +// This is mainly useful for long-running operations that use the Azure-AsyncOperation +// header, so we change the initial PUT into a GET to retrieve the final result. 
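+//
+// The request is mutated in place and returned for convenience (illustrative;
+// putReq is assumed):
+//
+//	pollReq := ChangeToGet(putReq) // now a GET with no body or Content-Length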
+func ChangeToGet(req *http.Request) *http.Request { + req.Method = "GET" + req.Body = nil + req.ContentLength = 0 + req.Header.Del("Content-Length") + return req +} + +// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError +// interface. If err is a DetailedError it will walk the chain of Original errors. +func IsTokenRefreshError(err error) bool { + if _, ok := err.(adal.TokenRefreshError); ok { + return true + } + if de, ok := err.(DetailedError); ok { + return IsTokenRefreshError(de.Original) + } + return false +} + +// IsTemporaryNetworkError returns true if the specified error is a temporary network error or false +// if it's not. If the error doesn't implement the net.Error interface the return value is true. +func IsTemporaryNetworkError(err error) bool { + if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) { + return true + } + return false +} + +// DrainResponseBody reads the response body then closes it. +func DrainResponseBody(resp *http.Response) error { + if resp != nil && resp.Body != nil { + _, err := io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + return err + } + return nil +} diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE b/extender/code/vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE new file mode 100644 index 000000000..b9d6a27ea --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright 2015 Microsoft Corporation
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/validation/error.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/validation/error.go
new file mode 100644
index 000000000..fed156dbf
--- /dev/null
+++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/validation/error.go
@@ -0,0 +1,48 @@
+package validation
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"fmt"
+)
+
+// Error is the type that's returned when validation of an API's argument constraints fails.
+type Error struct {
+	// PackageType is the package type of the object emitting the error. For types, the value
+	// matches that produced by the '%T' format specifier of the fmt package. For other elements,
+	// such as functions, it is just the package name (e.g., "autorest").
+	PackageType string
+
+	// Method is the name of the method raising the error.
+	Method string
+
+	// Message is the error message.
+	Message string
+}
+
+// Error returns a string containing the details of the validation failure.
+func (e Error) Error() string {
+	return fmt.Sprintf("%s#%s: Invalid input: %s", e.PackageType, e.Method, e.Message)
+}
+
+// NewError creates a new Error object with the specified parameters.
+// message is treated as a format string to which the optional args apply.
+func NewError(packageType string, method string, message string, args ...interface{}) Error {
+	return Error{
+		PackageType: packageType,
+		Method:      method,
+		Message:     fmt.Sprintf(message, args...),
+	}
+}
diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/validation/go.mod b/extender/code/vendor/github.com/Azure/go-autorest/autorest/validation/go.mod
new file mode 100644
index 000000000..c44c51ff6
--- /dev/null
+++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/validation/go.mod
@@ -0,0 +1,5 @@
+module github.com/Azure/go-autorest/autorest/validation
+
+go 1.12
+
+require github.com/stretchr/testify v1.3.0
diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/validation/go.sum b/extender/code/vendor/github.com/Azure/go-autorest/autorest/validation/go.sum
new file mode 100644
index 000000000..4347755af
--- /dev/null
+++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/validation/go.sum
@@ -0,0 +1,7 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go
new file mode 100644
index 000000000..65899b69b
--- /dev/null
+++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go
@@ -0,0 +1,400 @@
+/*
+Package validation provides methods for validating parameter values using reflection.
+*/
+package validation
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"fmt"
+	"reflect"
+	"regexp"
+	"strings"
+)
+
+// Constraint stores the constraint name, the target field name,
+// the rule, and any chained validations.
+type Constraint struct {
+
+	// Target field name for validation.
+	Target string
+
+	// Constraint name, e.g. MinLength, MaxLength, Pattern, etc.
+	Name string
+
+	// Rule for the constraint, e.g. greater than 10, less than 5, etc.
+	Rule interface{}
+
+	// Chain of validations for struct types.
+	Chain []Constraint
+}
+
+// Validation stores parameter-wise validation.
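+//
+// A minimal usage sketch (illustrative only; assumes a string variable
+// `name` that must be non-empty):
+//
+//	err := Validate([]Validation{{
+//		TargetValue: name,
+//		Constraints: []Constraint{{Target: "name", Name: MinLength, Rule: 1, Chain: nil}},
+//	}})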
+type Validation struct { + TargetValue interface{} + Constraints []Constraint +} + +// Constraint list +const ( + Empty = "Empty" + Null = "Null" + ReadOnly = "ReadOnly" + Pattern = "Pattern" + MaxLength = "MaxLength" + MinLength = "MinLength" + MaxItems = "MaxItems" + MinItems = "MinItems" + MultipleOf = "MultipleOf" + UniqueItems = "UniqueItems" + InclusiveMaximum = "InclusiveMaximum" + ExclusiveMaximum = "ExclusiveMaximum" + ExclusiveMinimum = "ExclusiveMinimum" + InclusiveMinimum = "InclusiveMinimum" +) + +// Validate method validates constraints on parameter +// passed in validation array. +func Validate(m []Validation) error { + for _, item := range m { + v := reflect.ValueOf(item.TargetValue) + for _, constraint := range item.Constraints { + var err error + switch v.Kind() { + case reflect.Ptr: + err = validatePtr(v, constraint) + case reflect.String: + err = validateString(v, constraint) + case reflect.Struct: + err = validateStruct(v, constraint) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + err = validateInt(v, constraint) + case reflect.Float32, reflect.Float64: + err = validateFloat(v, constraint) + case reflect.Array, reflect.Slice, reflect.Map: + err = validateArrayMap(v, constraint) + default: + err = createError(v, constraint, fmt.Sprintf("unknown type %v", v.Kind())) + } + + if err != nil { + return err + } + } + } + return nil +} + +func validateStruct(x reflect.Value, v Constraint, name ...string) error { + //Get field name from target name which is in format a.b.c + s := strings.Split(v.Target, ".") + f := x.FieldByName(s[len(s)-1]) + if isZero(f) { + return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.Target)) + } + + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(f), + Constraints: []Constraint{v}, + }, + }) +} + +func validatePtr(x reflect.Value, v Constraint) error { + if v.Name == ReadOnly { + if !x.IsNil() { + return createError(x.Elem(), v, "readonly parameter; must send as nil or empty in request") + } + return nil + } + if x.IsNil() { + return checkNil(x, v) + } + if v.Chain != nil { + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(x.Elem()), + Constraints: v.Chain, + }, + }) + } + return nil +} + +func validateInt(x reflect.Value, v Constraint) error { + i := x.Int() + r, ok := toInt64(v.Rule) + if !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) + } + switch v.Name { + case MultipleOf: + if i%r != 0 { + return createError(x, v, fmt.Sprintf("value must be a multiple of %v", r)) + } + case ExclusiveMinimum: + if i <= r { + return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) + } + case ExclusiveMaximum: + if i >= r { + return createError(x, v, fmt.Sprintf("value must be less than %v", r)) + } + case InclusiveMinimum: + if i < r { + return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) + } + case InclusiveMaximum: + if i > r { + return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) + } + default: + return createError(x, v, fmt.Sprintf("constraint %v is not applicable for type integer", v.Name)) + } + return nil +} + +func validateFloat(x reflect.Value, v Constraint) error { + f := x.Float() + r, ok := v.Rule.(float64) + if !ok { + return createError(x, v, fmt.Sprintf("rule must be float value for %v constraint; got: %v", v.Name, v.Rule)) + } + switch v.Name { + case ExclusiveMinimum: + if f <= r { + return createError(x, v, 
fmt.Sprintf("value must be greater than %v", r)) + } + case ExclusiveMaximum: + if f >= r { + return createError(x, v, fmt.Sprintf("value must be less than %v", r)) + } + case InclusiveMinimum: + if f < r { + return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) + } + case InclusiveMaximum: + if f > r { + return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) + } + default: + return createError(x, v, fmt.Sprintf("constraint %s is not applicable for type float", v.Name)) + } + return nil +} + +func validateString(x reflect.Value, v Constraint) error { + s := x.String() + switch v.Name { + case Empty: + if len(s) == 0 { + return checkEmpty(x, v) + } + case Pattern: + reg, err := regexp.Compile(v.Rule.(string)) + if err != nil { + return createError(x, v, err.Error()) + } + if !reg.MatchString(s) { + return createError(x, v, fmt.Sprintf("value doesn't match pattern %v", v.Rule)) + } + case MaxLength: + if _, ok := v.Rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) + } + if len(s) > v.Rule.(int) { + return createError(x, v, fmt.Sprintf("value length must be less than or equal to %v", v.Rule)) + } + case MinLength: + if _, ok := v.Rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) + } + if len(s) < v.Rule.(int) { + return createError(x, v, fmt.Sprintf("value length must be greater than or equal to %v", v.Rule)) + } + case ReadOnly: + if len(s) > 0 { + return createError(reflect.ValueOf(s), v, "readonly parameter; must send as nil or empty in request") + } + default: + return createError(x, v, fmt.Sprintf("constraint %s is not applicable to string type", v.Name)) + } + + if v.Chain != nil { + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(x), + Constraints: v.Chain, + }, + }) + } + return nil +} + +func validateArrayMap(x reflect.Value, v Constraint) error { + switch v.Name { + case Null: + if x.IsNil() { + return checkNil(x, v) + } + case Empty: + if x.IsNil() || x.Len() == 0 { + return checkEmpty(x, v) + } + case MaxItems: + if _, ok := v.Rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule)) + } + if x.Len() > v.Rule.(int) { + return createError(x, v, fmt.Sprintf("maximum item limit is %v; got: %v", v.Rule, x.Len())) + } + case MinItems: + if _, ok := v.Rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule)) + } + if x.Len() < v.Rule.(int) { + return createError(x, v, fmt.Sprintf("minimum item limit is %v; got: %v", v.Rule, x.Len())) + } + case UniqueItems: + if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { + if !checkForUniqueInArray(x) { + return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x)) + } + } else if x.Kind() == reflect.Map { + if !checkForUniqueInMap(x) { + return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x)) + } + } else { + return createError(x, v, fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", v.Name, x.Kind())) + } + case ReadOnly: + if x.Len() != 0 { + return createError(x, v, "readonly parameter; must send as nil or empty in request") + } + case Pattern: + reg, err := regexp.Compile(v.Rule.(string)) + if err != nil { + return createError(x, v, err.Error()) + } + keys 
:= x.MapKeys() + for _, k := range keys { + if !reg.MatchString(k.String()) { + return createError(k, v, fmt.Sprintf("map key doesn't match pattern %v", v.Rule)) + } + } + default: + return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.Name)) + } + + if v.Chain != nil { + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(x), + Constraints: v.Chain, + }, + }) + } + return nil +} + +func checkNil(x reflect.Value, v Constraint) error { + if _, ok := v.Rule.(bool); !ok { + return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule)) + } + if v.Rule.(bool) { + return createError(x, v, "value can not be null; required parameter") + } + return nil +} + +func checkEmpty(x reflect.Value, v Constraint) error { + if _, ok := v.Rule.(bool); !ok { + return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule)) + } + + if v.Rule.(bool) { + return createError(x, v, "value can not be null or empty; required parameter") + } + return nil +} + +func checkForUniqueInArray(x reflect.Value) bool { + if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { + return false + } + arrOfInterface := make([]interface{}, x.Len()) + + for i := 0; i < x.Len(); i++ { + arrOfInterface[i] = x.Index(i).Interface() + } + + m := make(map[interface{}]bool) + for _, val := range arrOfInterface { + if m[val] { + return false + } + m[val] = true + } + return true +} + +func checkForUniqueInMap(x reflect.Value) bool { + if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { + return false + } + mapOfInterface := make(map[interface{}]interface{}, x.Len()) + + keys := x.MapKeys() + for _, k := range keys { + mapOfInterface[k.Interface()] = x.MapIndex(k).Interface() + } + + m := make(map[interface{}]bool) + for _, val := range mapOfInterface { + if m[val] { + return false + } + m[val] = true + } + return true +} + +func getInterfaceValue(x reflect.Value) interface{} { + if x.Kind() == reflect.Invalid { + return nil + } + return x.Interface() +} + +func isZero(x interface{}) bool { + return x == reflect.Zero(reflect.TypeOf(x)).Interface() +} + +func createError(x reflect.Value, v Constraint, err string) error { + return fmt.Errorf("autorest/validation: validation failed: parameter=%s constraint=%s value=%#v details: %s", + v.Target, v.Name, getInterfaceValue(x), err) +} + +func toInt64(v interface{}) (int64, bool) { + if i64, ok := v.(int64); ok { + return i64, true + } + // older generators emit max constants as int, so if int64 fails fall back to int + if i32, ok := v.(int); ok { + return int64(i32), true + } + return 0, false +} diff --git a/extender/code/vendor/github.com/Azure/go-autorest/autorest/version.go b/extender/code/vendor/github.com/Azure/go-autorest/autorest/version.go new file mode 100644 index 000000000..a42f705d3 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -0,0 +1,41 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "runtime" +) + +const number = "v13.4.0" + +var ( + userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + number, + ) +) + +// UserAgent returns a string containing the Go version, system architecture and OS, and the go-autorest version. +func UserAgent() string { + return userAgent +} + +// Version returns the semantic version (see http://semver.org). +func Version() string { + return number +} diff --git a/extender/code/vendor/github.com/Azure/go-autorest/azure-pipelines.yml b/extender/code/vendor/github.com/Azure/go-autorest/azure-pipelines.yml new file mode 100644 index 000000000..6fb8404fd --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/azure-pipelines.yml @@ -0,0 +1,105 @@ +variables: + GOPATH: '$(system.defaultWorkingDirectory)/work' + sdkPath: '$(GOPATH)/src/github.com/$(build.repository.name)' + +jobs: + - job: 'goautorest' + displayName: 'Run go-autorest CI Checks' + + strategy: + matrix: + Linux_Go113: + vm.image: 'ubuntu-18.04' + go.version: '1.13' + Linux_Go114: + vm.image: 'ubuntu-18.04' + go.version: '1.14' + + pool: + vmImage: '$(vm.image)' + + steps: + - task: GoTool@0 + inputs: + version: '$(go.version)' + displayName: "Select Go Version" + + - script: | + set -e + mkdir -p '$(GOPATH)/bin' + mkdir -p '$(sdkPath)' + shopt -s extglob + mv !(work) '$(sdkPath)' + echo '##vso[task.prependpath]$(GOPATH)/bin' + displayName: 'Create Go Workspace' + + - script: | + set -e + curl -sSL https://raw.githubusercontent.com/golang/dep/master/install.sh | sh + dep ensure -v + go install ./vendor/golang.org/x/lint/golint + go get github.com/jstemmer/go-junit-report + go get github.com/axw/gocov/gocov + go get github.com/AlekSi/gocov-xml + go get -u github.com/matm/gocov-html + workingDirectory: '$(sdkPath)' + displayName: 'Install Dependencies' + + - script: | + go vet ./autorest/... + go vet ./logger/... + go vet ./tracing/... + workingDirectory: '$(sdkPath)' + displayName: 'Vet' + + - script: | + go build -v ./autorest/... + go build -v ./logger/... + go build -v ./tracing/... + workingDirectory: '$(sdkPath)' + displayName: 'Build' + + - script: | + set -e + go test -race -v -coverprofile=coverage.txt -covermode atomic ./autorest/... ./logger/... ./tracing/... 2>&1 | go-junit-report > report.xml + gocov convert coverage.txt > coverage.json + gocov-xml < coverage.json > coverage.xml + gocov-html < coverage.json > coverage.html + workingDirectory: '$(sdkPath)' + displayName: 'Run Tests' + + - script: grep -L -r --include *.go --exclude-dir vendor -P "Copyright (\d{4}|\(c\)) Microsoft" ./ | tee >&2 + workingDirectory: '$(sdkPath)' + displayName: 'Copyright Header Check' + failOnStderr: true + condition: succeededOrFailed() + + - script: | + gofmt -s -l -w ./autorest/. >&2 + gofmt -s -l -w ./logger/. >&2 + gofmt -s -l -w ./tracing/. >&2 + workingDirectory: '$(sdkPath)' + displayName: 'Format Check' + failOnStderr: true + condition: succeededOrFailed() + + - script: | + golint ./autorest/... >&2 + golint ./logger/... >&2 + golint ./tracing/... 
>&2 + workingDirectory: '$(sdkPath)' + displayName: 'Linter Check' + failOnStderr: true + condition: succeededOrFailed() + + - task: PublishTestResults@2 + inputs: + testRunner: JUnit + testResultsFiles: $(sdkPath)/report.xml + failTaskOnFailedTests: true + + - task: PublishCodeCoverageResults@1 + inputs: + codeCoverageTool: Cobertura + summaryFileLocation: $(sdkPath)/coverage.xml + additionalCodeCoverageFiles: $(sdkPath)/coverage.html diff --git a/extender/code/vendor/github.com/Azure/go-autorest/doc.go b/extender/code/vendor/github.com/Azure/go-autorest/doc.go new file mode 100644 index 000000000..99ae6ca98 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/doc.go @@ -0,0 +1,18 @@ +/* +Package go-autorest provides an HTTP request client for use with Autorest-generated API client packages. +*/ +package go_autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. diff --git a/extender/code/vendor/github.com/Azure/go-autorest/logger/LICENSE b/extender/code/vendor/github.com/Azure/go-autorest/logger/LICENSE new file mode 100644 index 000000000..b9d6a27ea --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/logger/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/extender/code/vendor/github.com/Azure/go-autorest/logger/go.mod b/extender/code/vendor/github.com/Azure/go-autorest/logger/go.mod new file mode 100644 index 000000000..f22ed56bc --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/logger/go.mod @@ -0,0 +1,3 @@ +module github.com/Azure/go-autorest/logger + +go 1.12 diff --git a/extender/code/vendor/github.com/Azure/go-autorest/logger/logger.go b/extender/code/vendor/github.com/Azure/go-autorest/logger/logger.go new file mode 100644 index 000000000..da09f394c --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/logger/logger.go @@ -0,0 +1,328 @@ +package logger + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "strings" + "sync" + "time" +) + +// LevelType tells a logger the minimum level to log. When code reports a log entry, +// the LogLevel indicates the level of the log entry. 
The logger only records entries +// whose level is at least the level it was told to log. See the Log* constants. +// For example, if a logger is configured with LogError, then LogError, LogPanic, +// and LogFatal entries will be logged; lower level entries are ignored. +type LevelType uint32 + +const ( + // LogNone tells a logger not to log any entries passed to it. + LogNone LevelType = iota + + // LogFatal tells a logger to log all LogFatal entries passed to it. + LogFatal + + // LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it. + LogPanic + + // LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it. + LogError + + // LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogWarning + + // LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogInfo + + // LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogDebug +) + +const ( + logNone = "NONE" + logFatal = "FATAL" + logPanic = "PANIC" + logError = "ERROR" + logWarning = "WARNING" + logInfo = "INFO" + logDebug = "DEBUG" + logUnknown = "UNKNOWN" +) + +// ParseLevel converts the specified string into the corresponding LevelType. +func ParseLevel(s string) (lt LevelType, err error) { + switch strings.ToUpper(s) { + case logFatal: + lt = LogFatal + case logPanic: + lt = LogPanic + case logError: + lt = LogError + case logWarning: + lt = LogWarning + case logInfo: + lt = LogInfo + case logDebug: + lt = LogDebug + default: + err = fmt.Errorf("bad log level '%s'", s) + } + return +} + +// String implements the stringer interface for LevelType. +func (lt LevelType) String() string { + switch lt { + case LogNone: + return logNone + case LogFatal: + return logFatal + case LogPanic: + return logPanic + case LogError: + return logError + case LogWarning: + return logWarning + case LogInfo: + return logInfo + case LogDebug: + return logDebug + default: + return logUnknown + } +} + +// Filter defines functions for filtering HTTP request/response content. +type Filter struct { + // URL returns a potentially modified string representation of a request URL. + URL func(u *url.URL) string + + // Header returns a potentially modified set of values for the specified key. + // To completely exclude the header key/values return false. + Header func(key string, val []string) (bool, []string) + + // Body returns a potentially modified request/response body. + Body func(b []byte) []byte +} + +func (f Filter) processURL(u *url.URL) string { + if f.URL == nil { + return u.String() + } + return f.URL(u) +} + +func (f Filter) processHeader(k string, val []string) (bool, []string) { + if f.Header == nil { + return true, val + } + return f.Header(k, val) +} + +func (f Filter) processBody(b []byte) []byte { + if f.Body == nil { + return b + } + return f.Body(b) +} + +// Writer defines methods for writing to a logging facility. +type Writer interface { + // Writeln writes the specified message with the standard log entry header and new-line character. + Writeln(level LevelType, message string) + + // Writef writes the specified format specifier with the standard log entry header and no new-line character. + Writef(level LevelType, format string, a ...interface{}) + + // WriteRequest writes the specified HTTP request to the logger if the log level is greater than + // or equal to LogInfo. 
The request body, if set, is logged at level LogDebug or higher. + // Custom filters can be specified to exclude URL, header, and/or body content from the log. + // By default no request content is excluded. + WriteRequest(req *http.Request, filter Filter) + + // WriteResponse writes the specified HTTP response to the logger if the log level is greater than + // or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher. + // Custom filters can be specified to exclude URL, header, and/or body content from the log. + // By default no response content is excluded. + WriteResponse(resp *http.Response, filter Filter) +} + +// Instance is the default log writer initialized during package init. +// This can be replaced with a custom implementation as required. +var Instance Writer + +// default log level +var logLevel = LogNone + +// Level returns the value specified in AZURE_GO_AUTOREST_LOG_LEVEL. +// If no value was specified the default value is LogNone. +// Custom loggers can call this to retrieve the configured log level. +func Level() LevelType { + return logLevel +} + +func init() { + // separated for testing purposes + initDefaultLogger() +} + +func initDefaultLogger() { + // init with nilLogger so callers don't have to do a nil check on Default + Instance = nilLogger{} + llStr := strings.ToLower(os.Getenv("AZURE_GO_SDK_LOG_LEVEL")) + if llStr == "" { + return + } + var err error + logLevel, err = ParseLevel(llStr) + if err != nil { + fmt.Fprintf(os.Stderr, "go-autorest: failed to parse log level: %s\n", err.Error()) + return + } + if logLevel == LogNone { + return + } + // default to stderr + dest := os.Stderr + lfStr := os.Getenv("AZURE_GO_SDK_LOG_FILE") + if strings.EqualFold(lfStr, "stdout") { + dest = os.Stdout + } else if lfStr != "" { + lf, err := os.Create(lfStr) + if err == nil { + dest = lf + } else { + fmt.Fprintf(os.Stderr, "go-autorest: failed to create log file, using stderr: %s\n", err.Error()) + } + } + Instance = fileLogger{ + logLevel: logLevel, + mu: &sync.Mutex{}, + logFile: dest, + } +} + +// the nil logger does nothing +type nilLogger struct{} + +func (nilLogger) Writeln(LevelType, string) {} + +func (nilLogger) Writef(LevelType, string, ...interface{}) {} + +func (nilLogger) WriteRequest(*http.Request, Filter) {} + +func (nilLogger) WriteResponse(*http.Response, Filter) {} + +// A File is used instead of a Logger so the stream can be flushed after every write. 
+type fileLogger struct { + logLevel LevelType + mu *sync.Mutex // for synchronizing writes to logFile + logFile *os.File +} + +func (fl fileLogger) Writeln(level LevelType, message string) { + fl.Writef(level, "%s\n", message) +} + +func (fl fileLogger) Writef(level LevelType, format string, a ...interface{}) { + if fl.logLevel >= level { + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprintf(fl.logFile, "%s %s", entryHeader(level), fmt.Sprintf(format, a...)) + fl.logFile.Sync() + } +} + +func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) { + if req == nil || fl.logLevel < LogInfo { + return + } + b := &bytes.Buffer{} + fmt.Fprintf(b, "%s REQUEST: %s %s\n", entryHeader(LogInfo), req.Method, filter.processURL(req.URL)) + // dump headers + for k, v := range req.Header { + if ok, mv := filter.processHeader(k, v); ok { + fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) + } + } + if fl.shouldLogBody(req.Header, req.Body) { + // dump body + body, err := ioutil.ReadAll(req.Body) + if err == nil { + fmt.Fprintln(b, string(filter.processBody(body))) + if nc, ok := req.Body.(io.Seeker); ok { + // rewind to the beginning + nc.Seek(0, io.SeekStart) + } else { + // recreate the body + req.Body = ioutil.NopCloser(bytes.NewReader(body)) + } + } else { + fmt.Fprintf(b, "failed to read body: %v\n", err) + } + } + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprint(fl.logFile, b.String()) + fl.logFile.Sync() +} + +func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) { + if resp == nil || fl.logLevel < LogInfo { + return + } + b := &bytes.Buffer{} + fmt.Fprintf(b, "%s RESPONSE: %d %s\n", entryHeader(LogInfo), resp.StatusCode, filter.processURL(resp.Request.URL)) + // dump headers + for k, v := range resp.Header { + if ok, mv := filter.processHeader(k, v); ok { + fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) + } + } + if fl.shouldLogBody(resp.Header, resp.Body) { + // dump body + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err == nil { + fmt.Fprintln(b, string(filter.processBody(body))) + resp.Body = ioutil.NopCloser(bytes.NewReader(body)) + } else { + fmt.Fprintf(b, "failed to read body: %v\n", err) + } + } + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprint(fl.logFile, b.String()) + fl.logFile.Sync() +} + +// returns true if the provided body should be included in the log +func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool { + ct := header.Get("Content-Type") + return fl.logLevel >= LogDebug && body != nil && !strings.Contains(ct, "application/octet-stream") +} + +// creates standard header for log entries, it contains a timestamp and the log level +func entryHeader(level LevelType) string { + // this format provides a fixed number of digits so the size of the timestamp is constant + return fmt.Sprintf("(%s) %s:", time.Now().Format("2006-01-02T15:04:05.0000000Z07:00"), level.String()) +} diff --git a/extender/code/vendor/github.com/Azure/go-autorest/tracing/LICENSE b/extender/code/vendor/github.com/Azure/go-autorest/tracing/LICENSE new file mode 100644 index 000000000..b9d6a27ea --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/tracing/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/extender/code/vendor/github.com/Azure/go-autorest/tracing/go.mod b/extender/code/vendor/github.com/Azure/go-autorest/tracing/go.mod new file mode 100644 index 000000000..a2cdec78c --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/tracing/go.mod @@ -0,0 +1,5 @@ +module github.com/Azure/go-autorest/tracing + +go 1.12 + +require github.com/Azure/go-autorest v14.2.0+incompatible diff --git a/extender/code/vendor/github.com/Azure/go-autorest/tracing/go.sum b/extender/code/vendor/github.com/Azure/go-autorest/tracing/go.sum new file mode 100644 index 000000000..1fc56a962 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/tracing/go.sum @@ -0,0 +1,2 @@ +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= diff --git a/extender/code/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go b/extender/code/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go new file mode 100644 index 000000000..e163975cd --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package tracing + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/extender/code/vendor/github.com/Azure/go-autorest/tracing/tracing.go b/extender/code/vendor/github.com/Azure/go-autorest/tracing/tracing.go new file mode 100644 index 000000000..0e7a6e962 --- /dev/null +++ b/extender/code/vendor/github.com/Azure/go-autorest/tracing/tracing.go @@ -0,0 +1,67 @@ +package tracing + +// Copyright 2018 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "net/http" +) + +// Tracer represents an HTTP tracing facility. +type Tracer interface { + NewTransport(base *http.Transport) http.RoundTripper + StartSpan(ctx context.Context, name string) context.Context + EndSpan(ctx context.Context, httpStatusCode int, err error) +} + +var ( + tracer Tracer +) + +// Register will register the provided Tracer. Pass nil to unregister a Tracer. 
+func Register(t Tracer) { + tracer = t +} + +// IsEnabled returns true if a Tracer has been registered. +func IsEnabled() bool { + return tracer != nil +} + +// NewTransport creates a new instrumenting http.RoundTripper for the +// registered Tracer. If no Tracer has been registered it returns nil. +func NewTransport(base *http.Transport) http.RoundTripper { + if tracer != nil { + return tracer.NewTransport(base) + } + return nil +} + +// StartSpan starts a trace span with the specified name, associating it with the +// provided context. Has no effect if a Tracer has not been registered. +func StartSpan(ctx context.Context, name string) context.Context { + if tracer != nil { + return tracer.StartSpan(ctx, name) + } + return ctx +} + +// EndSpan ends a previously started span stored in the context. +// Has no effect if a Tracer has not been registered. +func EndSpan(ctx context.Context, httpStatusCode int, err error) { + if tracer != nil { + tracer.EndSpan(ctx, httpStatusCode, err) + } +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/LICENSE b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/LICENSE new file mode 100644 index 000000000..b397cbe1b --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
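Aside on the vendored tracing package above: the Tracer interface plus the package-level helpers is the entire tracing surface go-autorest exposes, so wiring it up takes only a few lines. A minimal sketch using just the functions shown; the demoTracer type and span name are hypothetical, not part of the vendored code.

package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/tracing"
)

// demoTracer is a hypothetical Tracer that only counts started spans.
type demoTracer struct{ spans int }

func (d *demoTracer) NewTransport(base *http.Transport) http.RoundTripper { return base }
func (d *demoTracer) StartSpan(ctx context.Context, name string) context.Context {
	d.spans++
	return ctx
}
func (d *demoTracer) EndSpan(ctx context.Context, httpStatusCode int, err error) {}

func main() {
	t := &demoTracer{}
	tracing.Register(t) // the package-level helpers now delegate to t
	ctx := tracing.StartSpan(context.Background(), "demo.operation")
	tracing.EndSpan(ctx, http.StatusOK, nil)
	fmt.Println("enabled:", tracing.IsEnabled(), "spans:", t.spans)
}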
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018 Google LLC + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/cache/cache.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/cache/cache.go new file mode 100644 index 000000000..94187a341 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/cache/cache.go @@ -0,0 +1,194 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "fmt" + "os" + "path" + "path/filepath" + "time" + + "github.com/GoogleContainerTools/kaniko/pkg/config" + "github.com/GoogleContainerTools/kaniko/pkg/creds" + "github.com/GoogleContainerTools/kaniko/pkg/util" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/google/go-containerregistry/pkg/v1/tarball" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// LayerCache is the layer cache +type LayerCache interface { + RetrieveLayer(string) (v1.Image, error) +} + +// RegistryCache is the registry cache +type RegistryCache struct { + Opts *config.KanikoOptions +} + +// RetrieveLayer retrieves a layer from the cache given the cache key ck. +func (rc *RegistryCache) RetrieveLayer(ck string) (v1.Image, error) { + cache, err := Destination(rc.Opts, ck) + if err != nil { + return nil, errors.Wrap(err, "getting cache destination") + } + logrus.Infof("Checking for cached layer %s...", cache) + + cacheRef, err := name.NewTag(cache, name.WeakValidation) + if err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("getting reference for %s", cache)) + } + + registryName := cacheRef.Repository.Registry.Name() + if rc.Opts.Insecure || rc.Opts.InsecureRegistries.Contains(registryName) { + newReg, err := name.NewRegistry(registryName, name.WeakValidation, name.Insecure) + if err != nil { + return nil, err + } + cacheRef.Repository.Registry = newReg + } + + tr := util.MakeTransport(rc.Opts.RegistryOptions, registryName) + + img, err := remote.Image(cacheRef, remote.WithTransport(tr), remote.WithAuthFromKeychain(creds.GetKeychain())) + if err != nil { + return nil, err + } + + cf, err := img.ConfigFile() + if err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("retrieving config file for %s", cache)) + } + + expiry := cf.Created.Add(rc.Opts.CacheTTL) + // Layer is stale, rebuild it. 
+ if expiry.Before(time.Now()) { + logrus.Infof("Cache entry expired: %s", cache) + return nil, fmt.Errorf("Cache entry expired: %s", cache) + } + + // Force the manifest to be populated + if _, err := img.RawManifest(); err != nil { + return nil, err + } + return img, nil +} + +// Destination returns the repo where the layer should be stored +// If no cache is specified, one is inferred from the destination provided +func Destination(opts *config.KanikoOptions, cacheKey string) (string, error) { + cache := opts.CacheRepo + if cache == "" { + destination := opts.Destinations[0] + destRef, err := name.NewTag(destination, name.WeakValidation) + if err != nil { + return "", errors.Wrap(err, "getting tag for destination") + } + return fmt.Sprintf("%s/cache:%s", destRef.Context(), cacheKey), nil + } + return fmt.Sprintf("%s:%s", cache, cacheKey), nil +} + +// LocalSource retrieves a source image from a local cache given cacheKey +func LocalSource(opts *config.CacheOptions, cacheKey string) (v1.Image, error) { + cache := opts.CacheDir + if cache == "" { + return nil, nil + } + + path := path.Join(cache, cacheKey) + + fi, err := os.Stat(path) + if err != nil { + msg := fmt.Sprintf("No file found for cache key %v %v", cacheKey, err) + logrus.Debug(msg) + return nil, NotFoundErr{msg: msg} + } + + // A stale cache is a bad cache + expiry := fi.ModTime().Add(opts.CacheTTL) + if expiry.Before(time.Now()) { + msg := fmt.Sprintf("Cached image is too old: %v", fi.ModTime()) + logrus.Debug(msg) + return nil, ExpiredErr{msg: msg} + } + + logrus.Infof("Found %s in local cache", cacheKey) + return cachedImageFromPath(path) +} + +// cachedImage represents a v1.Tarball that is cached locally in a CAS. +// Computing the digest for a v1.Tarball is very expensive. If the tarball +// is named with the digest we can store this and return it directly rather +// than recompute it. 
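The Destination logic above rewards a concrete reading: with an empty CacheRepo, the cache repo is derived from the repository of the first destination. A minimal sketch with hypothetical image and cache-key values (Destinations is appended to rather than assigned, since its concrete slice type is internal to the config package):

package main

import (
	"fmt"

	"github.com/GoogleContainerTools/kaniko/pkg/cache"
	"github.com/GoogleContainerTools/kaniko/pkg/config"
)

func main() {
	var opts config.KanikoOptions // empty CacheRepo: the repo is inferred from the first destination
	opts.Destinations = append(opts.Destinations, "gcr.io/demo/app:latest") // hypothetical destination

	repo, err := cache.Destination(&opts, "b2c3d4") // "b2c3d4" is a hypothetical cache key
	if err != nil {
		panic(err)
	}
	fmt.Println(repo) // gcr.io/demo/app/cache:b2c3d4
}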
+type cachedImage struct { + digest string + v1.Image + mfst *v1.Manifest +} + +func (c *cachedImage) Digest() (v1.Hash, error) { + return v1.NewHash(c.digest) +} + +func (c *cachedImage) Manifest() (*v1.Manifest, error) { + if c.mfst == nil { + return c.Image.Manifest() + } + return c.mfst, nil +} + +func mfstFromPath(p string) (*v1.Manifest, error) { + f, err := os.Open(p) + if err != nil { + return nil, err + } + defer f.Close() + return v1.ParseManifest(f) +} + +func cachedImageFromPath(p string) (v1.Image, error) { + imgTar, err := tarball.ImageFromPath(p, nil) + if err != nil { + return nil, errors.Wrap(err, "getting image from path") + } + + // Manifests may be present next to the tar, named with a ".json" suffix + mfstPath := p + ".json" + + var mfst *v1.Manifest + if _, err := os.Stat(mfstPath); err != nil { + logrus.Debugf("Manifest does not exist at file: %s", mfstPath) + } else { + mfst, err = mfstFromPath(mfstPath) + if err != nil { + logrus.Debugf("Error parsing manifest from file: %s", mfstPath) + } else { + logrus.Infof("Found manifest at %s", mfstPath) + } + } + + return &cachedImage{ + digest: filepath.Base(p), + Image: imgTar, + mfst: mfst, + }, nil +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/cache/errors.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/cache/errors.go new file mode 100644 index 000000000..806f438ce --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/cache/errors.go @@ -0,0 +1,79 @@ +/* +Copyright 2019 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +// IsAlreadyCached returns true if the supplied error is of the type AlreadyCachedErr +// otherwise it returns false. +func IsAlreadyCached(err error) bool { + switch err.(type) { + case AlreadyCachedErr: + return true + } + + return false +} + +// AlreadyCachedErr is returned when the Docker image requested for caching is already +// present in the cache. +type AlreadyCachedErr struct { + msg string +} + +func (a AlreadyCachedErr) Error() string { + return a.msg +} + +// IsNotFound returns true if the supplied error is of the type NotFoundErr +// otherwise it returns false. +func IsNotFound(e error) bool { + switch e.(type) { + case NotFoundErr: + return true + } + + return false +} + +// NotFoundErr is returned when the requested Docker image is not present in the cache. +type NotFoundErr struct { + msg string +} + +func (e NotFoundErr) Error() string { + return e.msg +} + +// IsExpired returns true if the supplied error is of the type ExpiredErr +// otherwise it returns false. +func IsExpired(e error) bool { + switch e.(type) { + case ExpiredErr: + return true + } + + return false +} + +// ExpiredErr is returned when the requested Docker image is present in the cache, but is +// expired according to the supplied TTL. 
+type ExpiredErr struct { + msg string +} + +func (e ExpiredErr) Error() string { + return e.msg +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/cache/warm.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/cache/warm.go new file mode 100644 index 000000000..fd9e3c607 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/cache/warm.go @@ -0,0 +1,159 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "path" + + "github.com/GoogleContainerTools/kaniko/pkg/config" + "github.com/GoogleContainerTools/kaniko/pkg/image/remote" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/tarball" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// WarmCache populates the cache +func WarmCache(opts *config.WarmerOptions) error { + cacheDir := opts.CacheDir + images := opts.Images + logrus.Debugf("%s\n", cacheDir) + logrus.Debugf("%s\n", images) + + errs := 0 + for _, img := range images { + tarBuf := new(bytes.Buffer) + manifestBuf := new(bytes.Buffer) + + cw := &Warmer{ + Remote: remote.RetrieveRemoteImage, + Local: LocalSource, + TarWriter: tarBuf, + ManifestWriter: manifestBuf, + } + + digest, err := cw.Warm(img, opts) + if err != nil { + if !IsAlreadyCached(err) { + logrus.Warnf("Error while trying to warm image: %v %v", img, err) + errs++ + } + + continue + } + + cachePath := path.Join(cacheDir, digest.String()) + + if err := writeBufsToFile(cachePath, tarBuf, manifestBuf); err != nil { + logrus.Warnf("Error while writing %v to cache: %v", img, err) + errs++ + continue + } + + logrus.Debugf("Wrote %s to cache", img) + } + + if len(images) == errs { + return errors.New("Failed to warm any of the given images") + } + + return nil +} + +func writeBufsToFile(cachePath string, tarBuf, manifestBuf *bytes.Buffer) error { + f, err := os.Create(cachePath) + if err != nil { + return err + } + defer f.Close() + + if _, err := f.Write(tarBuf.Bytes()); err != nil { + return errors.Wrap(err, "Failed to save tar to file") + } + + mfstPath := cachePath + ".json" + if err := ioutil.WriteFile(mfstPath, manifestBuf.Bytes(), 0666); err != nil { + return errors.Wrap(err, "Failed to save manifest to file") + } + + return nil +} + +// FetchRemoteImage retrieves a Docker image manifest from a remote source. +// github.com/GoogleContainerTools/kaniko/image/remote.RetrieveRemoteImage can be used as +// this type. +type FetchRemoteImage func(image string, opts config.RegistryOptions, customPlatform string) (v1.Image, error) + +// FetchLocalSource retrieves a Docker image manifest from a local source. +// github.com/GoogleContainerTools/kaniko/cache.LocalSource can be used as +// this type. 
+type FetchLocalSource func(*config.CacheOptions, string) (v1.Image, error)
+
+// Warmer is used to prepopulate the cache with a Docker image
+type Warmer struct {
+	Remote         FetchRemoteImage
+	Local          FetchLocalSource
+	TarWriter      io.Writer
+	ManifestWriter io.Writer
+}
+
+// Warm retrieves a Docker image and populates the supplied buffer with the image content and manifest
+// or returns an AlreadyCachedErr if the image is present in the cache.
+func (w *Warmer) Warm(image string, opts *config.WarmerOptions) (v1.Hash, error) {
+	cacheRef, err := name.ParseReference(image, name.WeakValidation)
+	if err != nil {
+		return v1.Hash{}, errors.Wrapf(err, "Failed to verify image name: %s", image)
+	}
+
+	img, err := w.Remote(image, opts.RegistryOptions, opts.CustomPlatform)
+	if err != nil || img == nil {
+		return v1.Hash{}, errors.Wrapf(err, "Failed to retrieve image: %s", image)
+	}
+
+	digest, err := img.Digest()
+	if err != nil {
+		return v1.Hash{}, errors.Wrapf(err, "Failed to retrieve digest: %s", image)
+	}
+
+	if !opts.Force {
+		_, err := w.Local(&opts.CacheOptions, digest.String())
+		if err == nil || IsExpired(err) {
+			return v1.Hash{}, AlreadyCachedErr{}
+		}
+	}
+
+	err = tarball.Write(cacheRef, img, w.TarWriter)
+	if err != nil {
+		return v1.Hash{}, errors.Wrapf(err, "Failed to write %s to tar buffer", image)
+	}
+
+	mfst, err := img.RawManifest()
+	if err != nil {
+		return v1.Hash{}, errors.Wrapf(err, "Failed to retrieve manifest for %s", image)
+	}
+
+	if _, err := w.ManifestWriter.Write(mfst); err != nil {
+		return v1.Hash{}, errors.Wrapf(err, "Failed to save manifest to buffer for %s", image)
+	}
+
+	return digest, nil
+}
diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/add.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/add.go
new file mode 100644
index 000000000..ae86f3538
--- /dev/null
+++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/add.go
@@ -0,0 +1,154 @@
+/*
+Copyright 2018 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package commands
+
+import (
+	"path/filepath"
+
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+	"github.com/moby/buildkit/frontend/dockerfile/instructions"
+	"github.com/pkg/errors"
+
+	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
+
+	"github.com/GoogleContainerTools/kaniko/pkg/util"
+	"github.com/sirupsen/logrus"
+)
+
+type AddCommand struct {
+	BaseCommand
+	cmd           *instructions.AddCommand
+	fileContext   util.FileContext
+	snapshotFiles []string
+}
+
+// ExecuteCommand executes the ADD command
+// Special stuff about ADD:
+// 1. If <src> is a remote file URL:
+//    - destination will have permissions of 0600
+//    - If remote file has HTTP Last-Modified header, we set the mtime of the file to that timestamp
+//    - If dest doesn't end with a slash, the filepath is inferred to be <dest>/<filename>
+// 2. If <src> is a local tar archive:
+//    - it is unpacked at the dest, as 'tar -x' would
+func (a *AddCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
+	replacementEnvs := buildArgs.ReplacementEnvs(config.Env)
+
+	uid, gid, err := util.GetUserGroup(a.cmd.Chown, replacementEnvs)
+	if err != nil {
+		return errors.Wrap(err, "getting user group from chown")
+	}
+
+	srcs, dest, err := util.ResolveEnvAndWildcards(a.cmd.SourcesAndDest, a.fileContext, replacementEnvs)
+	if err != nil {
+		return err
+	}
+
+	var unresolvedSrcs []string
+	// If any of the sources are local tar archives:
+	// 1. Unpack them to the specified destination
+	// If any of the sources is a remote file URL:
+	// 1. Download and copy it to the specified dest
+	// Else, add to the list of unresolved sources
+	for _, src := range srcs {
+		fullPath := filepath.Join(a.fileContext.Root, src)
+		if util.IsSrcRemoteFileURL(src) {
+			urlDest, err := util.URLDestinationFilepath(src, dest, config.WorkingDir, replacementEnvs)
+			if err != nil {
+				return err
+			}
+			logrus.Infof("Adding remote URL %s to %s", src, urlDest)
+			if err := util.DownloadFileToDest(src, urlDest, uid, gid); err != nil {
+				return errors.Wrap(err, "downloading remote source file")
+			}
+			a.snapshotFiles = append(a.snapshotFiles, urlDest)
+		} else if util.IsFileLocalTarArchive(fullPath) {
+			tarDest, err := util.DestinationFilepath("", dest, config.WorkingDir)
+			if err != nil {
+				return errors.Wrap(err, "determining dest for tar")
+			}
+			logrus.Infof("Unpacking local tar archive %s to %s", src, tarDest)
+			extractedFiles, err := util.UnpackLocalTarArchive(fullPath, tarDest)
+			if err != nil {
+				return errors.Wrap(err, "unpacking local tar")
+			}
+			logrus.Debugf("Added %v from local tar archive %s", extractedFiles, src)
+			a.snapshotFiles = append(a.snapshotFiles, extractedFiles...)
+		} else {
+			unresolvedSrcs = append(unresolvedSrcs, src)
+		}
+	}
+	// With the remaining "normal" sources, create and execute a standard copy command
+	if len(unresolvedSrcs) == 0 {
+		return nil
+	}
+
+	copyCmd := CopyCommand{
+		cmd: &instructions.CopyCommand{
+			SourcesAndDest: append(unresolvedSrcs, dest),
+			Chown:          a.cmd.Chown,
+		},
+		fileContext: a.fileContext,
+	}
+
+	if err := copyCmd.ExecuteCommand(config, buildArgs); err != nil {
+		return errors.Wrap(err, "executing copy command")
+	}
+	a.snapshotFiles = append(a.snapshotFiles, copyCmd.snapshotFiles...)
+ return nil +} + +// FilesToSnapshot should return an empty array if still nil; no files were changed +func (a *AddCommand) FilesToSnapshot() []string { + return a.snapshotFiles +} + +// String returns some information about the command for the image config +func (a *AddCommand) String() string { + return a.cmd.String() +} + +func (a *AddCommand) FilesUsedFromContext(config *v1.Config, buildArgs *dockerfile.BuildArgs) ([]string, error) { + replacementEnvs := buildArgs.ReplacementEnvs(config.Env) + + srcs, _, err := util.ResolveEnvAndWildcards(a.cmd.SourcesAndDest, a.fileContext, replacementEnvs) + if err != nil { + return nil, err + } + + files := []string{} + for _, src := range srcs { + if util.IsSrcRemoteFileURL(src) { + continue + } + if util.IsFileLocalTarArchive(src) { + continue + } + fullPath := filepath.Join(a.fileContext.Root, src) + files = append(files, fullPath) + } + + logrus.Infof("Using files from context: %v", files) + return files, nil +} + +func (a *AddCommand) MetadataOnly() bool { + return false +} + +func (a *AddCommand) RequiresUnpackedFS() bool { + return true +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/arg.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/arg.go new file mode 100644 index 000000000..598e59a50 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/arg.go @@ -0,0 +1,67 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package commands + +import ( + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + "github.com/GoogleContainerTools/kaniko/pkg/util" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/moby/buildkit/frontend/dockerfile/instructions" +) + +type ArgCommand struct { + BaseCommand + cmd *instructions.ArgCommand +} + +// ExecuteCommand only needs to add this ARG key/value as seen +func (r *ArgCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { + key, val, err := ParseArg(r.cmd.Key, r.cmd.Value, config.Env, buildArgs) + if err != nil { + return err + } + + buildArgs.AddArg(key, val) + return nil +} + +func ParseArg(key string, val *string, env []string, ba *dockerfile.BuildArgs) (string, *string, error) { + replacementEnvs := ba.ReplacementEnvs(env) + resolvedKey, err := util.ResolveEnvironmentReplacement(key, replacementEnvs, false) + if err != nil { + return "", nil, err + } + var resolvedValue *string + if val != nil { + value, err := util.ResolveEnvironmentReplacement(*val, replacementEnvs, false) + if err != nil { + return "", nil, err + } + resolvedValue = &value + } else { + meta := ba.GetAllMeta() + if value, ok := meta[resolvedKey]; ok { + resolvedValue = &value + } + } + return resolvedKey, resolvedValue, nil +} + +// String returns some information about the command for the image config history +func (r *ArgCommand) String() string { + return r.cmd.String() +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/base_command.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/base_command.go new file mode 100644 index 000000000..f42e68e02 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/base_command.go @@ -0,0 +1,57 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package commands + +import ( + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + v1 "github.com/google/go-containerregistry/pkg/v1" +) + +type BaseCommand struct { +} + +func (b *BaseCommand) CacheCommand(v1.Image) DockerCommand { + return nil +} + +func (b *BaseCommand) FilesToSnapshot() []string { + return []string{} +} + +func (b *BaseCommand) ProvidesFilesToSnapshot() bool { + return true +} + +func (b *BaseCommand) FilesUsedFromContext(_ *v1.Config, _ *dockerfile.BuildArgs) ([]string, error) { + return []string{}, nil +} + +func (b *BaseCommand) MetadataOnly() bool { + return true +} + +func (b *BaseCommand) RequiresUnpackedFS() bool { + return false +} + +func (b *BaseCommand) ShouldCacheOutput() bool { + return false +} + +func (b *BaseCommand) ShouldDetectDeletedFiles() bool { + return false +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/cache.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/cache.go new file mode 100644 index 000000000..878440df4 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/cache.go @@ -0,0 +1,31 @@ +/* +Copyright 2020 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package commands + +import v1 "github.com/google/go-containerregistry/pkg/v1" + +type Cached interface { + Layer() v1.Layer +} + +type caching struct { + layer v1.Layer +} + +func (c caching) Layer() v1.Layer { + return c.layer +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/cmd.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/cmd.go new file mode 100644 index 000000000..e3788d9a7 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/cmd.go @@ -0,0 +1,60 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package commands + +import ( + "strings" + + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + v1 "github.com/google/go-containerregistry/pkg/v1" + + "github.com/moby/buildkit/frontend/dockerfile/instructions" +) + +type CmdCommand struct { + BaseCommand + cmd *instructions.CmdCommand +} + +// ExecuteCommand executes the CMD command +// Argument handling is the same as RUN. 
+func (c *CmdCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { + var newCommand []string + if c.cmd.PrependShell { + // This is the default shell on Linux + var shell []string + if len(config.Shell) > 0 { + shell = config.Shell + } else { + shell = append(shell, "/bin/sh", "-c") + } + + newCommand = append(shell, strings.Join(c.cmd.CmdLine, " ")) + } else { + newCommand = c.cmd.CmdLine + } + + config.Cmd = newCommand + // ArgsEscaped is only used in windows environments + config.ArgsEscaped = false + return nil +} + +// String returns some information about the command for the image config history +func (c *CmdCommand) String() string { + return c.cmd.String() +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/commands.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/commands.go new file mode 100644 index 000000000..2683c98f5 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/commands.go @@ -0,0 +1,105 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package commands + +import ( + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + "github.com/GoogleContainerTools/kaniko/pkg/util" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type CurrentCacheKey func() (string, error) + +type DockerCommand interface { + // ExecuteCommand is responsible for: + // 1. Making required changes to the filesystem (ex. copying files for ADD/COPY or setting ENV variables) + // 2. Updating metadata fields in the config + // It should not change the config history. + ExecuteCommand(*v1.Config, *dockerfile.BuildArgs) error + // Returns a string representation of the command + String() string + // A list of files to snapshot, empty for metadata commands or nil if we don't know + FilesToSnapshot() []string + + // ProvidesFileToSnapshot is true for all metadata commands and commands which know + // list of files changed. False for Run command. + ProvidesFilesToSnapshot() bool + + // Return a cache-aware implementation of this command, if it exists. + CacheCommand(v1.Image) DockerCommand + + // Return true if this command depends on the build context. + FilesUsedFromContext(*v1.Config, *dockerfile.BuildArgs) ([]string, error) + + MetadataOnly() bool + + RequiresUnpackedFS() bool + + // Whether the output layer of this command should be cached in and + // retrieved from the layer cache. + ShouldCacheOutput() bool + + // ShouldDetectDeletedFiles returns true if the command could delete files. 
+ ShouldDetectDeletedFiles() bool +} + +func GetCommand(cmd instructions.Command, fileContext util.FileContext, useNewRun bool, cacheCopy bool) (DockerCommand, error) { + switch c := cmd.(type) { + case *instructions.RunCommand: + if useNewRun { + return &RunMarkerCommand{cmd: c}, nil + } + return &RunCommand{cmd: c}, nil + case *instructions.CopyCommand: + return &CopyCommand{cmd: c, fileContext: fileContext, shdCache: cacheCopy}, nil + case *instructions.ExposeCommand: + return &ExposeCommand{cmd: c}, nil + case *instructions.EnvCommand: + return &EnvCommand{cmd: c}, nil + case *instructions.WorkdirCommand: + return &WorkdirCommand{cmd: c}, nil + case *instructions.AddCommand: + return &AddCommand{cmd: c, fileContext: fileContext}, nil + case *instructions.CmdCommand: + return &CmdCommand{cmd: c}, nil + case *instructions.EntrypointCommand: + return &EntrypointCommand{cmd: c}, nil + case *instructions.LabelCommand: + return &LabelCommand{cmd: c}, nil + case *instructions.UserCommand: + return &UserCommand{cmd: c}, nil + case *instructions.OnbuildCommand: + return &OnBuildCommand{cmd: c}, nil + case *instructions.VolumeCommand: + return &VolumeCommand{cmd: c}, nil + case *instructions.StopSignalCommand: + return &StopSignalCommand{cmd: c}, nil + case *instructions.ArgCommand: + return &ArgCommand{cmd: c}, nil + case *instructions.ShellCommand: + return &ShellCommand{cmd: c}, nil + case *instructions.HealthCheckCommand: + return &HealthCheckCommand{cmd: c}, nil + case *instructions.MaintainerCommand: + logrus.Warnf("%s is deprecated, skipping", cmd.Name()) + return nil, nil + } + return nil, errors.Errorf("%s is not a supported command", cmd.Name()) +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/copy.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/copy.go new file mode 100644 index 000000000..977af6673 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/copy.go @@ -0,0 +1,312 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package commands + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + kConfig "github.com/GoogleContainerTools/kaniko/pkg/config" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + "github.com/GoogleContainerTools/kaniko/pkg/util" + v1 "github.com/google/go-containerregistry/pkg/v1" +) + +// for testing +var ( + getUserGroup = util.GetUserGroup +) + +type CopyCommand struct { + BaseCommand + cmd *instructions.CopyCommand + fileContext util.FileContext + snapshotFiles []string + shdCache bool +} + +func (c *CopyCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { + // Resolve from + if c.cmd.From != "" { + c.fileContext = util.FileContext{Root: filepath.Join(kConfig.KanikoDir, c.cmd.From)} + } + + replacementEnvs := buildArgs.ReplacementEnvs(config.Env) + uid, gid, err := getUserGroup(c.cmd.Chown, replacementEnvs) + if err != nil { + return errors.Wrap(err, "getting user group from chown") + } + + srcs, dest, err := util.ResolveEnvAndWildcards(c.cmd.SourcesAndDest, c.fileContext, replacementEnvs) + if err != nil { + return errors.Wrap(err, "resolving src") + } + + // For each source, iterate through and copy it over + for _, src := range srcs { + fullPath := filepath.Join(c.fileContext.Root, src) + + fi, err := os.Lstat(fullPath) + if err != nil { + return errors.Wrap(err, "could not copy source") + } + if fi.IsDir() && !strings.HasSuffix(fullPath, string(os.PathSeparator)) { + fullPath += "/" + } + cwd := config.WorkingDir + if cwd == "" { + cwd = kConfig.RootDir + } + + destPath, err := util.DestinationFilepath(fullPath, dest, cwd) + if err != nil { + return errors.Wrap(err, "find destination path") + } + + // If the destination dir is a symlink we need to resolve the path and use + // that instead of the symlink path + destPath, err = resolveIfSymlink(destPath) + if err != nil { + return errors.Wrap(err, "resolving dest symlink") + } + + if fi.IsDir() { + copiedFiles, err := util.CopyDir(fullPath, destPath, c.fileContext, uid, gid) + if err != nil { + return errors.Wrap(err, "copying dir") + } + c.snapshotFiles = append(c.snapshotFiles, copiedFiles...) + } else if util.IsSymlink(fi) { + // If file is a symlink, we want to copy the target file to destPath + exclude, err := util.CopySymlink(fullPath, destPath, c.fileContext) + if err != nil { + return errors.Wrap(err, "copying symlink") + } + if exclude { + continue + } + c.snapshotFiles = append(c.snapshotFiles, destPath) + } else { + // ... 
Else, we want to copy over a file + exclude, err := util.CopyFile(fullPath, destPath, c.fileContext, uid, gid) + if err != nil { + return errors.Wrap(err, "copying file") + } + if exclude { + continue + } + c.snapshotFiles = append(c.snapshotFiles, destPath) + } + } + return nil +} + +// FilesToSnapshot should return an empty array if still nil; no files were changed +func (c *CopyCommand) FilesToSnapshot() []string { + return c.snapshotFiles +} + +// String returns some information about the command for the image config +func (c *CopyCommand) String() string { + return c.cmd.String() +} + +func (c *CopyCommand) FilesUsedFromContext(config *v1.Config, buildArgs *dockerfile.BuildArgs) ([]string, error) { + return copyCmdFilesUsedFromContext(config, buildArgs, c.cmd, c.fileContext) +} + +func (c *CopyCommand) MetadataOnly() bool { + return false +} + +func (c *CopyCommand) RequiresUnpackedFS() bool { + return true +} + +func (c *CopyCommand) From() string { + return c.cmd.From +} + +func (c *CopyCommand) ShouldCacheOutput() bool { + return c.shdCache +} + +// CacheCommand returns true since this command should be cached +func (c *CopyCommand) CacheCommand(img v1.Image) DockerCommand { + return &CachingCopyCommand{ + img: img, + cmd: c.cmd, + fileContext: c.fileContext, + extractFn: util.ExtractFile, + } +} + +type CachingCopyCommand struct { + BaseCommand + caching + img v1.Image + extractedFiles []string + cmd *instructions.CopyCommand + fileContext util.FileContext + extractFn util.ExtractFunction +} + +func (cr *CachingCopyCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { + logrus.Infof("Found cached layer, extracting to filesystem") + var err error + + if cr.img == nil { + return errors.New(fmt.Sprintf("cached command image is nil %v", cr.String())) + } + + layers, err := cr.img.Layers() + if err != nil { + return errors.Wrapf(err, "retrieve image layers") + } + + if len(layers) != 1 { + return errors.New(fmt.Sprintf("expected %d layers but got %d", 1, len(layers))) + } + + cr.layer = layers[0] + cr.extractedFiles, err = util.GetFSFromLayers(kConfig.RootDir, layers, util.ExtractFunc(cr.extractFn), util.IncludeWhiteout()) + + logrus.Debugf("extractedFiles: %s", cr.extractedFiles) + if err != nil { + return errors.Wrap(err, "extracting fs from image") + } + + return nil +} + +func (cr *CachingCopyCommand) FilesUsedFromContext(config *v1.Config, buildArgs *dockerfile.BuildArgs) ([]string, error) { + return copyCmdFilesUsedFromContext(config, buildArgs, cr.cmd, cr.fileContext) +} + +func (cr *CachingCopyCommand) FilesToSnapshot() []string { + f := cr.extractedFiles + logrus.Debugf("%d files extracted by caching copy command", len(f)) + logrus.Tracef("Extracted files: %s", f) + + return f +} + +func (cr *CachingCopyCommand) MetadataOnly() bool { + return false +} + +func (cr *CachingCopyCommand) String() string { + if cr.cmd == nil { + return "nil command" + } + return cr.cmd.String() +} + +func (cr *CachingCopyCommand) From() string { + return cr.cmd.From +} + +func resolveIfSymlink(destPath string) (string, error) { + if !filepath.IsAbs(destPath) { + return "", errors.New("dest path must be abs") + } + + var nonexistentPaths []string + + newPath := destPath + for newPath != "/" { + _, err := os.Lstat(newPath) + if err != nil { + if os.IsNotExist(err) { + dir, file := filepath.Split(newPath) + newPath = filepath.Clean(dir) + nonexistentPaths = append(nonexistentPaths, file) + continue + } else { + return "", errors.Wrap(err, "failed to lstat") + } + } + + 
newPath, err = filepath.EvalSymlinks(newPath) + if err != nil { + return "", errors.Wrap(err, "failed to eval symlinks") + } + break + } + + for i := len(nonexistentPaths) - 1; i >= 0; i-- { + newPath = filepath.Join(newPath, nonexistentPaths[i]) + } + + if destPath != newPath { + logrus.Tracef("Updating destination path from %v to %v due to symlink", destPath, newPath) + } + + return filepath.Clean(newPath), nil +} + +func copyCmdFilesUsedFromContext( + config *v1.Config, buildArgs *dockerfile.BuildArgs, cmd *instructions.CopyCommand, + fileContext util.FileContext, +) ([]string, error) { + // We don't use the context if we're performing a copy --from. + if cmd.From != "" { + return nil, nil + } + + replacementEnvs := buildArgs.ReplacementEnvs(config.Env) + + srcs, _, err := util.ResolveEnvAndWildcards( + cmd.SourcesAndDest, fileContext, replacementEnvs, + ) + if err != nil { + return nil, err + } + + files := []string{} + for _, src := range srcs { + fullPath := filepath.Join(fileContext.Root, src) + files = append(files, fullPath) + } + + logrus.Debugf("Using files from context: %v", files) + + return files, nil +} + +// AbstractCopyCommand can either be a CopyCommand or a CachingCopyCommand. +type AbstractCopyCommand interface { + From() string +} + +// CastAbstractCopyCommand tries to convert a command to an AbstractCopyCommand. +func CastAbstractCopyCommand(cmd interface{}) (AbstractCopyCommand, bool) { + switch v := cmd.(type) { + case *CopyCommand: + return v, true + case *CachingCopyCommand: + return v, true + } + + return nil, false +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/entrypoint.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/entrypoint.go new file mode 100644 index 000000000..83964d4fa --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/entrypoint.go @@ -0,0 +1,57 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package commands + +import ( + "strings" + + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + v1 "github.com/google/go-containerregistry/pkg/v1" + + "github.com/moby/buildkit/frontend/dockerfile/instructions" +) + +type EntrypointCommand struct { + BaseCommand + cmd *instructions.EntrypointCommand +} + +// ExecuteCommand handles command processing similar to CMD and RUN, +func (e *EntrypointCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { + var newCommand []string + if e.cmd.PrependShell { + // This is the default shell on Linux + var shell []string + if len(config.Shell) > 0 { + shell = config.Shell + } else { + shell = append(shell, "/bin/sh", "-c") + } + + newCommand = append(shell, strings.Join(e.cmd.CmdLine, " ")) + } else { + newCommand = e.cmd.CmdLine + } + + config.Entrypoint = newCommand + return nil +} + +// String returns some information about the command for the image config history +func (e *EntrypointCommand) String() string { + return e.cmd.String() +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/env.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/env.go new file mode 100644 index 000000000..2be271490 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/env.go @@ -0,0 +1,41 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package commands + +import ( + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + v1 "github.com/google/go-containerregistry/pkg/v1" + + "github.com/GoogleContainerTools/kaniko/pkg/util" + "github.com/moby/buildkit/frontend/dockerfile/instructions" +) + +type EnvCommand struct { + BaseCommand + cmd *instructions.EnvCommand +} + +func (e *EnvCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { + newEnvs := e.cmd.Env + replacementEnvs := buildArgs.ReplacementEnvs(config.Env) + return util.UpdateConfigEnv(newEnvs, config, replacementEnvs) +} + +// String returns some information about the command for the image config history +func (e *EnvCommand) String() string { + return e.cmd.String() +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/expose.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/expose.go new file mode 100644 index 000000000..0aacbb6cf --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/expose.go @@ -0,0 +1,78 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
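CmdCommand and EntrypointCommand above share the same shell-form rule: when PrependShell is set, the joined command line is wrapped in the configured SHELL, defaulting to /bin/sh -c. A standalone sketch of that rule (the prependShell helper is illustrative, not part of kaniko):

package main

import (
	"fmt"
	"strings"

	v1 "github.com/google/go-containerregistry/pkg/v1"
)

// prependShell mirrors the shell-form handling in CmdCommand/EntrypointCommand:
// shell form wraps the joined command line in config.Shell (default /bin/sh -c).
func prependShell(config *v1.Config, cmdLine []string, shellForm bool) []string {
	if !shellForm {
		return cmdLine // exec form is used verbatim
	}
	shell := config.Shell
	if len(shell) == 0 {
		shell = []string{"/bin/sh", "-c"}
	}
	return append(shell, strings.Join(cmdLine, " "))
}

func main() {
	fmt.Println(prependShell(&v1.Config{}, []string{"echo", "hello"}, true))
	// Output: [/bin/sh -c echo hello]
}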
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package commands + +import ( + "fmt" + "strings" + + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + v1 "github.com/google/go-containerregistry/pkg/v1" + + "github.com/GoogleContainerTools/kaniko/pkg/util" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/sirupsen/logrus" +) + +type ExposeCommand struct { + BaseCommand + cmd *instructions.ExposeCommand +} + +func (r *ExposeCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { + logrus.Info("cmd: EXPOSE") + // Grab the currently exposed ports + existingPorts := config.ExposedPorts + if existingPorts == nil { + existingPorts = make(map[string]struct{}) + } + replacementEnvs := buildArgs.ReplacementEnvs(config.Env) + // Add any new ones in + for _, p := range r.cmd.Ports { + // Resolve any environment variables + p, err := util.ResolveEnvironmentReplacement(p, replacementEnvs, false) + if err != nil { + return err + } + // Add the default protocol if one isn't specified + if !strings.Contains(p, "/") { + p = p + "/tcp" + } + protocol := strings.Split(p, "/")[1] + if !validProtocol(protocol) { + return fmt.Errorf("invalid protocol: %s", protocol) + } + logrus.Infof("Adding exposed port: %s", p) + existingPorts[p] = struct{}{} + } + config.ExposedPorts = existingPorts + return nil +} + +func validProtocol(protocol string) bool { + validProtocols := [2]string{"tcp", "udp"} + for _, p := range validProtocols { + if protocol == p { + return true + } + } + return false +} + +func (r *ExposeCommand) String() string { + return r.cmd.String() +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/fake_commands.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/fake_commands.go new file mode 100644 index 000000000..8efee7c4d --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/fake_commands.go @@ -0,0 +1,88 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// used for testing in the commands package +package commands + +import ( + "bytes" + "io" + "io/ioutil" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +type fakeLayer struct { + TarContent []byte +} + +func (f fakeLayer) Digest() (v1.Hash, error) { + return v1.Hash{}, nil +} +func (f fakeLayer) DiffID() (v1.Hash, error) { + return v1.Hash{}, nil +} +func (f fakeLayer) Compressed() (io.ReadCloser, error) { + return nil, nil +} +func (f fakeLayer) Uncompressed() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewReader(f.TarContent)), nil +} +func (f fakeLayer) Size() (int64, error) { + return 0, nil +} +func (f fakeLayer) MediaType() (types.MediaType, error) { + return "", nil +} + +type fakeImage struct { + ImageLayers []v1.Layer +} + +func (f fakeImage) Layers() ([]v1.Layer, error) { + return f.ImageLayers, nil +} +func (f fakeImage) MediaType() (types.MediaType, error) { + return "", nil +} +func (f fakeImage) Size() (int64, error) { + return 0, nil +} +func (f fakeImage) ConfigName() (v1.Hash, error) { + return v1.Hash{}, nil +} +func (f fakeImage) ConfigFile() (*v1.ConfigFile, error) { + return &v1.ConfigFile{}, nil +} +func (f fakeImage) RawConfigFile() ([]byte, error) { + return []byte{}, nil +} +func (f fakeImage) Digest() (v1.Hash, error) { + return v1.Hash{}, nil +} +func (f fakeImage) Manifest() (*v1.Manifest, error) { + return &v1.Manifest{}, nil +} +func (f fakeImage) RawManifest() ([]byte, error) { + return []byte{}, nil +} +func (f fakeImage) LayerByDigest(v1.Hash) (v1.Layer, error) { + return fakeLayer{}, nil +} +func (f fakeImage) LayerByDiffID(v1.Hash) (v1.Layer, error) { + return fakeLayer{}, nil +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/healthcheck.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/healthcheck.go new file mode 100644 index 000000000..af40ba652 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/healthcheck.go @@ -0,0 +1,41 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package commands + +import ( + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/moby/buildkit/frontend/dockerfile/instructions" +) + +type HealthCheckCommand struct { + BaseCommand + cmd *instructions.HealthCheckCommand +} + +// ExecuteCommand handles command processing similar to CMD and RUN. +func (h *HealthCheckCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { + check := v1.HealthConfig(*h.cmd.Health) + config.Healthcheck = &check + + return nil +} + +// String returns some information about the command for the image config history +func (h *HealthCheckCommand) String() string { + return h.cmd.String() +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/label.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/label.go new file mode 100644 index 000000000..7568886c0 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/label.go @@ -0,0 +1,71 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package commands + +import ( + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + v1 "github.com/google/go-containerregistry/pkg/v1" + + "github.com/GoogleContainerTools/kaniko/pkg/util" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/sirupsen/logrus" +) + +type LabelCommand struct { + BaseCommand + cmd *instructions.LabelCommand +} + +func (r *LabelCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { + return updateLabels(r.cmd.Labels, config, buildArgs) +} + +func updateLabels(labels []instructions.KeyValuePair, config *v1.Config, buildArgs *dockerfile.BuildArgs) error { + existingLabels := config.Labels + if existingLabels == nil { + existingLabels = make(map[string]string) + } + // Let's unescape values before setting the label + replacementEnvs := buildArgs.ReplacementEnvs(config.Env) + for index, kvp := range labels { + key, err := util.ResolveEnvironmentReplacement(kvp.Key, replacementEnvs, false) + if err != nil { + return err + } + unescaped, err := util.ResolveEnvironmentReplacement(kvp.Value, replacementEnvs, false) + if err != nil { + return err + } + labels[index] = instructions.KeyValuePair{ + Key: key, + Value: unescaped, + } + } + for _, kvp := range labels { + logrus.Infof("Applying label %s=%s", kvp.Key, kvp.Value) + existingLabels[kvp.Key] = kvp.Value + } + + config.Labels = existingLabels + return nil +} + +// String returns some information about the command for the image config history +func (r *LabelCommand) String() string { + return r.cmd.String() +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/onbuild.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/onbuild.go new file mode 100644 index 000000000..74c6ad01e --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/onbuild.go @@ -0,0 +1,46 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package commands + +import ( + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/sirupsen/logrus" +) + +type OnBuildCommand struct { + BaseCommand + cmd *instructions.OnbuildCommand +} + +// ExecuteCommand adds the specified expression in Onbuild to the config +func (o *OnBuildCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { + logrus.Info("cmd: ONBUILD") + logrus.Infof("args: %s", o.cmd.Expression) + if config.OnBuild == nil { + config.OnBuild = []string{o.cmd.Expression} + } else { + config.OnBuild = append(config.OnBuild, o.cmd.Expression) + } + return nil +} + +// String returns some information about the command for the image config history +func (o *OnBuildCommand) String() string { + return o.cmd.String() +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/run.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/run.go new file mode 100644 index 000000000..27b05ed45 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/run.go @@ -0,0 +1,264 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package commands + +import ( + "fmt" + "os" + "os/exec" + "os/user" + "strings" + "syscall" + + kConfig "github.com/GoogleContainerTools/kaniko/pkg/config" + "github.com/GoogleContainerTools/kaniko/pkg/constants" + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + "github.com/GoogleContainerTools/kaniko/pkg/util" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type RunCommand struct { + BaseCommand + cmd *instructions.RunCommand +} + +// for testing +var ( + userLookup = user.Lookup + userLookupID = user.LookupId +) + +func (r *RunCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { + return runCommandInExec(config, buildArgs, r.cmd) +} + +func runCommandInExec(config *v1.Config, buildArgs *dockerfile.BuildArgs, cmdRun *instructions.RunCommand) error { + var newCommand []string + if cmdRun.PrependShell { + // This is the default shell on Linux + var shell []string + if len(config.Shell) > 0 { + shell = config.Shell + } else { + shell = append(shell, "/bin/sh", "-c") + } + + newCommand = append(shell, strings.Join(cmdRun.CmdLine, " ")) + } else { + newCommand = cmdRun.CmdLine + // Find and set absolute path of executable by setting PATH temporarily + replacementEnvs := buildArgs.ReplacementEnvs(config.Env) + for _, v := range replacementEnvs { + entry := strings.SplitN(v, "=", 2) + if entry[0] != "PATH" { + continue + } + oldPath := os.Getenv("PATH") + defer os.Setenv("PATH", oldPath) + os.Setenv("PATH", entry[1]) + path, err := exec.LookPath(newCommand[0]) + if err == nil { + newCommand[0] = path + } + } + } + + logrus.Infof("cmd: %s", newCommand[0]) + logrus.Infof("args: %s", newCommand[1:]) + + cmd := exec.Command(newCommand[0], newCommand[1:]...)
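+ + // The command is started in its own process group (via the Setpgid attribute + // below) so that any grandchild processes it spawns can be signaled as a group + // once the command itself has exited.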
+ + cmd.Dir = setWorkDirIfExists(config.WorkingDir) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + replacementEnvs := buildArgs.ReplacementEnvs(config.Env) + cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} + + u := config.User + userAndGroup := strings.Split(u, ":") + userStr, err := util.ResolveEnvironmentReplacement(userAndGroup[0], replacementEnvs, false) + if err != nil { + return errors.Wrapf(err, "resolving user %s", userAndGroup[0]) + } + + // If specified, run the command as a specific user + if userStr != "" { + cmd.SysProcAttr.Credential, err = util.SyscallCredentials(userStr) + if err != nil { + return errors.Wrap(err, "credentials") + } + } + + env, err := addDefaultHOME(userStr, replacementEnvs) + if err != nil { + return errors.Wrap(err, "adding default HOME variable") + } + + cmd.Env = env + + logrus.Infof("Running: %s", cmd.Args) + if err := cmd.Start(); err != nil { + return errors.Wrap(err, "starting command") + } + + pgid, err := syscall.Getpgid(cmd.Process.Pid) + if err != nil { + return errors.Wrap(err, "getting group id for process") + } + if err := cmd.Wait(); err != nil { + return errors.Wrap(err, "waiting for process to exit") + } + + // It's not an error if there are no grandchildren + if err := syscall.Kill(-pgid, syscall.SIGKILL); err != nil && err.Error() != "no such process" { + return err + } + return nil +} + +// addDefaultHOME adds the default value for HOME if it isn't already set +func addDefaultHOME(u string, envs []string) ([]string, error) { + for _, env := range envs { + split := strings.SplitN(env, "=", 2) + if split[0] == constants.HOME { + return envs, nil + } + } + + // If user isn't set, set default value of HOME + if u == "" || u == constants.RootUser { + return append(envs, fmt.Sprintf("%s=%s", constants.HOME, constants.DefaultHOMEValue)), nil + } + + // If user is set to username, set value of HOME to /home/${user} + // Otherwise the user is set to uid and HOME is / + userObj, err := userLookup(u) + if err != nil { + if uo, e := userLookupID(u); e == nil { + userObj = uo + } else { + return nil, err + } + } + + return append(envs, fmt.Sprintf("%s=%s", constants.HOME, userObj.HomeDir)), nil +} + +// String returns some information about the command for the image config history +func (r *RunCommand) String() string { + return r.cmd.String() +} + +func (r *RunCommand) FilesToSnapshot() []string { + return nil +} + +func (r *RunCommand) ProvidesFilesToSnapshot() bool { + return false +} + +// CacheCommand returns a caching version of this command, used when a cached layer exists +func (r *RunCommand) CacheCommand(img v1.Image) DockerCommand { + + return &CachingRunCommand{ + img: img, + cmd: r.cmd, + extractFn: util.ExtractFile, + } +} + +func (r *RunCommand) MetadataOnly() bool { + return false +} + +func (r *RunCommand) RequiresUnpackedFS() bool { + return true +} + +func (r *RunCommand) ShouldCacheOutput() bool { + return true +} + +type CachingRunCommand struct { + BaseCommand + caching + img v1.Image + extractedFiles []string + cmd *instructions.RunCommand + extractFn util.ExtractFunction +} + +func (cr *CachingRunCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { + logrus.Infof("Found cached layer, extracting to filesystem") + var err error + + if cr.img == nil { + return errors.New(fmt.Sprintf("command image is nil %v", cr.String())) + } + + layers, err := cr.img.Layers() + if err != nil { + return errors.Wrap(err, "retrieving image layers") + } + + if len(layers) != 1 { + return errors.New(fmt.Sprintf("expected %d layers but got %d", 1, len(layers))) + } + + cr.layer = layers[0] + + cr.extractedFiles, err = util.GetFSFromLayers( + kConfig.RootDir, + layers, + util.ExtractFunc(cr.extractFn), + util.IncludeWhiteout(), + ) + if err != nil { + return errors.Wrap(err, "extracting fs from image") + } + + return nil +} + +func (cr *CachingRunCommand) FilesToSnapshot() []string { + f := cr.extractedFiles + logrus.Debugf("%d files extracted by caching run command", len(f)) + logrus.Tracef("Extracted files: %s", f) + + return f +} + +func (cr *CachingRunCommand) String() string { + if cr.cmd == nil { + return "nil command" + } + return cr.cmd.String() +} + +func (cr *CachingRunCommand) MetadataOnly() bool { + return false +} + +func setWorkDirIfExists(workdir string) string { + if _, err := os.Lstat(workdir); err == nil { + return workdir + } + return "" +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/run_marker.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/run_marker.go new file mode 100644 index 000000000..0a446cc15 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/run_marker.go @@ -0,0 +1,85 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package commands + +import ( + "os" + + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + "github.com/GoogleContainerTools/kaniko/pkg/util" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/sirupsen/logrus" +) + +type RunMarkerCommand struct { + BaseCommand + cmd *instructions.RunCommand + Files []string +} + +func (r *RunMarkerCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { + // Snapshot filesystem info before the command runs, then diff afterwards to find changed files + logrus.Debugf("using new RunMarker command") + prevFilesMap, _ := util.GetFSInfoMap("/", map[string]os.FileInfo{}) + if err := runCommandInExec(config, buildArgs, r.cmd); err != nil { + return err + } + _, r.Files = util.GetFSInfoMap("/", prevFilesMap) + + logrus.Debugf("files changed %s", r.Files) + return nil +} + +// String returns some information about the command for the image config history +func (r *RunMarkerCommand) String() string { + return r.cmd.String() +} + +func (r *RunMarkerCommand) FilesToSnapshot() []string { + return r.Files +} + +func (r *RunMarkerCommand) ProvidesFilesToSnapshot() bool { + return true +} + +// CacheCommand returns a caching version of this command, used when a cached layer exists +func (r *RunMarkerCommand) CacheCommand(img v1.Image) DockerCommand { + + return &CachingRunCommand{ + img: img, + cmd: r.cmd, + extractFn: util.ExtractFile, + } +} + +func (r *RunMarkerCommand) MetadataOnly() bool { + return false +} + +func (r *RunMarkerCommand) RequiresUnpackedFS() bool { + return true +} + +func (r *RunMarkerCommand) ShouldCacheOutput() bool { + return true +} + +func (r *RunMarkerCommand) ShouldDetectDeletedFiles() bool { + return true +} diff --git
a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/shell.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/shell.go new file mode 100644 index 000000000..996c928fd --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/shell.go @@ -0,0 +1,39 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package commands + +import ( + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/moby/buildkit/frontend/dockerfile/instructions" +) + +type ShellCommand struct { + BaseCommand + cmd *instructions.ShellCommand +} + +// ExecuteCommand handles command processing similar to CMD and RUN. +func (s *ShellCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { + config.Shell = s.cmd.Shell + return nil +} + +// String returns some information about the command for the image config history +func (s *ShellCommand) String() string { + return s.cmd.String() +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/stopsignal.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/stopsignal.go new file mode 100644 index 000000000..b28d8eba3 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/stopsignal.go @@ -0,0 +1,59 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package commands + +import ( + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + "github.com/GoogleContainerTools/kaniko/pkg/util" + "github.com/docker/docker/pkg/signal" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/sirupsen/logrus" +) + +type StopSignalCommand struct { + BaseCommand + cmd *instructions.StopSignalCommand +} + +// ExecuteCommand handles command processing similar to CMD and RUN. +func (s *StopSignalCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { + logrus.Info("cmd: STOPSIGNAL") + + // resolve possible environment variables + replacementEnvs := buildArgs.ReplacementEnvs(config.Env) + resolvedEnvs, err := util.ResolveEnvironmentReplacementList([]string{s.cmd.Signal}, replacementEnvs, false) + if err != nil { + return err + } + stopsignal := resolvedEnvs[0] + + // validate stopsignal + _, err = signal.ParseSignal(stopsignal) + if err != nil { + return err + } + + logrus.Infof("Replacing StopSignal in config with %v", stopsignal) + config.StopSignal = stopsignal + return nil +} + +// String returns some information about the command for the image config history +func (s *StopSignalCommand) String() string { + return s.cmd.String() +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/user.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/user.go new file mode 100644 index 000000000..410bb7840 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/user.go @@ -0,0 +1,65 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package commands + +import ( + "fmt" + "strings" + + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + "github.com/GoogleContainerTools/kaniko/pkg/util" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// for testing +var ( + Lookup = util.Lookup +) + +type UserCommand struct { + BaseCommand + cmd *instructions.UserCommand +} + +func (r *UserCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { + logrus.Info("cmd: USER") + u := r.cmd.User + userAndGroup := strings.Split(u, ":") + replacementEnvs := buildArgs.ReplacementEnvs(config.Env) + userStr, err := util.ResolveEnvironmentReplacement(userAndGroup[0], replacementEnvs, false) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("resolving user %s", userAndGroup[0])) + } + + if len(userAndGroup) > 1 { + groupStr, err := util.ResolveEnvironmentReplacement(userAndGroup[1], replacementEnvs, false) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("resolving group %s", userAndGroup[1])) + } + userStr = userStr + ":" + groupStr + } + + config.User = userStr + return nil +} + +func (r *UserCommand) String() string { + return r.cmd.String() +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/volume.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/volume.go new file mode 100644 index 000000000..94acd09e1 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/volume.go @@ -0,0 +1,72 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package commands + +import ( + "fmt" + "os" + + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + + "github.com/GoogleContainerTools/kaniko/pkg/util" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/sirupsen/logrus" +) + +type VolumeCommand struct { + BaseCommand + cmd *instructions.VolumeCommand +} + +func (v *VolumeCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { + logrus.Info("cmd: VOLUME") + volumes := v.cmd.Volumes + replacementEnvs := buildArgs.ReplacementEnvs(config.Env) + resolvedVolumes, err := util.ResolveEnvironmentReplacementList(volumes, replacementEnvs, true) + if err != nil { + return err + } + existingVolumes := config.Volumes + if existingVolumes == nil { + existingVolumes = map[string]struct{}{} + } + for _, volume := range resolvedVolumes { + var x struct{} + existingVolumes[volume] = x + util.AddVolumePathToIgnoreList(volume) + + // Only create and snapshot the dir if it didn't exist already + if _, err := os.Stat(volume); os.IsNotExist(err) { + logrus.Infof("Creating directory %s", volume) + if err := os.MkdirAll(volume, 0755); err != nil { + return fmt.Errorf("could not create directory for volume %s: %s", volume, err) + } + } + } + config.Volumes = existingVolumes + + return nil +} + +func (v *VolumeCommand) FilesToSnapshot() []string { + return []string{} +} + +func (v *VolumeCommand) String() string { + return v.cmd.String() +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/workdir.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/workdir.go new file mode 100644 index 000000000..153f27c92 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/commands/workdir.go @@ -0,0 +1,81 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package commands + +import ( + "os" + "path/filepath" + + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + + "github.com/GoogleContainerTools/kaniko/pkg/util" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/sirupsen/logrus" +) + +type WorkdirCommand struct { + BaseCommand + cmd *instructions.WorkdirCommand + snapshotFiles []string +} + +// For testing +var mkdir = os.MkdirAll + +func (w *WorkdirCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { + logrus.Info("cmd: workdir") + workdirPath := w.cmd.Path + replacementEnvs := buildArgs.ReplacementEnvs(config.Env) + resolvedWorkingDir, err := util.ResolveEnvironmentReplacement(workdirPath, replacementEnvs, true) + if err != nil { + return err + } + if filepath.IsAbs(resolvedWorkingDir) { + config.WorkingDir = resolvedWorkingDir + } else { + if config.WorkingDir != "" { + config.WorkingDir = filepath.Join(config.WorkingDir, resolvedWorkingDir) + } else { + config.WorkingDir = filepath.Join("/", resolvedWorkingDir) + } + } + logrus.Infof("Changed working directory to %s", config.WorkingDir) + + // Only create and snapshot the dir if it didn't exist already + w.snapshotFiles = []string{} + if _, err := os.Stat(config.WorkingDir); os.IsNotExist(err) { + logrus.Infof("Creating directory %s", config.WorkingDir) + w.snapshotFiles = append(w.snapshotFiles, config.WorkingDir) + return mkdir(config.WorkingDir, 0755) + } + return nil +} + +// FilesToSnapshot returns the workingdir, which should have been created if it didn't already exist +func (w *WorkdirCommand) FilesToSnapshot() []string { + return w.snapshotFiles +} + +// String returns some information about the command for the image config history +func (w *WorkdirCommand) String() string { + return w.cmd.String() +} + +func (w *WorkdirCommand) MetadataOnly() bool { + return false +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/config/args.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/config/args.go new file mode 100644 index 000000000..46ec51f7c --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/config/args.go @@ -0,0 +1,84 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "fmt" + "strings" + + "github.com/sirupsen/logrus" +) + +// This type is used to support passing in multiple flags +type multiArg []string + +// Now, for our new type, implement the methods of +// the flag.Value interface...
+// The first method is String() string +func (b *multiArg) String() string { + return strings.Join(*b, ",") +} + +// The second method is Set(value string) error +func (b *multiArg) Set(value string) error { + logrus.Debugf("appending to multi args %s", value) + *b = append(*b, value) + return nil +} + +// The third is Type() string +func (b *multiArg) Type() string { + return "multi-arg type" +} + +func (b *multiArg) Contains(v string) bool { + for _, s := range *b { + if s == v { + return true + } + } + return false +} + +// This type is used to support passing in multiple key=value flags +type keyValueArg map[string]string + +// Now, for our new type, implement the methods of +// the flag.Value interface... +// The first method is String() string +func (a *keyValueArg) String() string { + var result []string + for key := range *a { + result = append(result, fmt.Sprintf("%s=%s", key, (*a)[key])) + } + return strings.Join(result, ",") +} + +// The second method is Set(value string) error +func (a *keyValueArg) Set(value string) error { + valueSplit := strings.SplitN(value, "=", 2) + if len(valueSplit) < 2 { + return fmt.Errorf("invalid argument value. expect key=value, got %s", value) + } + (*a)[valueSplit[0]] = valueSplit[1] + return nil +} + +// The third is Type() string +func (a *keyValueArg) Type() string { + return "key-value-arg type" +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/config/init.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/config/init.go new file mode 100644 index 000000000..2e08b975c --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/config/init.go @@ -0,0 +1,31 @@ +/* +Copyright 2020 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "github.com/GoogleContainerTools/kaniko/pkg/constants" +) + +var RootDir string +var KanikoDir string +var IgnoreListPath string + +func init() { + RootDir = constants.RootDir + KanikoDir = constants.KanikoDir + IgnoreListPath = constants.IgnoreListPath +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/config/options.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/config/options.go new file mode 100644 index 000000000..3482ead0e --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/config/options.go @@ -0,0 +1,128 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package config + +import ( + "errors" + "fmt" + "strconv" + "strings" + "time" +) + +// CacheOptions are base image cache options that are set by command line arguments +type CacheOptions struct { + CacheDir string + CacheTTL time.Duration +} + +// RegistryOptions are all the options related to the registries, set by command line arguments. +type RegistryOptions struct { + RegistryMirrors multiArg + InsecureRegistries multiArg + SkipTLSVerifyRegistries multiArg + RegistriesCertificates keyValueArg + Insecure bool + SkipTLSVerify bool + InsecurePull bool + SkipTLSVerifyPull bool + PushRetry int +} + +// KanikoOptions are options that are set by command line arguments +type KanikoOptions struct { + RegistryOptions + CacheOptions + Destinations multiArg + BuildArgs multiArg + Labels multiArg + Git KanikoGitOptions + IgnorePaths multiArg + DockerfilePath string + SrcContext string + SnapshotMode string + CustomPlatform string + Bucket string + TarPath string + Target string + CacheRepo string + DigestFile string + ImageNameDigestFile string + ImageNameTagDigestFile string + OCILayoutPath string + ImageFSExtractRetry int + SingleSnapshot bool + Reproducible bool + NoPush bool + Cache bool + Cleanup bool + CompressedCaching bool + IgnoreVarRun bool + SkipUnusedStages bool + RunV2 bool + CacheCopyLayers bool + ForceBuildMetadata bool +} + +type KanikoGitOptions struct { + Branch string + SingleBranch bool + RecurseSubmodules bool +} + +var ErrInvalidGitFlag = errors.New("invalid git flag, must be in the key=value format") + +func (k *KanikoGitOptions) Type() string { + return "gitoptions" +} + +func (k *KanikoGitOptions) String() string { + return fmt.Sprintf("branch=%s,single-branch=%t,recurse-submodules=%t", k.Branch, k.SingleBranch, k.RecurseSubmodules) +} + +func (k *KanikoGitOptions) Set(s string) error { + var parts = strings.SplitN(s, "=", 2) + if len(parts) != 2 { + return fmt.Errorf("%w: %s", ErrInvalidGitFlag, s) + } + switch parts[0] { + case "branch": + k.Branch = parts[1] + case "single-branch": + v, err := strconv.ParseBool(parts[1]) + if err != nil { + return err + } + k.SingleBranch = v + case "recurse-submodules": + v, err := strconv.ParseBool(parts[1]) + if err != nil { + return err + } + k.RecurseSubmodules = v + } + return nil +} + +// WarmerOptions are options that are set by command line arguments to the cache warmer. +type WarmerOptions struct { + CacheOptions + RegistryOptions + CustomPlatform string + Images multiArg + Force bool +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/config/stage.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/config/stage.go new file mode 100644 index 000000000..fad18049c --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/config/stage.go @@ -0,0 +1,32 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package config + +import ( + "github.com/moby/buildkit/frontend/dockerfile/instructions" +) + +// KanikoStage wraps a stage of the Dockerfile and provides extra information +type KanikoStage struct { + instructions.Stage + BaseImageIndex int + Final bool + BaseImageStoredLocally bool + SaveStage bool + MetaArgs []instructions.ArgCommand + Index int +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/constants/constants.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/constants/constants.go new file mode 100644 index 000000000..a1df904b4 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/constants/constants.go @@ -0,0 +1,83 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package constants + +const ( + // RootDir is the path to the root directory + RootDir = "/" + + // KanikoDir is the path to the Kaniko directory + KanikoDir = "/kaniko" + + IgnoreListPath = "/proc/self/mountinfo" + + Author = "kaniko" + + // DockerfilePath is the path the Dockerfile is copied to + DockerfilePath = "/kaniko/Dockerfile" + + // ContextTar is the default name of the tar uploaded to GCS buckets + ContextTar = "context.tar.gz" + + // BuildContextDir is the directory a build context will be unpacked into, + // for example, a tarball from a GCS bucket will be unpacked here + BuildContextDir = "/kaniko/buildcontext/" + + // KanikoIntermediateStagesDir is where we will store intermediate stages + // as tarballs in case they are needed later on + KanikoIntermediateStagesDir = "/kaniko/stages" + + // Various snapshot modes: + SnapshotModeTime = "time" + SnapshotModeFull = "full" + SnapshotModeRedo = "redo" + + // NoBaseImage is the scratch image + NoBaseImage = "scratch" + + GCSBuildContextPrefix = "gs://" + S3BuildContextPrefix = "s3://" + LocalDirBuildContextPrefix = "dir://" + GitBuildContextPrefix = "git://" + HTTPSBuildContextPrefix = "https://" + + HOME = "HOME" + // DefaultHOMEValue is the default value Docker sets for $HOME + DefaultHOMEValue = "/root" + RootUser = "root" + + // Docker command names + Cmd = "cmd" + Entrypoint = "entrypoint" + + // Name of the .dockerignore file + Dockerignore = ".dockerignore" + + // S3 Custom endpoint ENV name + S3EndpointEnv = "S3_ENDPOINT" + S3ForcePathStyle = "S3_FORCE_PATH_STYLE" +) + +// ScratchEnvVars are the default environment variables needed for a scratch image.
+var ScratchEnvVars = []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"} + +// AzureBlobStorageHostRegEx lists the regexes for valid Azure Blob Storage host suffixes in URLs for AzureCloud, AzureChinaCloud, AzureGermanCloud and AzureUSGovernment +var AzureBlobStorageHostRegEx = []string{"https://(.+?).blob.core.windows.net/(.+)", + "https://(.+?).blob.core.chinacloudapi.cn/(.+)", + "https://(.+?).blob.core.cloudapi.de/(.+)", + "https://(.+?).blob.core.usgovcloudapi.net/(.+)", +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/creds/creds_darwin.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/creds/creds_darwin.go new file mode 100644 index 000000000..e3bdb2c94 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/creds/creds_darwin.go @@ -0,0 +1,36 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package creds + +import ( + "sync" + + "github.com/google/go-containerregistry/pkg/authn" +) + +var ( + setupKeyChainOnce sync.Once + keyChain authn.Keychain +) + +// GetKeychain returns a keychain for accessing container registries. +func GetKeychain() authn.Keychain { + setupKeyChainOnce.Do(func() { + keyChain = authn.NewMultiKeychain(authn.DefaultKeychain) + }) + return keyChain +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/creds/creds_linux.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/creds/creds_linux.go new file mode 100644 index 000000000..2cbe62682 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/creds/creds_linux.go @@ -0,0 +1,50 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package creds + +import ( + "context" + "sync" + + "github.com/genuinetools/bpfd/proc" + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/authn/k8schain" + "github.com/sirupsen/logrus" +) + +var ( + setupKeyChainOnce sync.Once + keyChain authn.Keychain +) + +// GetKeychain returns a keychain for accessing container registries. +func GetKeychain() authn.Keychain { + setupKeyChainOnce.Do(func() { + keyChain = authn.NewMultiKeychain(authn.DefaultKeychain) + + // Add the Kubernetes keychain if we're on Kubernetes + if proc.GetContainerRuntime(0, 0) == proc.RuntimeKubernetes { + k8sc, err := k8schain.NewNoClient(context.Background()) + if err != nil { + logrus.Warnf("Error setting up k8schain. Using default keychain %s", err) + return + } + keyChain = authn.NewMultiKeychain(keyChain, k8sc) + } + }) + return keyChain +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/dockerfile/buildargs.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/dockerfile/buildargs.go new file mode 100644 index 000000000..dac5fd08f --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/dockerfile/buildargs.go @@ -0,0 +1,64 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dockerfile + +import ( + "strings" + + d "github.com/docker/docker/builder/dockerfile" + "github.com/moby/buildkit/frontend/dockerfile/instructions" +) + +type BuildArgs struct { + d.BuildArgs +} + +func NewBuildArgs(args []string) *BuildArgs { + argsFromOptions := make(map[string]*string) + for _, a := range args { + s := strings.SplitN(a, "=", 2) + if len(s) == 1 { + argsFromOptions[s[0]] = nil + } else { + argsFromOptions[s[0]] = &s[1] + } + } + return &BuildArgs{ + BuildArgs: *d.NewBuildArgs(argsFromOptions), + } +} + +func (b *BuildArgs) Clone() *BuildArgs { + clone := b.BuildArgs.Clone() + return &BuildArgs{ + BuildArgs: *clone, + } +} + +// ReplacementEnvs returns the given environment variables plus the filtered build args +func (b *BuildArgs) ReplacementEnvs(envs []string) []string { + filtered := b.FilterAllowed(envs) + return append(envs, filtered...) +} + +// AddMetaArgs adds the supplied meta ARG commands to b's allowedMetaArgs +func (b *BuildArgs) AddMetaArgs(metaArgs []instructions.ArgCommand) { + for _, arg := range metaArgs { + v := arg.Value + b.AddMetaArg(arg.Key, v) + } +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/dockerfile/dockerfile.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/dockerfile/dockerfile.go new file mode 100644 index 000000000..777178f0c --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/dockerfile/dockerfile.go @@ -0,0 +1,392 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package dockerfile + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "regexp" + "strconv" + "strings" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/sirupsen/logrus" + + "github.com/GoogleContainerTools/kaniko/pkg/config" + "github.com/GoogleContainerTools/kaniko/pkg/util" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/moby/buildkit/frontend/dockerfile/parser" + "github.com/pkg/errors" +) + +func ParseStages(opts *config.KanikoOptions) ([]instructions.Stage, []instructions.ArgCommand, error) { + var err error + var d []uint8 + match, _ := regexp.MatchString("^https?://", opts.DockerfilePath) + if match { + response, e := http.Get(opts.DockerfilePath) + if e != nil { + return nil, nil, e + } + d, err = ioutil.ReadAll(response.Body) + } else { + d, err = ioutil.ReadFile(opts.DockerfilePath) + } + + if err != nil { + return nil, nil, errors.Wrap(err, fmt.Sprintf("reading dockerfile at path %s", opts.DockerfilePath)) + } + + stages, metaArgs, err := Parse(d) + if err != nil { + return nil, nil, errors.Wrap(err, "parsing dockerfile") + } + + metaArgs, err = expandNested(metaArgs, opts.BuildArgs) + if err != nil { + return nil, nil, errors.Wrap(err, "expanding meta ARGs") + } + + return stages, metaArgs, nil +} + +// baseImageIndex returns the index of the stage the current stage is built off +// returns -1 if the current stage isn't built off a previous stage +func baseImageIndex(currentStage int, stages []instructions.Stage) int { + currentStageBaseName := strings.ToLower(stages[currentStage].BaseName) + + for i, stage := range stages { + if i > currentStage { + break + } + if stage.Name == currentStageBaseName { + return i + } + } + + return -1 +} + +// Parse parses the contents of a Dockerfile and returns a list of commands +func Parse(b []byte) ([]instructions.Stage, []instructions.ArgCommand, error) { + p, err := parser.Parse(bytes.NewReader(b)) + if err != nil { + return nil, nil, err + } + stages, metaArgs, err := instructions.Parse(p.AST) + if err != nil { + return nil, nil, err + } + + metaArgs, err = stripEnclosingQuotes(metaArgs) + if err != nil { + return nil, nil, err + } + + return stages, metaArgs, nil +} + +// expandNested tries to resolve nested ARG values against the previously defined ARGs +func expandNested(metaArgs []instructions.ArgCommand, buildArgs []string) ([]instructions.ArgCommand, error) { + prevArgs := make([]string, 0) + for i := range metaArgs { + arg := metaArgs[i] + v := arg.Value + if v != nil { + val, err := util.ResolveEnvironmentReplacement(*v, append(prevArgs, buildArgs...), false) + if err != nil { + return nil, err + } + prevArgs = append(prevArgs, arg.Key+"="+val) + arg.Value = &val + metaArgs[i] = arg + } + } + return metaArgs, nil +} + +// stripEnclosingQuotes removes quotes enclosing the value of each instructions.ArgCommand in a slice +// if the quotes are escaped it leaves them +func stripEnclosingQuotes(metaArgs []instructions.ArgCommand) ([]instructions.ArgCommand, error) { + for i := range metaArgs { + arg := metaArgs[i] + v := arg.Value + if v != nil { + val, err := extractValFromQuotes(*v) + if err != nil { + return nil, err + } + + arg.Value = &val + metaArgs[i] = arg + } + } + return metaArgs, nil +} + +func extractValFromQuotes(val string) (string, error) { + backSlash := byte('\\') + if len(val) < 2 { + return val, nil + } + + var leader string + var tail string + + switch char := val[0]; char { + case '\'', '"': + leader = string([]byte{char}) + case backSlash: + switch
char := val[1]; char { + case '\'', '"': + leader = string([]byte{backSlash, char}) + } + } + + // If the length of leader is greater than one then it must be an escaped + // character. + if len(leader) < 2 { + switch char := val[len(val)-1]; char { + case '\'', '"': + tail = string([]byte{char}) + } + } else { + switch char := val[len(val)-2:]; char { + case `\'`, `\"`: + tail = char + } + } + + if leader != tail { + logrus.Infof("leader %s tail %s", leader, tail) + return "", errors.New("quotes wrapping arg values must be matched") + } + + if leader == "" { + return val, nil + } + + if len(leader) == 2 { + return val, nil + } + + return val[1 : len(val)-1], nil +} + +// targetStage returns the index of the target stage kaniko is trying to build +func targetStage(stages []instructions.Stage, target string) (int, error) { + if target == "" { + return len(stages) - 1, nil + } + for i, stage := range stages { + if stage.Name == target { + return i, nil + } + } + return -1, fmt.Errorf("%s is not a valid target build stage", target) +} + +// ParseCommands parses an array of commands into an array of instructions.Command; used for onbuild +func ParseCommands(cmdArray []string) ([]instructions.Command, error) { + var cmds []instructions.Command + cmdString := strings.Join(cmdArray, "\n") + ast, err := parser.Parse(strings.NewReader(cmdString)) + if err != nil { + return nil, err + } + for _, child := range ast.AST.Children { + cmd, err := instructions.ParseCommand(child) + if err != nil { + return nil, err + } + cmds = append(cmds, cmd) + } + return cmds, nil +} + +// saveStage returns true if the current stage will be needed later in the Dockerfile +func saveStage(index int, stages []instructions.Stage) bool { + currentStageName := stages[index].Name + + for stageIndex, stage := range stages { + if stageIndex <= index { + continue + } + + if strings.ToLower(stage.BaseName) == currentStageName { + if stage.BaseName != "" { + return true + } + } + } + + return false +} + +// ResolveCrossStageCommands resolves any calls to previous stages with names to indices +// Ex. --from=secondStage should be --from=1 for easier processing later on +// Because the third-party parser lowercases stage names in FROM instructions, this function resolves stages case-insensitively.
+func ResolveCrossStageCommands(cmds []instructions.Command, stageNameToIdx map[string]string) { + for _, cmd := range cmds { + switch c := cmd.(type) { + case *instructions.CopyCommand: + if c.From != "" { + if val, ok := stageNameToIdx[strings.ToLower(c.From)]; ok { + c.From = val + } + } + } + } +} + +// resolveStagesArgs resolves all the args from the list of stages +func resolveStagesArgs(stages []instructions.Stage, args []string) error { + for i, s := range stages { + resolvedBaseName, err := util.ResolveEnvironmentReplacement(s.BaseName, args, false) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("resolving base name %s", s.BaseName)) + } + if s.BaseName != resolvedBaseName { + stages[i].BaseName = resolvedBaseName + } + } + return nil +} + +func MakeKanikoStages(opts *config.KanikoOptions, stages []instructions.Stage, metaArgs []instructions.ArgCommand) ([]config.KanikoStage, error) { + targetStage, err := targetStage(stages, opts.Target) + if err != nil { + return nil, errors.Wrap(err, "Error finding target stage") + } + args := unifyArgs(metaArgs, opts.BuildArgs) + if err := resolveStagesArgs(stages, args); err != nil { + return nil, errors.Wrap(err, "resolving args") + } + if opts.SkipUnusedStages { + stages = skipUnusedStages(stages, &targetStage, opts.Target) + } + var kanikoStages []config.KanikoStage + for index, stage := range stages { + if len(stage.Name) > 0 { + logrus.Infof("Resolved base name %s to %s", stage.BaseName, stage.Name) + } + baseImageIndex := baseImageIndex(index, stages) + kanikoStages = append(kanikoStages, config.KanikoStage{ + Stage: stage, + BaseImageIndex: baseImageIndex, + BaseImageStoredLocally: (baseImageIndex != -1), + SaveStage: saveStage(index, stages), + Final: index == targetStage, + MetaArgs: metaArgs, + Index: index, + }) + if index == targetStage { + break + } + } + return kanikoStages, nil +} + +func GetOnBuildInstructions(config *v1.Config, stageNameToIdx map[string]string) ([]instructions.Command, error) { + if config.OnBuild == nil || len(config.OnBuild) == 0 { + return nil, nil + } + + cmds, err := ParseCommands(config.OnBuild) + if err != nil { + return nil, err + } + + // Iterate over commands and replace references to other stages with their index + ResolveCrossStageCommands(cmds, stageNameToIdx) + return cmds, nil +} + +// unifyArgs returns the unified args between metaArgs and --build-arg +// by default --build-arg overrides metaArgs except when --build-arg is empty +func unifyArgs(metaArgs []instructions.ArgCommand, buildArgs []string) []string { + argsMap := make(map[string]string) + for _, a := range metaArgs { + if a.Value != nil { + argsMap[a.Key] = *a.Value + } + } + splitter := "=" + for _, a := range buildArgs { + s := strings.Split(a, splitter) + if len(s) > 1 && s[1] != "" { + argsMap[s[0]] = s[1] + } + } + var args []string + for k, v := range argsMap { + args = append(args, fmt.Sprintf("%s=%s", k, v)) + } + return args +} + +// skipUnusedStages returns the list of used stages without the unnecessary ones +func skipUnusedStages(stages []instructions.Stage, lastStageIndex *int, target string) []instructions.Stage { + stagesDependencies := make(map[string]bool) + var onlyUsedStages []instructions.Stage + idx := *lastStageIndex + + lastStageBaseName := stages[idx].BaseName + + for i := idx; i >= 0; i-- { + s := stages[i] + if (s.Name != "" && stagesDependencies[s.Name]) || s.Name == lastStageBaseName || i == idx { + for _, c := range s.Commands { + switch cmd := c.(type) { + case *instructions.CopyCommand: +
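// A COPY --from value may be a numeric stage index; resolve it back to the stage name so each dependency is tracked under a single key. +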
stageName := cmd.From + if copyFromIndex, err := strconv.Atoi(stageName); err == nil { + stageName = stages[copyFromIndex].Name + } + if !stagesDependencies[stageName] { + stagesDependencies[stageName] = true + } + } + } + if i != idx { + stagesDependencies[s.BaseName] = true + } + } + } + dependenciesLen := len(stagesDependencies) + if target == "" && dependenciesLen == 0 { + return stages + } else if dependenciesLen > 0 { + for i := 0; i < idx; i++ { + if stages[i].Name == "" { + continue + } + s := stages[i] + if stagesDependencies[s.Name] || s.Name == lastStageBaseName { + onlyUsedStages = append(onlyUsedStages, s) + } + } + } + onlyUsedStages = append(onlyUsedStages, stages[idx]) + if idx > len(onlyUsedStages)-1 { + *lastStageIndex = len(onlyUsedStages) - 1 + } + + return onlyUsedStages +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/executor/build.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/executor/build.go new file mode 100644 index 000000000..1f2b3aee1 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/executor/build.go @@ -0,0 +1,884 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package executor + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "time" + + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/empty" + "github.com/google/go-containerregistry/pkg/v1/mutate" + "github.com/google/go-containerregistry/pkg/v1/tarball" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sync/errgroup" + + "github.com/GoogleContainerTools/kaniko/pkg/cache" + "github.com/GoogleContainerTools/kaniko/pkg/commands" + "github.com/GoogleContainerTools/kaniko/pkg/config" + "github.com/GoogleContainerTools/kaniko/pkg/constants" + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + image_util "github.com/GoogleContainerTools/kaniko/pkg/image" + "github.com/GoogleContainerTools/kaniko/pkg/image/remote" + "github.com/GoogleContainerTools/kaniko/pkg/snapshot" + "github.com/GoogleContainerTools/kaniko/pkg/timing" + "github.com/GoogleContainerTools/kaniko/pkg/util" + "github.com/google/go-containerregistry/pkg/v1/partial" +) + +// This is the size of an empty tar in Go +const emptyTarSize = 1024 + +// for testing +var ( + initializeConfig = initConfig +) + +type cachePusher func(*config.KanikoOptions, string, string, string) error +type snapShotter interface { + Init() error + TakeSnapshotFS() (string, error) + TakeSnapshot([]string, bool, bool) (string, error) +} + +// stageBuilder contains all fields necessary to build one stage of a Dockerfile +type stageBuilder struct { + stage config.KanikoStage + image v1.Image + cf *v1.ConfigFile + baseImageDigest string + finalCacheKey string + opts *config.KanikoOptions + fileContext util.FileContext + cmds []commands.DockerCommand + 
args *dockerfile.BuildArgs + crossStageDeps map[int][]string + digestToCacheKey map[string]string + stageIdxToDigest map[string]string + snapshotter snapShotter + layerCache cache.LayerCache + pushLayerToCache cachePusher +} + +// newStageBuilder returns a new type stageBuilder which contains all the information required to build the stage +func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage, crossStageDeps map[int][]string, dcm map[string]string, sid map[string]string, stageNameToIdx map[string]string, fileContext util.FileContext) (*stageBuilder, error) { + sourceImage, err := image_util.RetrieveSourceImage(stage, opts) + if err != nil { + return nil, err + } + + imageConfig, err := initializeConfig(sourceImage, opts) + if err != nil { + return nil, err + } + + if err := resolveOnBuild(&stage, &imageConfig.Config, stageNameToIdx); err != nil { + return nil, err + } + + err = util.InitIgnoreList(true) + if err != nil { + return nil, errors.Wrap(err, "failed to initialize ignore list") + } + + hasher, err := getHasher(opts.SnapshotMode) + if err != nil { + return nil, err + } + l := snapshot.NewLayeredMap(hasher, util.CacheHasher()) + snapshotter := snapshot.NewSnapshotter(l, config.RootDir) + + digest, err := sourceImage.Digest() + if err != nil { + return nil, err + } + s := &stageBuilder{ + stage: stage, + image: sourceImage, + cf: imageConfig, + snapshotter: snapshotter, + baseImageDigest: digest.String(), + opts: opts, + fileContext: fileContext, + crossStageDeps: crossStageDeps, + digestToCacheKey: dcm, + stageIdxToDigest: sid, + layerCache: &cache.RegistryCache{ + Opts: opts, + }, + pushLayerToCache: pushLayerToCache, + } + + for _, cmd := range s.stage.Commands { + command, err := commands.GetCommand(cmd, fileContext, opts.RunV2, opts.CacheCopyLayers) + if err != nil { + return nil, err + } + if command == nil { + continue + } + s.cmds = append(s.cmds, command) + } + + s.args = dockerfile.NewBuildArgs(s.opts.BuildArgs) + s.args.AddMetaArgs(s.stage.MetaArgs) + return s, nil +} + +func initConfig(img partial.WithConfigFile, opts *config.KanikoOptions) (*v1.ConfigFile, error) { + imageConfig, err := img.ConfigFile() + if err != nil { + return nil, err + } + + if imageConfig.Config.Env == nil { + imageConfig.Config.Env = constants.ScratchEnvVars + } + + if opts == nil { + return imageConfig, nil + } + + if l := len(opts.Labels); l > 0 { + if imageConfig.Config.Labels == nil { + imageConfig.Config.Labels = make(map[string]string) + } + for _, label := range opts.Labels { + parts := strings.SplitN(label, "=", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("labels must be of the form key=value, got %s", label) + } + + imageConfig.Config.Labels[parts[0]] = parts[1] + } + } + + return imageConfig, nil +} + +func (s *stageBuilder) populateCompositeKey(command fmt.Stringer, files []string, compositeKey CompositeCache, args *dockerfile.BuildArgs, env []string) (CompositeCache, error) { + // First replace all the environment variables or args in the command + replacementEnvs := args.ReplacementEnvs(env) + resolvedCmd, err := util.ResolveEnvironmentReplacement(command.String(), replacementEnvs, false) + if err != nil { + return compositeKey, err + } + // Add the next command to the cache key. 
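+	// Editor's note (sketch, not vendored code): the composite key is an ordered
+	// list of strings, so for a stage based on digest sha256:abc... followed by
+	// `RUN make` and `COPY foo /`, the keys grow as
+	// [<base digest>, "RUN make", "COPY foo /", <hash of foo>];
+	// a change to any earlier entry changes every later cache key.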
+ compositeKey.AddKey(resolvedCmd) + if copyCmd, ok := commands.CastAbstractCopyCommand(command); ok == true { + compositeKey = s.populateCopyCmdCompositeKey(command, copyCmd.From(), compositeKey) + } + + for _, f := range files { + if err := compositeKey.AddPath(f, s.fileContext); err != nil { + return compositeKey, err + } + } + return compositeKey, nil +} + +func (s *stageBuilder) populateCopyCmdCompositeKey(command fmt.Stringer, from string, compositeKey CompositeCache) CompositeCache { + if from != "" { + digest, ok := s.stageIdxToDigest[from] + if ok { + ds := digest + cacheKey, ok := s.digestToCacheKey[ds] + if ok { + logrus.Debugf("adding digest %v from previous stage to composite key for %v", ds, command.String()) + compositeKey.AddKey(cacheKey) + } + } + } + + return compositeKey +} + +func (s *stageBuilder) optimize(compositeKey CompositeCache, cfg v1.Config) error { + if !s.opts.Cache { + return nil + } + + stopCache := false + // Possibly replace commands with their cached implementations. + // We walk through all the commands, running any commands that only operate on metadata. + // We throw the metadata away after, but we need it to properly track command dependencies + // for things like COPY ${FOO} or RUN commands that use environment variables. + for i, command := range s.cmds { + if command == nil { + continue + } + files, err := command.FilesUsedFromContext(&cfg, s.args) + if err != nil { + return errors.Wrap(err, "failed to get files used from context") + } + + compositeKey, err = s.populateCompositeKey(command, files, compositeKey, s.args, cfg.Env) + if err != nil { + return err + } + + logrus.Debugf("optimize: composite key for command %v %v", command.String(), compositeKey) + ck, err := compositeKey.Hash() + if err != nil { + return errors.Wrap(err, "failed to hash composite key") + } + + logrus.Debugf("optimize: cache key for command %v %v", command.String(), ck) + s.finalCacheKey = ck + + if command.ShouldCacheOutput() && !stopCache { + img, err := s.layerCache.RetrieveLayer(ck) + + if err != nil { + logrus.Debugf("Failed to retrieve layer: %s", err) + logrus.Infof("No cached layer found for cmd %s", command.String()) + logrus.Debugf("Key missing was: %s", compositeKey.Key()) + stopCache = true + continue + } + + if cacheCmd := command.CacheCommand(img); cacheCmd != nil { + logrus.Infof("Using caching version of cmd: %s", command.String()) + s.cmds[i] = cacheCmd + } + } + + // Mutate the config for any commands that require it. + if command.MetadataOnly() { + if err := command.ExecuteCommand(&cfg, s.args); err != nil { + return err + } + } + } + return nil +} + +func (s *stageBuilder) build() error { + // Set the initial cache key to be the base image digest, the build args and the SrcContext. + var compositeKey *CompositeCache + if cacheKey, ok := s.digestToCacheKey[s.baseImageDigest]; ok { + compositeKey = NewCompositeCache(cacheKey) + } else { + compositeKey = NewCompositeCache(s.baseImageDigest) + } + + // Apply optimizations to the instructions. + if err := s.optimize(*compositeKey, s.cf.Config); err != nil { + return errors.Wrap(err, "failed to optimize instructions") + } + + // Unpack file system to root if we need to. 
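+	// Editor's note: filesystem-mutating commands such as RUN report
+	// RequiresUnpackedFS() == true; a stage consisting only of metadata
+	// commands (ENV, LABEL, CMD, ...) can skip extracting the base image entirely.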
+ shouldUnpack := false + for _, cmd := range s.cmds { + if cmd.RequiresUnpackedFS() { + logrus.Infof("Unpacking rootfs as cmd %s requires it.", cmd.String()) + shouldUnpack = true + break + } + } + if len(s.crossStageDeps[s.stage.Index]) > 0 { + shouldUnpack = true + } + + if shouldUnpack { + t := timing.Start("FS Unpacking") + + retryFunc := func() error { + _, err := util.GetFSFromImage(config.RootDir, s.image, util.ExtractFile) + return err + } + + if err := util.Retry(retryFunc, s.opts.ImageFSExtractRetry, 1000); err != nil { + return errors.Wrap(err, "failed to get filesystem from image") + } + + timing.DefaultRun.Stop(t) + } else { + logrus.Info("Skipping unpacking as no commands require it.") + } + + initSnapshotTaken := false + if s.opts.SingleSnapshot || s.opts.RunV2 { + if err := s.initSnapshotWithTimings(); err != nil { + return err + } + initSnapshotTaken = true + } + + cacheGroup := errgroup.Group{} + for index, command := range s.cmds { + if command == nil { + continue + } + + t := timing.Start("Command: " + command.String()) + + // If the command uses files from the context, add them. + files, err := command.FilesUsedFromContext(&s.cf.Config, s.args) + if err != nil { + return errors.Wrap(err, "failed to get files used from context") + } + + if s.opts.Cache { + *compositeKey, err = s.populateCompositeKey(command, files, *compositeKey, s.args, s.cf.Config.Env) + if err != nil && s.opts.Cache { + return err + } + } + + logrus.Info(command.String()) + + isCacheCommand := func() bool { + switch command.(type) { + case commands.Cached: + return true + default: + return false + } + }() + if !initSnapshotTaken && !isCacheCommand && !command.ProvidesFilesToSnapshot() { + // Take initial snapshot if command does not expect to return + // a list of files. 
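+			// Editor's note: commands like COPY enumerate the exact files they
+			// produce, so the expensive full-filesystem baseline snapshot is
+			// deferred until a command cannot report its own output.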
+			if err := s.initSnapshotWithTimings(); err != nil {
+				return err
+			}
+			initSnapshotTaken = true
+		}
+
+		if err := command.ExecuteCommand(&s.cf.Config, s.args); err != nil {
+			return errors.Wrap(err, "failed to execute command")
+		}
+		files = command.FilesToSnapshot()
+		timing.DefaultRun.Stop(t)
+
+		if !s.shouldTakeSnapshot(index, command.MetadataOnly()) && !s.opts.ForceBuildMetadata {
+			logrus.Debugf("build: skipping snapshot for [%v]", command.String())
+			continue
+		}
+		if isCacheCommand {
+			v := command.(commands.Cached)
+			layer := v.Layer()
+			if err := s.saveLayerToImage(layer, command.String()); err != nil {
+				return errors.Wrap(err, "failed to save layer")
+			}
+		} else {
+			tarPath, err := s.takeSnapshot(files, command.ShouldDetectDeletedFiles())
+			if err != nil {
+				return errors.Wrap(err, "failed to take snapshot")
+			}
+
+			if s.opts.Cache {
+				logrus.Debugf("build: composite key for command %v %v", command.String(), compositeKey)
+				ck, err := compositeKey.Hash()
+				if err != nil {
+					return errors.Wrap(err, "failed to hash composite key")
+				}
+
+				logrus.Debugf("build: cache key for command %v %v", command.String(), ck)
+
+				// Push layer to cache (in parallel) now along with new config file
+				if command.ShouldCacheOutput() {
+					cacheGroup.Go(func() error {
+						return s.pushLayerToCache(s.opts, ck, tarPath, command.String())
+					})
+				}
+			}
+			if err := s.saveSnapshotToImage(command.String(), tarPath); err != nil {
+				return errors.Wrap(err, "failed to save snapshot to image")
+			}
+		}
+	}
+
+	if err := cacheGroup.Wait(); err != nil {
+		logrus.Warnf("error uploading layer to cache: %s", err)
+	}
+
+	return nil
+}
+
+func (s *stageBuilder) takeSnapshot(files []string, shdDelete bool) (string, error) {
+	var snapshot string
+	var err error
+
+	t := timing.Start("Snapshotting FS")
+	if files == nil || s.opts.SingleSnapshot {
+		snapshot, err = s.snapshotter.TakeSnapshotFS()
+	} else {
+		// Volumes are very weird. They get snapshotted in the next command.
+		files = append(files, util.Volumes()...)
+		snapshot, err = s.snapshotter.TakeSnapshot(files, shdDelete, s.opts.ForceBuildMetadata)
+	}
+	timing.DefaultRun.Stop(t)
+	return snapshot, err
+}
+
+func (s *stageBuilder) shouldTakeSnapshot(index int, isMetadataCmd bool) bool {
+	isLastCommand := index == len(s.cmds)-1
+
+	// We only snapshot the very end with single snapshot mode on.
+	if s.opts.SingleSnapshot {
+		return isLastCommand
+	}
+
+	// Always take snapshots if we're using the cache.
+	if s.opts.Cache {
+		return true
+	}
+
+	// If the command is a metadata-only command, do not snapshot.
+	return !isMetadataCmd
+}
+
+func (s *stageBuilder) saveSnapshotToImage(createdBy string, tarPath string) error {
+	layer, err := s.saveSnapshotToLayer(tarPath)
+	if err != nil {
+		return err
+	}
+
+	if layer == nil {
+		return nil
+	}
+
+	return s.saveLayerToImage(layer, createdBy)
+}
+
+func (s *stageBuilder) saveSnapshotToLayer(tarPath string) (v1.Layer, error) {
+	if tarPath == "" {
+		return nil, nil
+	}
+	fi, err := os.Stat(tarPath)
+	if err != nil {
+		return nil, errors.Wrap(err, "tar file path does not exist")
+	}
+	if fi.Size() <= emptyTarSize && !s.opts.ForceBuildMetadata {
+		logrus.Info("No files were changed, appending empty layer to config. No layer added to image.")
+		return nil, nil
+	}
+
+	var layer v1.Layer
+	if s.opts.CompressedCaching {
+		layer, err = tarball.LayerFromFile(tarPath, tarball.WithCompressedCaching)
+	} else {
+		layer, err = tarball.LayerFromFile(tarPath)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	return layer, nil
+}
+
+func (s *stageBuilder) saveLayerToImage(layer v1.Layer, createdBy string) error {
+	var err error
+	s.image, err = mutate.Append(s.image,
+		mutate.Addendum{
+			Layer: layer,
+			History: v1.History{
+				Author:    constants.Author,
+				CreatedBy: createdBy,
+			},
+		},
+	)
+	return err
+}
+
+func CalculateDependencies(stages []config.KanikoStage, opts *config.KanikoOptions, stageNameToIdx map[string]string) (map[int][]string, error) {
+	images := []v1.Image{}
+	depGraph := map[int][]string{}
+	for _, s := range stages {
+		ba := dockerfile.NewBuildArgs(opts.BuildArgs)
+		ba.AddMetaArgs(s.MetaArgs)
+		var image v1.Image
+		var err error
+		if s.BaseImageStoredLocally {
+			image = images[s.BaseImageIndex]
+		} else if s.Name == constants.NoBaseImage {
+			image = empty.Image
+		} else {
+			image, err = image_util.RetrieveSourceImage(s, opts)
+			if err != nil {
+				return nil, err
+			}
+		}
+		cfg, err := initializeConfig(image, opts)
+		if err != nil {
+			return nil, err
+		}
+
+		cmds, err := dockerfile.GetOnBuildInstructions(&cfg.Config, stageNameToIdx)
+		if err != nil {
+			return nil, err
+		}
+		cmds = append(cmds, s.Commands...)
+
+		for _, c := range cmds {
+			switch cmd := c.(type) {
+			case *instructions.CopyCommand:
+				if cmd.From != "" {
+					i, err := strconv.Atoi(cmd.From)
+					if err != nil {
+						continue
+					}
+					resolved, err := util.ResolveEnvironmentReplacementList(cmd.SourcesAndDest, ba.ReplacementEnvs(cfg.Config.Env), true)
+					if err != nil {
+						return nil, err
+					}
+					depGraph[i] = append(depGraph[i], resolved[0:len(resolved)-1]...)
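+					// Editor's note: for `COPY --from=0 /bin/tool /dst` the sources
+					// ("/bin/tool") are recorded under depGraph[0]; DoBuild later uses
+					// this to save those files before a stage's filesystem is deleted.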
+ } + case *instructions.EnvCommand: + if err := util.UpdateConfigEnv(cmd.Env, &cfg.Config, ba.ReplacementEnvs(cfg.Config.Env)); err != nil { + return nil, err + } + image, err = mutate.Config(image, cfg.Config) + if err != nil { + return nil, err + } + case *instructions.ArgCommand: + k, v, err := commands.ParseArg(cmd.Key, cmd.Value, cfg.Config.Env, ba) + if err != nil { + return nil, err + } + ba.AddArg(k, v) + } + } + images = append(images, image) + } + return depGraph, nil +} + +// DoBuild executes building the Dockerfile +func DoBuild(opts *config.KanikoOptions) (v1.Image, error) { + t := timing.Start("Total Build Time") + digestToCacheKey := make(map[string]string) + stageIdxToDigest := make(map[string]string) + + stages, metaArgs, err := dockerfile.ParseStages(opts) + if err != nil { + return nil, err + } + + kanikoStages, err := dockerfile.MakeKanikoStages(opts, stages, metaArgs) + if err != nil { + return nil, err + } + stageNameToIdx := ResolveCrossStageInstructions(kanikoStages) + + fileContext, err := util.NewFileContextFromDockerfile(opts.DockerfilePath, opts.SrcContext) + if err != nil { + return nil, err + } + + // Some stages may refer to other random images, not previous stages + if err := fetchExtraStages(kanikoStages, opts); err != nil { + return nil, err + } + crossStageDependencies, err := CalculateDependencies(kanikoStages, opts, stageNameToIdx) + if err != nil { + return nil, err + } + logrus.Infof("Built cross stage deps: %v", crossStageDependencies) + + for index, stage := range kanikoStages { + sb, err := newStageBuilder(opts, stage, crossStageDependencies, digestToCacheKey, stageIdxToDigest, stageNameToIdx, fileContext) + if err != nil { + return nil, err + } + if err := sb.build(); err != nil { + return nil, errors.Wrap(err, "error building stage") + } + + reviewConfig(stage, &sb.cf.Config) + + sourceImage, err := mutate.Config(sb.image, sb.cf.Config) + if err != nil { + return nil, err + } + + configFile, err := sourceImage.ConfigFile() + if err != nil { + return nil, err + } + if opts.CustomPlatform == "" { + configFile.OS = runtime.GOOS + configFile.Architecture = runtime.GOARCH + } else { + configFile.OS = strings.Split(opts.CustomPlatform, "/")[0] + configFile.Architecture = strings.Split(opts.CustomPlatform, "/")[1] + } + sourceImage, err = mutate.ConfigFile(sourceImage, configFile) + if err != nil { + return nil, err + } + + d, err := sourceImage.Digest() + if err != nil { + return nil, err + } + + stageIdxToDigest[fmt.Sprintf("%d", sb.stage.Index)] = d.String() + logrus.Debugf("mapping stage idx %v to digest %v", sb.stage.Index, d.String()) + + digestToCacheKey[d.String()] = sb.finalCacheKey + logrus.Debugf("mapping digest %v to cachekey %v", d.String(), sb.finalCacheKey) + + if stage.Final { + sourceImage, err = mutate.CreatedAt(sourceImage, v1.Time{Time: time.Now()}) + if err != nil { + return nil, err + } + if opts.Reproducible { + sourceImage, err = mutate.Canonical(sourceImage) + if err != nil { + return nil, err + } + } + if opts.Cleanup { + if err = util.DeleteFilesystem(); err != nil { + return nil, err + } + } + timing.DefaultRun.Stop(t) + return sourceImage, nil + } + if stage.SaveStage { + if err := saveStageAsTarball(strconv.Itoa(index), sourceImage); err != nil { + return nil, err + } + } + + filesToSave, err := filesToSave(crossStageDependencies[index]) + if err != nil { + return nil, err + } + dstDir := filepath.Join(config.KanikoDir, strconv.Itoa(index)) + if err := os.MkdirAll(dstDir, 0644); err != nil { + return nil, errors.Wrap(err, 
+ fmt.Sprintf("to create workspace for stage %s", + stageIdxToDigest[strconv.Itoa(index)], + )) + } + for _, p := range filesToSave { + logrus.Infof("Saving file %s for later use", p) + if err := util.CopyFileOrSymlink(p, dstDir, config.RootDir); err != nil { + return nil, errors.Wrap(err, "could not save file") + } + } + + // Delete the filesystem + if err := util.DeleteFilesystem(); err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("deleting file system after stage %d", index)) + } + } + + return nil, err +} + +// fileToSave returns all the files matching the given pattern in deps. +// If a file is a symlink, it also returns the target file. +func filesToSave(deps []string) ([]string, error) { + srcFiles := []string{} + for _, src := range deps { + srcs, err := filepath.Glob(filepath.Join(config.RootDir, src)) + if err != nil { + return nil, err + } + for _, f := range srcs { + if link, err := util.EvalSymLink(f); err == nil { + link, err = filepath.Rel(config.RootDir, link) + if err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("could not find relative path to %s", config.RootDir)) + } + srcFiles = append(srcFiles, link) + } + f, err = filepath.Rel(config.RootDir, f) + if err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("could not find relative path to %s", config.RootDir)) + } + srcFiles = append(srcFiles, f) + } + } + // remove duplicates + deduped := []string{} + m := map[string]struct{}{} + for _, f := range srcFiles { + if _, ok := m[f]; !ok { + deduped = append(deduped, f) + m[f] = struct{}{} + } + } + return deduped, nil +} + +func fetchExtraStages(stages []config.KanikoStage, opts *config.KanikoOptions) error { + t := timing.Start("Fetching Extra Stages") + defer timing.DefaultRun.Stop(t) + + var names []string + + for stageIndex, s := range stages { + for _, cmd := range s.Commands { + c, ok := cmd.(*instructions.CopyCommand) + if !ok || c.From == "" { + continue + } + + // FROMs at this point are guaranteed to be either an integer referring to a previous stage, + // the name of a previous stage, or a name of a remote image. + + // If it is an integer stage index, validate that it is actually a previous index + if fromIndex, err := strconv.Atoi(c.From); err == nil && stageIndex > fromIndex && fromIndex >= 0 { + continue + } + // Check if the name is the alias of a previous stage + if fromPreviousStage(c, names) { + continue + } + + // This must be an image name, fetch it. + logrus.Debugf("Found extra base image stage %s", c.From) + sourceImage, err := remote.RetrieveRemoteImage(c.From, opts.RegistryOptions, opts.CustomPlatform) + if err != nil { + return err + } + if err := saveStageAsTarball(c.From, sourceImage); err != nil { + return err + } + if err := extractImageToDependencyDir(c.From, sourceImage); err != nil { + return err + } + } + // Store the name of the current stage in the list with names, if applicable. 
+ if s.Name != "" { + names = append(names, s.Name) + } + } + return nil +} + +func fromPreviousStage(copyCommand *instructions.CopyCommand, previousStageNames []string) bool { + for _, previousStageName := range previousStageNames { + if previousStageName == copyCommand.From { + return true + } + } + return false +} + +func extractImageToDependencyDir(name string, image v1.Image) error { + t := timing.Start("Extracting Image to Dependency Dir") + defer timing.DefaultRun.Stop(t) + dependencyDir := filepath.Join(config.KanikoDir, name) + if err := os.MkdirAll(dependencyDir, 0755); err != nil { + return err + } + logrus.Debugf("trying to extract to %s", dependencyDir) + _, err := util.GetFSFromImage(dependencyDir, image, util.ExtractFile) + return err +} + +func saveStageAsTarball(path string, image v1.Image) error { + t := timing.Start("Saving stage as tarball") + defer timing.DefaultRun.Stop(t) + destRef, err := name.NewTag("temp/tag", name.WeakValidation) + if err != nil { + return err + } + tarPath := filepath.Join(constants.KanikoIntermediateStagesDir, path) + logrus.Infof("Storing source image from stage %s at path %s", path, tarPath) + if err := os.MkdirAll(filepath.Dir(tarPath), 0750); err != nil { + return err + } + return tarball.WriteToFile(tarPath, destRef, image) +} + +func getHasher(snapshotMode string) (func(string) (string, error), error) { + switch snapshotMode { + case constants.SnapshotModeTime: + logrus.Info("Only file modification time will be considered when snapshotting") + return util.MtimeHasher(), nil + case constants.SnapshotModeFull: + return util.Hasher(), nil + case constants.SnapshotModeRedo: + return util.RedoHasher(), nil + default: + return nil, fmt.Errorf("%s is not a valid snapshot mode", snapshotMode) + } +} + +func resolveOnBuild(stage *config.KanikoStage, config *v1.Config, stageNameToIdx map[string]string) error { + cmds, err := dockerfile.GetOnBuildInstructions(config, stageNameToIdx) + if err != nil { + return err + } + + // Append to the beginning of the commands in the stage + stage.Commands = append(cmds, stage.Commands...) 
+ logrus.Infof("Executing %v build triggers", len(cmds)) + + // Blank out the Onbuild command list for this image + config.OnBuild = nil + return nil +} + +// reviewConfig makes sure the value of CMD is correct after building the stage +// If ENTRYPOINT was set in this stage but CMD wasn't, then CMD should be cleared out +// See Issue #346 for more info +func reviewConfig(stage config.KanikoStage, config *v1.Config) { + entrypoint := false + cmd := false + + for _, c := range stage.Commands { + if c.Name() == constants.Cmd { + cmd = true + } + if c.Name() == constants.Entrypoint { + entrypoint = true + } + } + if entrypoint && !cmd { + config.Cmd = nil + } +} + +// iterates over a list of KanikoStage and resolves instructions referring to earlier stages +// returns a mapping of stage name to stage id, f.e - ["first": "0", "second": "1", "target": "2"] +func ResolveCrossStageInstructions(stages []config.KanikoStage) map[string]string { + nameToIndex := make(map[string]string) + for i, stage := range stages { + index := strconv.Itoa(i) + if stage.Name != "" { + nameToIndex[stage.Name] = index + } + dockerfile.ResolveCrossStageCommands(stage.Commands, nameToIndex) + } + + logrus.Debugf("Built stage name to index map: %v", nameToIndex) + return nameToIndex +} + +func (s stageBuilder) initSnapshotWithTimings() error { + t := timing.Start("Initial FS snapshot") + if err := s.snapshotter.Init(); err != nil { + return err + } + timing.DefaultRun.Stop(t) + return nil +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/executor/composite_cache.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/executor/composite_cache.go new file mode 100644 index 000000000..2c1e00d25 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/executor/composite_cache.go @@ -0,0 +1,121 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package executor + +import ( + "crypto/sha256" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/GoogleContainerTools/kaniko/pkg/util" + "github.com/pkg/errors" +) + +// NewCompositeCache returns an initialized composite cache object. +func NewCompositeCache(initial ...string) *CompositeCache { + c := CompositeCache{ + keys: initial, + } + return &c +} + +// CompositeCache is a type that generates a cache key from a series of keys. +type CompositeCache struct { + keys []string +} + +// AddKey adds the specified key to the sequence. +func (s *CompositeCache) AddKey(k ...string) { + s.keys = append(s.keys, k...) +} + +// Key returns the human readable composite key as a string. +func (s *CompositeCache) Key() string { + return strings.Join(s.keys, "-") +} + +// Hash returns the composite key in a string SHA256 format. 
+func (s *CompositeCache) Hash() (string, error) {
+	return util.SHA256(strings.NewReader(s.Key()))
+}
+
+func (s *CompositeCache) AddPath(p string, context util.FileContext) error {
+	sha := sha256.New()
+	fi, err := os.Lstat(p)
+	if err != nil {
+		return errors.Wrap(err, "could not add path")
+	}
+
+	if fi.Mode().IsDir() {
+		empty, k, err := hashDir(p, context)
+		if err != nil {
+			return err
+		}
+
+		// Add the hash of this directory to the key unless the
+		// directory is empty and is itself excluded.
+		if !empty || !context.ExcludesFile(p) {
+			s.keys = append(s.keys, k)
+		}
+		return nil
+	}
+
+	if context.ExcludesFile(p) {
+		return nil
+	}
+	fh, err := util.CacheHasher()(p)
+	if err != nil {
+		return err
+	}
+	if _, err := sha.Write([]byte(fh)); err != nil {
+		return err
+	}
+
+	s.keys = append(s.keys, fmt.Sprintf("%x", sha.Sum(nil)))
+	return nil
+}
+
+// hashDir returns whether the directory is empty and a hash of its non-excluded contents.
+func hashDir(p string, context util.FileContext) (bool, string, error) {
+	sha := sha256.New()
+	empty := true
+	if err := filepath.Walk(p, func(path string, fi os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		exclude := context.ExcludesFile(path)
+		if exclude {
+			return nil
+		}
+
+		fileHash, err := util.CacheHasher()(path)
+		if err != nil {
+			return err
+		}
+		if _, err := sha.Write([]byte(fileHash)); err != nil {
+			return err
+		}
+		empty = false
+		return nil
+	}); err != nil {
+		return false, "", err
+	}
+
+	return empty, fmt.Sprintf("%x", sha.Sum(nil)), nil
+}
diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/executor/fakes.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/executor/fakes.go
new file mode 100644
index 000000000..4b8dec1c6
--- /dev/null
+++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/executor/fakes.go
@@ -0,0 +1,197 @@
+/*
+Copyright 2018 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +// for use in tests +package executor + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + + "github.com/GoogleContainerTools/kaniko/pkg/commands" + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +type fakeSnapShotter struct { + file string + tarPath string +} + +func (f fakeSnapShotter) Init() error { return nil } +func (f fakeSnapShotter) TakeSnapshotFS() (string, error) { + return f.tarPath, nil +} +func (f fakeSnapShotter) TakeSnapshot(_ []string, _, _ bool) (string, error) { + return f.tarPath, nil +} + +type MockDockerCommand struct { + command string + contextFiles []string + cacheCommand commands.DockerCommand +} + +func (m MockDockerCommand) ExecuteCommand(c *v1.Config, args *dockerfile.BuildArgs) error { return nil } +func (m MockDockerCommand) String() string { + return m.command +} +func (m MockDockerCommand) FilesToSnapshot() []string { + return []string{"meow-snapshot-no-cache"} +} +func (m MockDockerCommand) ProvidesFilesToSnapshot() bool { + return true +} +func (m MockDockerCommand) CacheCommand(image v1.Image) commands.DockerCommand { + return m.cacheCommand +} +func (m MockDockerCommand) FilesUsedFromContext(c *v1.Config, args *dockerfile.BuildArgs) ([]string, error) { + return m.contextFiles, nil +} +func (m MockDockerCommand) MetadataOnly() bool { + return false +} +func (m MockDockerCommand) RequiresUnpackedFS() bool { + return false +} +func (m MockDockerCommand) ShouldCacheOutput() bool { + return true +} +func (m MockDockerCommand) ShouldDetectDeletedFiles() bool { + return false +} + +type MockCachedDockerCommand struct { + contextFiles []string +} + +func (m MockCachedDockerCommand) ExecuteCommand(c *v1.Config, args *dockerfile.BuildArgs) error { + return nil +} +func (m MockCachedDockerCommand) String() string { + return "meow" +} +func (m MockCachedDockerCommand) FilesToSnapshot() []string { + return []string{"meow-snapshot"} +} +func (m MockCachedDockerCommand) ProvidesFilesToSnapshot() bool { + return true +} +func (m MockCachedDockerCommand) CacheCommand(image v1.Image) commands.DockerCommand { + return nil +} +func (m MockCachedDockerCommand) ShouldDetectDeletedFiles() bool { + return false +} +func (m MockCachedDockerCommand) FilesUsedFromContext(c *v1.Config, args *dockerfile.BuildArgs) ([]string, error) { + return m.contextFiles, nil +} +func (m MockCachedDockerCommand) MetadataOnly() bool { + return false +} +func (m MockCachedDockerCommand) RequiresUnpackedFS() bool { + return false +} +func (m MockCachedDockerCommand) ShouldCacheOutput() bool { + return false +} + +type fakeLayerCache struct { + retrieve bool + receivedKeys []string + img v1.Image + keySequence []string +} + +func (f *fakeLayerCache) RetrieveLayer(key string) (v1.Image, error) { + f.receivedKeys = append(f.receivedKeys, key) + if len(f.keySequence) > 0 { + if f.keySequence[0] == key { + f.keySequence = f.keySequence[1:] + return f.img, nil + } + return f.img, errors.New("could not find layer") + } + + if !f.retrieve { + return nil, errors.New("could not find layer") + } + return f.img, nil +} + +type fakeLayer struct { + TarContent []byte +} + +func (f fakeLayer) Digest() (v1.Hash, error) { + return v1.Hash{}, nil +} +func (f fakeLayer) DiffID() (v1.Hash, error) { + return v1.Hash{}, nil +} +func (f fakeLayer) Compressed() (io.ReadCloser, error) { + return nil, nil +} +func (f fakeLayer) Uncompressed() (io.ReadCloser, error) { + return 
ioutil.NopCloser(bytes.NewReader(f.TarContent)), nil +} +func (f fakeLayer) Size() (int64, error) { + return 0, nil +} +func (f fakeLayer) MediaType() (types.MediaType, error) { + return "", nil +} + +type fakeImage struct { + ImageLayers []v1.Layer +} + +func (f fakeImage) Layers() ([]v1.Layer, error) { + return f.ImageLayers, nil +} +func (f fakeImage) MediaType() (types.MediaType, error) { + return "", nil +} +func (f fakeImage) Size() (int64, error) { + return 0, nil +} +func (f fakeImage) ConfigName() (v1.Hash, error) { + return v1.Hash{}, nil +} +func (f fakeImage) ConfigFile() (*v1.ConfigFile, error) { + return &v1.ConfigFile{}, nil +} +func (f fakeImage) RawConfigFile() ([]byte, error) { + return []byte{}, nil +} +func (f fakeImage) Digest() (v1.Hash, error) { + return v1.Hash{}, nil +} +func (f fakeImage) Manifest() (*v1.Manifest, error) { + return &v1.Manifest{}, nil +} +func (f fakeImage) RawManifest() ([]byte, error) { + return []byte{}, nil +} +func (f fakeImage) LayerByDigest(v1.Hash) (v1.Layer, error) { + return fakeLayer{}, nil +} +func (f fakeImage) LayerByDiffID(v1.Hash) (v1.Layer, error) { + return fakeLayer{}, nil +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/executor/push.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/executor/push.go new file mode 100644 index 000000000..a989db920 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/executor/push.go @@ -0,0 +1,370 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package executor + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/GoogleContainerTools/kaniko/pkg/cache" + "github.com/GoogleContainerTools/kaniko/pkg/config" + "github.com/GoogleContainerTools/kaniko/pkg/constants" + "github.com/GoogleContainerTools/kaniko/pkg/creds" + "github.com/GoogleContainerTools/kaniko/pkg/timing" + "github.com/GoogleContainerTools/kaniko/pkg/util" + "github.com/GoogleContainerTools/kaniko/pkg/version" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/empty" + "github.com/google/go-containerregistry/pkg/v1/layout" + "github.com/google/go-containerregistry/pkg/v1/mutate" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" + "github.com/google/go-containerregistry/pkg/v1/tarball" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/spf13/afero" +) + +type withUserAgent struct { + t http.RoundTripper +} + +// for testing +var ( + newRetry = transport.NewRetry +) + +const ( + UpstreamClientUaKey = "UPSTREAM_CLIENT_TYPE" +) + +// DockerConfLocation returns the file system location of the Docker +// configuration file under the directory set in the DOCKER_CONFIG environment +// variable. 
If that variable is not set, it returns the OS-equivalent of
+// "/kaniko/.docker/config.json".
+func DockerConfLocation() string {
+	configFile := "config.json"
+	if dockerConfig := os.Getenv("DOCKER_CONFIG"); dockerConfig != "" {
+		file, err := os.Stat(dockerConfig)
+		if err == nil {
+			if file.IsDir() {
+				return filepath.Join(dockerConfig, configFile)
+			}
+		} else {
+			if os.IsNotExist(err) {
+				return string(os.PathSeparator) + filepath.Join("kaniko", ".docker", configFile)
+			}
+		}
+		return filepath.Clean(dockerConfig)
+	}
+	return string(os.PathSeparator) + filepath.Join("kaniko", ".docker", configFile)
+}
+
+func (w *withUserAgent) RoundTrip(r *http.Request) (*http.Response, error) {
+	ua := []string{fmt.Sprintf("kaniko/%s", version.Version())}
+	if upstream := os.Getenv(UpstreamClientUaKey); upstream != "" {
+		ua = append(ua, upstream)
+	}
+	r.Header.Set("User-Agent", strings.Join(ua, ","))
+	return w.t.RoundTrip(r)
+}
+
+// for testing
+var (
+	fs                        = afero.NewOsFs()
+	execCommand               = exec.Command
+	checkRemotePushPermission = remote.CheckPushPermission
+)
+
+// CheckPushPermissions checks that the configured credentials can be used to
+// push to every specified destination.
+func CheckPushPermissions(opts *config.KanikoOptions) error {
+	targets := opts.Destinations
+	// When --no-push is set, we want to check permissions for the cache repo
+	// instead of the destinations
+	if opts.NoPush {
+		targets = []string{opts.CacheRepo}
+	}
+
+	checked := map[string]bool{}
+	_, err := fs.Stat(DockerConfLocation())
+	dockerConfNotExists := os.IsNotExist(err)
+	for _, destination := range targets {
+		destRef, err := name.NewTag(destination, name.WeakValidation)
+		if err != nil {
+			return errors.Wrap(err, "getting tag for destination")
+		}
+		if checked[destRef.Context().String()] {
+			continue
+		}
+
+		registryName := destRef.Repository.Registry.Name()
+		// Historically kaniko was pre-configured by default with the gcr credential helper;
+		// here we keep backwards compatibility by enabling the GCR helper only
+		// when gcr.io (or pkg.dev) is in one of the destinations.
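+		// Editor's note: with an empty /kaniko/.docker and a destination like
+		// us-docker.pkg.dev/proj/repo/img, the branch below runs
+		// `docker-credential-gcr configure-docker --registries=us-docker.pkg.dev`;
+		// a user-provided Docker config always takes precedence.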
+ if registryName == "gcr.io" || strings.HasSuffix(registryName, ".gcr.io") || strings.HasSuffix(registryName, ".pkg.dev") { + // Checking for existence of docker.config as it's normally required for + // authenticated registries and prevent overwriting user provided docker conf + if dockerConfNotExists { + flags := fmt.Sprintf("--registries=%s", registryName) + cmd := execCommand("docker-credential-gcr", "configure-docker", flags) + var out bytes.Buffer + cmd.Stderr = &out + if err := cmd.Run(); err != nil { + return errors.Wrap(err, fmt.Sprintf("error while configuring docker-credential-gcr helper: %s : %s", cmd.String(), out.String())) + } + } else { + logrus.Warnf("\nSkip running docker-credential-gcr as user provided docker configuration exists at %s", DockerConfLocation()) + } + } + if opts.Insecure || opts.InsecureRegistries.Contains(registryName) { + newReg, err := name.NewRegistry(registryName, name.WeakValidation, name.Insecure) + if err != nil { + return errors.Wrap(err, "getting new insecure registry") + } + destRef.Repository.Registry = newReg + } + tr := newRetry(util.MakeTransport(opts.RegistryOptions, registryName)) + if err := checkRemotePushPermission(destRef, creds.GetKeychain(), tr); err != nil { + return errors.Wrapf(err, "checking push permission for %q", destRef) + } + checked[destRef.Context().String()] = true + } + return nil +} + +func getDigest(image v1.Image) ([]byte, error) { + digest, err := image.Digest() + if err != nil { + return nil, err + } + return []byte(digest.String()), nil +} + +func writeDigestFile(path string, digestByteArray []byte) error { + parentDir := filepath.Dir(path) + if _, err := os.Stat(parentDir); os.IsNotExist(err) { + if err := os.MkdirAll(parentDir, 0700); err != nil { + logrus.Debugf("error creating %s, %s", parentDir, err) + return err + } + logrus.Tracef("Created directory %v", parentDir) + } + return ioutil.WriteFile(path, digestByteArray, 0644) +} + +// DoPush is responsible for pushing image to the destinations specified in opts +func DoPush(image v1.Image, opts *config.KanikoOptions) error { + t := timing.Start("Total Push Time") + var digestByteArray []byte + var builder strings.Builder + if opts.DigestFile != "" || opts.ImageNameDigestFile != "" || opts.ImageNameTagDigestFile != "" { + var err error + digestByteArray, err = getDigest(image) + if err != nil { + return errors.Wrap(err, "error fetching digest") + } + } + + if opts.DigestFile != "" { + err := writeDigestFile(opts.DigestFile, digestByteArray) + if err != nil { + return errors.Wrap(err, "writing digest to file failed") + } + } + + if opts.OCILayoutPath != "" { + path, err := layout.Write(opts.OCILayoutPath, empty.Index) + if err != nil { + return errors.Wrap(err, "writing empty layout") + } + if err := path.AppendImage(image); err != nil { + return errors.Wrap(err, "appending image") + } + } + + destRefs := []name.Tag{} + for _, destination := range opts.Destinations { + destRef, err := name.NewTag(destination, name.WeakValidation) + if err != nil { + return errors.Wrap(err, "getting tag for destination") + } + if opts.ImageNameDigestFile != "" || opts.ImageNameTagDigestFile != "" { + tag := "" + if opts.ImageNameTagDigestFile != "" && destRef.TagStr() != "" { + tag = ":" + destRef.TagStr() + } + imageName := []byte(destRef.Repository.Name() + tag + "@") + builder.Write(append(imageName, digestByteArray...)) + builder.WriteString("\n") + } + destRefs = append(destRefs, destRef) + } + + if opts.ImageNameDigestFile != "" { + err := 
writeDigestFile(opts.ImageNameDigestFile, []byte(builder.String())) + if err != nil { + return errors.Wrap(err, "writing image name with digest to file failed") + } + } + + if opts.ImageNameTagDigestFile != "" { + err := writeDigestFile(opts.ImageNameTagDigestFile, []byte(builder.String())) + if err != nil { + return errors.Wrap(err, "writing image name with image tag and digest to file failed") + } + } + + if opts.TarPath != "" { + tagToImage := map[name.Tag]v1.Image{} + + if len(destRefs) == 0 { + return errors.New("must provide at least one destination when tarPath is specified") + } + + for _, destRef := range destRefs { + tagToImage[destRef] = image + } + err := tarball.MultiWriteToFile(opts.TarPath, tagToImage) + if err != nil { + return errors.Wrap(err, "writing tarball to file failed") + } + } + + if opts.NoPush { + logrus.Info("Skipping push to container registry due to --no-push flag") + return nil + } + + // continue pushing unless an error occurs + for _, destRef := range destRefs { + registryName := destRef.Repository.Registry.Name() + if opts.Insecure || opts.InsecureRegistries.Contains(registryName) { + newReg, err := name.NewRegistry(registryName, name.WeakValidation, name.Insecure) + if err != nil { + return errors.Wrap(err, "getting new insecure registry") + } + destRef.Repository.Registry = newReg + } + + pushAuth, err := creds.GetKeychain().Resolve(destRef.Context().Registry) + if err != nil { + return errors.Wrap(err, "resolving pushAuth") + } + + tr := newRetry(util.MakeTransport(opts.RegistryOptions, registryName)) + rt := &withUserAgent{t: tr} + + logrus.Infof("Pushing image to %s", destRef.String()) + + retryFunc := func() error { + return remote.Write(destRef, image, remote.WithAuth(pushAuth), remote.WithTransport(rt)) + } + + if err := util.Retry(retryFunc, opts.PushRetry, 1000); err != nil { + return errors.Wrap(err, fmt.Sprintf("failed to push to destination %s", destRef)) + } + } + timing.DefaultRun.Stop(t) + logrus.Infof("Pushed image to %d destinations", len(destRefs)) + return writeImageOutputs(image, destRefs) +} + +func writeImageOutputs(image v1.Image, destRefs []name.Tag) error { + dir := os.Getenv("BUILDER_OUTPUT") + if dir == "" { + return nil + } + f, err := fs.Create(filepath.Join(dir, "images")) + if err != nil { + return err + } + defer f.Close() + + d, err := image.Digest() + if err != nil { + return err + } + + type imageOutput struct { + Name string `json:"name"` + Digest string `json:"digest"` + } + for _, r := range destRefs { + if err := json.NewEncoder(f).Encode(imageOutput{ + Name: r.String(), + Digest: d.String(), + }); err != nil { + return err + } + } + return nil +} + +// pushLayerToCache pushes layer (tagged with cacheKey) to opts.Cache +// if opts.Cache doesn't exist, infer the cache from the given destination +func pushLayerToCache(opts *config.KanikoOptions, cacheKey string, tarPath string, createdBy string) error { + var layer v1.Layer + var err error + if opts.CompressedCaching == true { + layer, err = tarball.LayerFromFile(tarPath, tarball.WithCompressedCaching) + } else { + layer, err = tarball.LayerFromFile(tarPath) + } + if err != nil { + return err + } + cache, err := cache.Destination(opts, cacheKey) + if err != nil { + return errors.Wrap(err, "getting cache destination") + } + logrus.Infof("Pushing layer %s to cache now", cache) + empty := empty.Image + empty, err = mutate.CreatedAt(empty, v1.Time{Time: time.Now()}) + if err != nil { + return errors.Wrap(err, "setting empty image created time") + } + + empty, err = 
mutate.Append(empty,
+		mutate.Addendum{
+			Layer: layer,
+			History: v1.History{
+				Author:    constants.Author,
+				CreatedBy: createdBy,
+			},
+		},
+	)
+	if err != nil {
+		return errors.Wrap(err, "appending layer onto empty image")
+	}
+	cacheOpts := *opts
+	cacheOpts.TarPath = ""   // tarPath doesn't make sense for Docker layers
+	cacheOpts.NoPush = false // we want to push cached layers
+	cacheOpts.Destinations = []string{cache}
+	cacheOpts.InsecureRegistries = opts.InsecureRegistries
+	cacheOpts.SkipTLSVerifyRegistries = opts.SkipTLSVerifyRegistries
+	return DoPush(empty, &cacheOpts)
+}
diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/filesystem/resolve.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/filesystem/resolve.go
new file mode 100644
index 000000000..9ab2819c9
--- /dev/null
+++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/filesystem/resolve.go
@@ -0,0 +1,165 @@
+/*
+Copyright 2020 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package filesystem
+
+import (
+	"os"
+	"path/filepath"
+
+	"github.com/GoogleContainerTools/kaniko/pkg/config"
+	"github.com/GoogleContainerTools/kaniko/pkg/util"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// ResolvePaths takes a slice of file paths and an ignorelist. It resolves each
+// file path according to a set of rules and then returns a slice of resolved paths or an error.
+// File paths are resolved according to the following rules:
+// * If a path is in the ignorelist, skip it.
+// * If a path is a symlink, resolve its ancestor link and add it to the output set.
+// * If a path is a symlink, resolve its target. If the target is not ignored, add it to the
+//   output set.
+// * Add all ancestors of each path to the output set.
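+//
+// Editor's example (not vendored code): with /etc/foo being a symlink to
+// /tmp/foo and nothing ignored,
+//
+//	ResolvePaths([]string{"/etc/foo"}, nil)
+//
+// returns /etc/foo (the link itself), /tmp/foo (its target), and their
+// ancestors /, /etc and /tmp.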
+func ResolvePaths(paths []string, wl []util.IgnoreListEntry) (pathsToAdd []string, err error) {
+	logrus.Tracef("Resolving paths %s", paths)
+
+	fileSet := make(map[string]bool)
+
+	for _, f := range paths {
+		// If the given path is part of the ignorelist ignore it
+		if util.IsInProvidedIgnoreList(f, wl) {
+			logrus.Debugf("path %s is in list to ignore, ignoring it", f)
+			continue
+		}
+
+		link, e := resolveSymlinkAncestor(f)
+		if e != nil {
+			continue
+		}
+
+		if f != link {
+			logrus.Tracef("updated link %s to %s", f, link)
+		}
+
+		if !fileSet[link] {
+			pathsToAdd = append(pathsToAdd, link)
+		}
+		fileSet[link] = true
+
+		var evaled string
+
+		// If the path is a symlink we need to also consider the target of that
+		// link
+		evaled, e = filepath.EvalSymlinks(f)
+		if e != nil {
+			if !os.IsNotExist(e) {
+				logrus.Errorf("couldn't eval %s with link %s", f, link)
+				return
+			}
+
+			logrus.Debugf("symlink path %s, target does not exist", f)
+			continue
+		}
+		if f != evaled {
+			logrus.Debugf("resolved symlink %s to %s", f, evaled)
+		}
+
+		// If the given path is a symlink and the target is part of the ignorelist
+		// ignore the target
+		if util.CheckProvidedIgnoreList(evaled, wl) {
+			logrus.Debugf("path %s is ignored, ignoring it", evaled)
+			continue
+		}
+
+		if !fileSet[evaled] {
+			pathsToAdd = append(pathsToAdd, evaled)
+		}
+		fileSet[evaled] = true
+	}
+
+	// Also add parent directories to keep their permissions correct.
+	pathsToAdd = filesWithParentDirs(pathsToAdd)
+	return
+}
+
+// filesWithParentDirs returns every ancestor path for each provided file path.
+// E.g. /foo/bar/baz/boom.txt => [/, /foo, /foo/bar, /foo/bar/baz, /foo/bar/baz/boom.txt]
+func filesWithParentDirs(files []string) []string {
+	filesSet := map[string]bool{}
+
+	for _, file := range files {
+		file = filepath.Clean(file)
+		filesSet[file] = true
+
+		for _, dir := range util.ParentDirectories(file) {
+			dir = filepath.Clean(dir)
+			filesSet[dir] = true
+		}
+	}
+
+	newFiles := []string{}
+	for file := range filesSet {
+		newFiles = append(newFiles, file)
+	}
+
+	return newFiles
+}
+
+// resolveSymlinkAncestor returns the ancestor link of the provided symlink path, or returns
+// the path itself if it is not a link. The ancestor link is the file node whose type is a symlink.
+// E.g. /baz/boom/bar.txt links to /usr/bin/bar.txt, but /baz/boom/bar.txt itself is not a link.
+// Instead /baz/boom is actually a link to /usr/bin. In this case resolveSymlinkAncestor would
+// return /baz/boom.
+func resolveSymlinkAncestor(path string) (string, error) {
+	if !filepath.IsAbs(path) {
+		return "", errors.New("dest path must be abs")
+	}
+
+	last := ""
+	newPath := filepath.Clean(path)
+
+loop:
+	for newPath != config.RootDir {
+		fi, err := os.Lstat(newPath)
+		if err != nil {
+			return "", errors.Wrap(err, "resolvePaths: failed to lstat")
+		}
+
+		if util.IsSymlink(fi) {
+			last = filepath.Base(newPath)
+			newPath = filepath.Dir(newPath)
+		} else {
+			// Even if the file node pointed to by newPath is a regular file,
+			// one of its ancestors could be a symlink. We call filepath.EvalSymlinks
+			// to test whether there are any links in the path. If the output of
+			// EvalSymlinks is different than the input, we know one of the nodes
+			// in the path is a link.
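+			// Editor's example: for /baz/boom/bar.txt where only /baz/boom is a
+			// link, Lstat sees a regular file but EvalSymlinks returns
+			// /usr/bin/bar.txt, so the walk keeps climbing and ultimately
+			// returns /baz/boom.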
+ target, err := filepath.EvalSymlinks(newPath) + if err != nil { + return "", err + } + if target != newPath { + last = filepath.Base(newPath) + newPath = filepath.Dir(newPath) + } else { + break loop + } + } + } + newPath = filepath.Join(newPath, last) + return filepath.Clean(newPath), nil +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/image/image_util.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/image/image_util.go new file mode 100644 index 000000000..21ec54ab0 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/image/image_util.go @@ -0,0 +1,120 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package image + +import ( + "fmt" + "path/filepath" + "strconv" + + "github.com/GoogleContainerTools/kaniko/pkg/cache" + "github.com/GoogleContainerTools/kaniko/pkg/config" + "github.com/GoogleContainerTools/kaniko/pkg/constants" + "github.com/GoogleContainerTools/kaniko/pkg/image/remote" + "github.com/GoogleContainerTools/kaniko/pkg/timing" + "github.com/GoogleContainerTools/kaniko/pkg/util" + + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/empty" + "github.com/google/go-containerregistry/pkg/v1/tarball" + + "github.com/sirupsen/logrus" +) + +var ( + // RetrieveRemoteImage downloads an image from a remote location + RetrieveRemoteImage = remote.RetrieveRemoteImage + retrieveTarImage = tarballImage +) + +// RetrieveSourceImage returns the base image of the stage at index +func RetrieveSourceImage(stage config.KanikoStage, opts *config.KanikoOptions) (v1.Image, error) { + t := timing.Start("Retrieving Source Image") + defer timing.DefaultRun.Stop(t) + var buildArgs []string + + for _, arg := range stage.MetaArgs { + buildArgs = append(buildArgs, fmt.Sprintf("%s=%s", arg.Key, arg.ValueString())) + } + buildArgs = append(buildArgs, opts.BuildArgs...) 
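+	// Editor's note: this lets a base name like `alpine:${TAG}` (declared via a
+	// meta `ARG TAG=latest` and optionally overridden with --build-arg TAG=3.18)
+	// be expanded before the stage's source image is located.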
+ currentBaseName, err := util.ResolveEnvironmentReplacement(stage.BaseName, buildArgs, false) + if err != nil { + return nil, err + } + // First, check if the base image is a scratch image + if currentBaseName == constants.NoBaseImage { + logrus.Info("No base image, nothing to extract") + return empty.Image, nil + } + // Next, check if the base image of the current stage is built from a previous stage + // If so, retrieve the image from the stored tarball + if stage.BaseImageStoredLocally { + return retrieveTarImage(stage.BaseImageIndex) + } + + // Finally, check if local caching is enabled + // If so, look in the local cache before trying the remote registry + if opts.Cache && opts.CacheDir != "" { + cachedImage, err := cachedImage(opts, currentBaseName) + if err != nil { + switch { + case cache.IsNotFound(err): + logrus.Debugf("Image %v not found in cache", currentBaseName) + case cache.IsExpired(err): + logrus.Debugf("Image %v found in cache but was expired", currentBaseName) + default: + logrus.Errorf("Error while retrieving image from cache: %v %v", currentBaseName, err) + } + } else if cachedImage != nil { + return cachedImage, nil + } + } + + // Otherwise, initialize image as usual + return RetrieveRemoteImage(currentBaseName, opts.RegistryOptions, opts.CustomPlatform) +} + +func tarballImage(index int) (v1.Image, error) { + tarPath := filepath.Join(constants.KanikoIntermediateStagesDir, strconv.Itoa(index)) + logrus.Infof("Base image from previous stage %d found, using saved tar at path %s", index, tarPath) + return tarball.ImageFromPath(tarPath, nil) +} + +func cachedImage(opts *config.KanikoOptions, image string) (v1.Image, error) { + ref, err := name.ParseReference(image, name.WeakValidation) + if err != nil { + return nil, err + } + + var cacheKey string + if d, ok := ref.(name.Digest); ok { + cacheKey = d.DigestStr() + } else { + image, err := remote.RetrieveRemoteImage(image, opts.RegistryOptions, opts.CustomPlatform) + if err != nil { + return nil, err + } + + d, err := image.Digest() + if err != nil { + return nil, err + } + cacheKey = d.String() + } + return cache.LocalSource(&opts.CacheOptions, cacheKey) +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/image/remote/remote.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/image/remote/remote.go new file mode 100644 index 000000000..88a527839 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/image/remote/remote.go @@ -0,0 +1,169 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package remote + +import ( + "runtime" + "strings" + + "github.com/GoogleContainerTools/kaniko/pkg/config" + "github.com/GoogleContainerTools/kaniko/pkg/creds" + "github.com/GoogleContainerTools/kaniko/pkg/util" + + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/remote" + + "github.com/sirupsen/logrus" +) + +var ( + manifestCache = make(map[string]v1.Image) +) + +// RetrieveRemoteImage retrieves the manifest for the specified image from the specified registry +func RetrieveRemoteImage(image string, opts config.RegistryOptions, customPlatform string) (v1.Image, error) { + logrus.Infof("Retrieving image manifest %s", image) + + cachedRemoteImage := manifestCache[image] + if cachedRemoteImage != nil { + logrus.Infof("Returning cached image manifest") + return cachedRemoteImage, nil + } + + ref, err := name.ParseReference(image, name.WeakValidation) + if err != nil { + return nil, err + } + + if ref.Context().RegistryStr() == name.DefaultRegistry { + ref, err := normalizeReference(ref, image) + if err != nil { + return nil, err + } + + for _, registryMirror := range opts.RegistryMirrors { + var newRepo name.Repository + if opts.InsecurePull || opts.InsecureRegistries.Contains(registryMirror) { + newRepo, err = name.NewRepository(registryMirror, name.WeakValidation, name.Insecure) + } else { + newRepo, err = name.NewRepository(registryMirror, name.StrictValidation) + } + if err != nil { + return nil, err + } + ref := setNewRepository(ref, newRepo) + + logrus.Infof("Retrieving image %s from registry mirror %s", ref, registryMirror) + remoteImage, err := remote.Image(ref, remoteOptions(registryMirror, opts, customPlatform)...) + if err != nil { + logrus.Warnf("Failed to retrieve image %s from registry mirror %s: %s. Will try with the next mirror, or fallback to the default registry.", ref, registryMirror, err) + continue + } + + manifestCache[image] = remoteImage + + return remoteImage, nil + } + } + + registryName := ref.Context().RegistryStr() + if opts.InsecurePull || opts.InsecureRegistries.Contains(registryName) { + newReg, err := name.NewRegistry(registryName, name.WeakValidation, name.Insecure) + if err != nil { + return nil, err + } + ref = setNewRegistry(ref, newReg) + } + + logrus.Infof("Retrieving image %s from registry %s", ref, registryName) + + remoteImage, err := remote.Image(ref, remoteOptions(registryName, opts, customPlatform)...) + + if remoteImage != nil { + manifestCache[image] = remoteImage + } + + return remoteImage, err +} + +// normalizeReference adds the library/ prefix to images without it. +// +// It is mostly useful when using a registry mirror that is not able to perform +// this fix automatically. 
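+//
+// For example (editor's note): "debian" contains no "/", so it is re-parsed
+// as "library/debian"; a mirror that does not apply Docker Hub's implicit
+// library/ prefix can then still resolve the repository.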
+func normalizeReference(ref name.Reference, image string) (name.Reference, error) { + if !strings.ContainsRune(image, '/') { + return name.ParseReference("library/"+image, name.WeakValidation) + } + + return ref, nil +} + +func setNewRepository(ref name.Reference, newRepo name.Repository) name.Reference { + switch r := ref.(type) { + case name.Tag: + r.Repository = newRepo + return r + case name.Digest: + r.Repository = newRepo + return r + default: + return ref + } +} + +func setNewRegistry(ref name.Reference, newReg name.Registry) name.Reference { + switch r := ref.(type) { + case name.Tag: + r.Repository.Registry = newReg + return r + case name.Digest: + r.Repository.Registry = newReg + return r + default: + return ref + } +} + +func remoteOptions(registryName string, opts config.RegistryOptions, customPlatform string) []remote.Option { + tr := util.MakeTransport(opts, registryName) + + // on which v1.Platform is this currently running? + platform := currentPlatform(customPlatform) + + return []remote.Option{remote.WithTransport(tr), remote.WithAuthFromKeychain(creds.GetKeychain()), remote.WithPlatform(platform)} +} + +// CurrentPlatform returns the v1.Platform on which the code runs +func currentPlatform(customPlatform string) v1.Platform { + if customPlatform != "" { + customPlatformArray := strings.Split(customPlatform, "/") + imagePlatform := v1.Platform{} + imagePlatform.OS = customPlatformArray[0] + if len(customPlatformArray) > 1 { + imagePlatform.Architecture = customPlatformArray[1] + if len(customPlatformArray) > 2 { + imagePlatform.Variant = customPlatformArray[2] + } + } + return imagePlatform + } + return v1.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + } +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/snapshot/layered_map.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/snapshot/layered_map.go new file mode 100644 index 000000000..976ad19d1 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/snapshot/layered_map.go @@ -0,0 +1,144 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package snapshot
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/GoogleContainerTools/kaniko/pkg/timing"
+	"github.com/GoogleContainerTools/kaniko/pkg/util"
+	"github.com/sirupsen/logrus"
+)
+
+type LayeredMap struct {
+	layers []map[string]string
+	whiteouts []map[string]struct{}
+	layerHashCache map[string]string
+	hasher func(string) (string, error)
+	// cacheHasher doesn't include mtime in its hash so that filesystem cache keys are stable
+	cacheHasher func(string) (string, error)
+}
+
+func NewLayeredMap(h func(string) (string, error), c func(string) (string, error)) *LayeredMap {
+	l := LayeredMap{
+		hasher: h,
+		cacheHasher: c,
+	}
+	l.layers = []map[string]string{}
+	l.layerHashCache = map[string]string{}
+	return &l
+}
+
+func (l *LayeredMap) Snapshot() {
+	l.whiteouts = append(l.whiteouts, map[string]struct{}{})
+	l.layers = append(l.layers, map[string]string{})
+}
+
+// Key returns a hash for added files
+func (l *LayeredMap) Key() (string, error) {
+	c := bytes.NewBuffer([]byte{})
+	enc := json.NewEncoder(c)
+	if err := enc.Encode(l.layers); err != nil {
+		return "", err
+	}
+	return util.SHA256(c)
+}
+
+// getFlattenedPathsForWhiteOut returns all paths in the current FS
+func (l *LayeredMap) getFlattenedPathsForWhiteOut() map[string]struct{} {
+	paths := map[string]struct{}{}
+	for _, l := range l.layers {
+		for p := range l {
+			if strings.HasPrefix(filepath.Base(p), ".wh.") {
+				delete(paths, p)
+			}
+			paths[p] = struct{}{}
+		}
+	}
+	return paths
+}
+
+func (l *LayeredMap) Get(s string) (string, bool) {
+	for i := len(l.layers) - 1; i >= 0; i-- {
+		if v, ok := l.layers[i][s]; ok {
+			return v, ok
+		}
+	}
+	return "", false
+}
+
+func (l *LayeredMap) GetWhiteout(s string) bool {
+	for i := len(l.whiteouts) - 1; i >= 0; i-- {
+		if _, ok := l.whiteouts[i][s]; ok {
+			return ok
+		}
+	}
+	return false
+}
+
+func (l *LayeredMap) MaybeAddWhiteout(s string) bool {
+	ok := l.GetWhiteout(s)
+	if ok {
+		return false
+	}
+	l.whiteouts[len(l.whiteouts)-1][s] = struct{}{}
+	return true
+}
+
+// Add will add the specified file s to the layered map.
+func (l *LayeredMap) Add(s string) error {
+	// Use hash function and add to layers
+	newV, err := func(s string) (string, error) {
+		if v, ok := l.layerHashCache[s]; ok {
+			// consume the cached hash and clear the entry for the next layer.
+			delete(l.layerHashCache, s)
+			return v, nil
+		}
+		return l.hasher(s)
+	}(s)
+	if err != nil {
+		return fmt.Errorf("error creating hash for %s: %v", s, err)
+	}
+	l.layers[len(l.layers)-1][s] = newV
+	return nil
+}
+
+// CheckFileChange checks whether a given file has changed
+// from the current layered map by its hashing function.
+// Returns true if the file has changed.
+func (l *LayeredMap) CheckFileChange(s string) (bool, error) {
+	t := timing.Start("Hashing files")
+	defer timing.DefaultRun.Stop(t)
+	newV, err := l.hasher(s)
+	if err != nil {
+		// if this file does not exist in the new layer, return.
+ if os.IsNotExist(err) { + logrus.Tracef("%s detected as changed but does not exist", s) + return false, nil + } + return false, err + } + l.layerHashCache[s] = newV + oldV, ok := l.Get(s) + if ok && newV == oldV { + return false, nil + } + return true, nil +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/snapshot/snapshot.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/snapshot/snapshot.go new file mode 100644 index 000000000..ba7371846 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/snapshot/snapshot.go @@ -0,0 +1,255 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package snapshot + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" + "syscall" + + "github.com/GoogleContainerTools/kaniko/pkg/config" + "github.com/GoogleContainerTools/kaniko/pkg/filesystem" + "github.com/GoogleContainerTools/kaniko/pkg/timing" + "github.com/GoogleContainerTools/kaniko/pkg/util" + + "github.com/sirupsen/logrus" +) + +// For testing +var snapshotPathPrefix = "" + +// Snapshotter holds the root directory from which to take snapshots, and a list of snapshots taken +type Snapshotter struct { + l *LayeredMap + directory string + ignorelist []util.IgnoreListEntry +} + +// NewSnapshotter creates a new snapshotter rooted at d +func NewSnapshotter(l *LayeredMap, d string) *Snapshotter { + return &Snapshotter{l: l, directory: d, ignorelist: util.IgnoreList()} +} + +// Init initializes a new snapshotter +func (s *Snapshotter) Init() error { + _, _, err := s.scanFullFilesystem() + return err +} + +// Key returns a string based on the current state of the file system +func (s *Snapshotter) Key() (string, error) { + return s.l.Key() +} + +// TakeSnapshot takes a snapshot of the specified files, avoiding directories in the ignorelist, and creates +// a tarball of the changed files. 
Returns the path to the tarball; the path is empty when no files changed.
+func (s *Snapshotter) TakeSnapshot(files []string, shdCheckDelete bool, forceBuildMetadata bool) (string, error) {
+	f, err := ioutil.TempFile(config.KanikoDir, "")
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+
+	s.l.Snapshot()
+	if len(files) == 0 && !forceBuildMetadata {
+		logrus.Info("No files changed in this command, skipping snapshotting.")
+		return "", nil
+	}
+
+	filesToAdd, err := filesystem.ResolvePaths(files, s.ignorelist)
+	if err != nil {
+		return "", err
+	}
+
+	logrus.Info("Taking snapshot of files...")
+	logrus.Debugf("Taking snapshot of files %v", filesToAdd)
+
+	sort.Strings(filesToAdd)
+
+	// Add files to the layered map
+	for _, file := range filesToAdd {
+		if err := s.l.Add(file); err != nil {
+			return "", fmt.Errorf("unable to add file %s to layered map: %s", file, err)
+		}
+	}
+
+	// Get whiteout paths
+	filesToWhiteout := []string{}
+	if shdCheckDelete {
+		_, deletedFiles := util.WalkFS(s.directory, s.l.getFlattenedPathsForWhiteOut(), func(s string) (bool, error) {
+			return true, nil
+		})
+		// The paths left here are the ones that have been deleted in this layer.
+		for path := range deletedFiles {
+			// Only add the whiteout if the directory for the file still exists.
+			dir := filepath.Dir(path)
+			if _, ok := deletedFiles[dir]; !ok {
+				if s.l.MaybeAddWhiteout(path) {
+					logrus.Debugf("Adding whiteout for %s", path)
+					filesToWhiteout = append(filesToWhiteout, path)
+				}
+			}
+		}
+	}
+
+	sort.Strings(filesToWhiteout)
+
+	t := util.NewTar(f)
+	defer t.Close()
+	if err := writeToTar(t, filesToAdd, filesToWhiteout); err != nil {
+		return "", err
+	}
+	return f.Name(), nil
+}
+
+// TakeSnapshotFS takes a snapshot of the filesystem, avoiding directories in the ignorelist, and creates
+// a tarball of the changed files.
+func (s *Snapshotter) TakeSnapshotFS() (string, error) {
+	f, err := ioutil.TempFile(s.getSnapshotPathPrefix(), "")
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+	t := util.NewTar(f)
+	defer t.Close()
+
+	filesToAdd, filesToWhiteOut, err := s.scanFullFilesystem()
+	if err != nil {
+		return "", err
+	}
+
+	if err := writeToTar(t, filesToAdd, filesToWhiteOut); err != nil {
+		return "", err
+	}
+	return f.Name(), nil
+}
+
+func (s *Snapshotter) getSnapshotPathPrefix() string {
+	if snapshotPathPrefix == "" {
+		return config.KanikoDir
+	}
+	return snapshotPathPrefix
+}
+
+func (s *Snapshotter) scanFullFilesystem() ([]string, []string, error) {
+	logrus.Info("Taking snapshot of full filesystem...")
+
+	// Some of the operations that follow (e.g. hashing) depend on the file system being synced,
+	// for example the hashing function that determines if files are equal uses the mtime of the files,
+	// which can lag if sync is not called. Unfortunately there can still be lag if too much data needs
+	// to be flushed or the disk does its own caching/buffering.
+	syscall.Sync()
+
+	s.l.Snapshot()
+
+	changedPaths, deletedPaths := util.WalkFS(s.directory, s.l.getFlattenedPathsForWhiteOut(), s.l.CheckFileChange)
+	timer := timing.Start("Resolving Paths")
+
+	filesToAdd := []string{}
+	resolvedFiles, err := filesystem.ResolvePaths(changedPaths, s.ignorelist)
+	if err != nil {
+		return nil, nil, err
+	}
+	for _, path := range resolvedFiles {
+		if util.CheckIgnoreList(path) {
+			logrus.Tracef("Not adding %s to layer, as it's ignored", path)
+			continue
+		}
+		filesToAdd = append(filesToAdd, path)
+	}
+
+	// The paths left here are the ones that have been deleted in this layer.
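+	// NOTE (editor): worked example of the whiteout step that follows: if the
+	// previous layer recorded /app/tmp/cache.txt and the walk no longer finds
+	// it (while /app/tmp itself survives), the path lands in deletedPaths and
+	// is presumably written to the tar as an OCI-style whiteout entry,
+	// app/tmp/.wh.cache.txt, by t.Whiteout in writeToTar. (Paths are
+	// hypothetical examples.)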
+ filesToWhiteOut := []string{} + for path := range deletedPaths { + // Only add the whiteout if the directory for the file still exists. + dir := filepath.Dir(path) + if _, ok := deletedPaths[dir]; !ok { + if s.l.MaybeAddWhiteout(path) { + logrus.Debugf("Adding whiteout for %s", path) + filesToWhiteOut = append(filesToWhiteOut, path) + } + } + } + timing.DefaultRun.Stop(timer) + + sort.Strings(filesToAdd) + sort.Strings(filesToWhiteOut) + + // Add files to the layered map + for _, file := range filesToAdd { + if err := s.l.Add(file); err != nil { + return nil, nil, fmt.Errorf("unable to add file %s to layered map: %s", file, err) + } + } + return filesToAdd, filesToWhiteOut, nil +} + +func writeToTar(t util.Tar, files, whiteouts []string) error { + timer := timing.Start("Writing tar file") + defer timing.DefaultRun.Stop(timer) + // Now create the tar. + for _, path := range whiteouts { + if err := t.Whiteout(path); err != nil { + return err + } + } + + addedPaths := make(map[string]bool) + for _, path := range files { + if _, fileExists := addedPaths[path]; fileExists { + continue + } + for _, parentPath := range util.ParentDirectories(path) { + if parentPath == "/" { + continue + } + if _, dirExists := addedPaths[parentPath]; dirExists { + continue + } + if err := t.AddFileToTar(parentPath); err != nil { + return err + } + addedPaths[parentPath] = true + } + if err := t.AddFileToTar(path); err != nil { + return err + } + addedPaths[path] = true + } + return nil +} + +// filesWithLinks returns the symlink and the target path if its exists. +func filesWithLinks(path string) ([]string, error) { + link, err := util.GetSymLink(path) + if err == util.ErrNotSymLink { + return []string{path}, nil + } else if err != nil { + return nil, err + } + // Add symlink if it exists in the FS + if !filepath.IsAbs(link) { + link = filepath.Join(filepath.Dir(path), link) + } + if _, err := os.Stat(link); err != nil { + return []string{path}, nil + } + return []string{path, link}, nil +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/timing/timing.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/timing/timing.go new file mode 100644 index 000000000..2d1103d96 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/timing/timing.go @@ -0,0 +1,101 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package timing + +import ( + "bytes" + "encoding/json" + "sync" + "text/template" + "time" +) + +// For testing +var currentTimeFunc = time.Now + +// DefaultRun is the default "singleton" TimedRun instance. +var DefaultRun = NewTimedRun() + +// TimedRun provides a running store of how long is spent in each category. +type TimedRun struct { + cl sync.Mutex + categories map[string]time.Duration // protected by cl +} + +// Stop stops the specified timer and increments the time spent in that category. 
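+//
+// Typical usage, as seen elsewhere in this vendored tree (editor's note):
+//
+//	t := timing.Start("Hashing files")
+//	defer timing.DefaultRun.Stop(t)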
+func (tr *TimedRun) Stop(t *Timer) { + stop := currentTimeFunc() + tr.cl.Lock() + defer tr.cl.Unlock() + if _, ok := tr.categories[t.category]; !ok { + tr.categories[t.category] = 0 + } + tr.categories[t.category] += stop.Sub(t.startTime) +} + +// Start starts a new Timer and returns it. +func Start(category string) *Timer { + t := Timer{ + category: category, + startTime: currentTimeFunc(), + } + return &t +} + +// NewTimedRun returns an initialized TimedRun instance. +func NewTimedRun() *TimedRun { + tr := TimedRun{ + categories: map[string]time.Duration{}, + } + return &tr +} + +// Timer represents a running timer. +type Timer struct { + category string + startTime time.Time +} + +// DefaultFormat is a default format string used by Summary. +var DefaultFormat = template.Must(template.New("").Parse("{{range $c, $t := .}}{{$c}}: {{$t}}\n{{end}}")) + +// Summary outputs a summary of the DefaultTimedRun. +func Summary() string { + return DefaultRun.Summary() +} + +func JSON() (string, error) { + return DefaultRun.JSON() +} + +// Summary outputs a summary of the specified TimedRun. +func (tr *TimedRun) Summary() string { + b := bytes.Buffer{} + + tr.cl.Lock() + defer tr.cl.Unlock() + DefaultFormat.Execute(&b, tr.categories) + return b.String() +} + +func (tr *TimedRun) JSON() (string, error) { + b, err := json.Marshal(tr.categories) + if err != nil { + return "", err + } + return string(b), nil +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/.editorconfig b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/.editorconfig new file mode 100644 index 000000000..c5319e7de --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/.editorconfig @@ -0,0 +1,3 @@ +root = true + +[*] diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/azureblob_util.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/azureblob_util.go new file mode 100644 index 000000000..e856e309a --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/azureblob_util.go @@ -0,0 +1,36 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "regexp" + + "github.com/GoogleContainerTools/kaniko/pkg/constants" +) + +// Validate if the host url provided is with correct suffix for AzureCloud, AzureChinaCloud, AzureGermanCloud and AzureUSGovernment +// RegEX for supported suffix defined in constants.AzureBlobStorageHostRegEx +func ValidAzureBlobStorageHost(context string) bool { + for _, re := range constants.AzureBlobStorageHostRegEx { + validBlobURL := regexp.MustCompile(re) + if validBlobURL.MatchString(context) { + return true + } + } + + return false +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/bucket_util.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/bucket_util.go new file mode 100644 index 000000000..6627339af --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/bucket_util.go @@ -0,0 +1,31 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "strings" + + "github.com/GoogleContainerTools/kaniko/pkg/constants" +) + +func GetBucketAndItem(context string) (string, string) { + split := strings.SplitN(context, "/", 2) + if len(split) == 2 && split[1] != "" { + return split[0], split[1] + } + return split[0], constants.ContextTar +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/command_util.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/command_util.go new file mode 100644 index 000000000..615ef99fa --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/command_util.go @@ -0,0 +1,449 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "fmt" + "net/http" + "net/url" + "os" + "os/user" + "path/filepath" + reflect "reflect" + "strconv" + "strings" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/moby/buildkit/frontend/dockerfile/parser" + "github.com/moby/buildkit/frontend/dockerfile/shell" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + + "github.com/GoogleContainerTools/kaniko/pkg/config" +) + +// for testing +var ( + getUIDAndGID = GetUIDAndGIDFromString +) + +const ( + pathSeparator = "/" +) + +// ResolveEnvironmentReplacementList resolves a list of values by calling resolveEnvironmentReplacement +func ResolveEnvironmentReplacementList(values, envs []string, isFilepath bool) ([]string, error) { + var resolvedValues []string + for _, value := range values { + resolved, err := ResolveEnvironmentReplacement(value, envs, isFilepath) + logrus.Debugf("Resolved %s to %s", value, resolved) + if err != nil { + return nil, err + } + resolvedValues = append(resolvedValues, resolved) + } + return resolvedValues, nil +} + +// ResolveEnvironmentReplacement resolves replacing env variables in some text from envs +// It takes in a string representation of the command, the value to be resolved, and a list of envs (config.Env) +// Ex: value = $foo/newdir, envs = [foo=/foodir], then this should return /foodir/newdir +// The dockerfile/shell package handles processing env values +// It handles escape characters and supports expansion from the config.Env array +// Shlex handles some of the following use cases (these and more are tested in integration tests) +// ""a'b'c"" -> "a'b'c" +// "Rex\ The\ Dog \" -> "Rex The Dog" +// "a\"b" -> "a"b" +func ResolveEnvironmentReplacement(value string, envs []string, isFilepath bool) (string, error) { + shlex := shell.NewLex(parser.DefaultEscapeToken) + fp, err := shlex.ProcessWord(value, envs) + // Check after replacement if value is a remote URL + if !isFilepath || IsSrcRemoteFileURL(fp) { + return fp, err + } + if err != nil { + return "", err + } + isDir := strings.HasSuffix(fp, pathSeparator) + fp = filepath.Clean(fp) + if isDir && !strings.HasSuffix(fp, pathSeparator) { + fp = fp + pathSeparator + } + return fp, nil +} + +func ResolveEnvAndWildcards(sd instructions.SourcesAndDest, fileContext FileContext, envs []string) ([]string, string, error) { + // First, resolve any environment replacement + resolvedEnvs, err := ResolveEnvironmentReplacementList(sd, envs, true) + if err != nil { + return nil, "", errors.Wrap(err, "failed to resolve environment") + } + if len(resolvedEnvs) == 0 { + return nil, "", errors.New("resolved envs is empty") + } + dest := resolvedEnvs[len(resolvedEnvs)-1] + // Resolve wildcards and get a list of resolved sources + srcs, err := ResolveSources(resolvedEnvs[0:len(resolvedEnvs)-1], fileContext.Root) + if err != nil { + return nil, "", errors.Wrap(err, "failed to resolve sources") + } + err = IsSrcsValid(sd, srcs, fileContext) + return srcs, dest, err +} + +// ContainsWildcards returns true if any entry in paths contains wildcards +func ContainsWildcards(paths []string) bool { + for _, path := range paths { + if strings.ContainsAny(path, "*?[") { + return true + } + } + return false +} + +// ResolveSources resolves the given sources if the sources contains wildcards +// It returns a list of resolved sources +func ResolveSources(srcs []string, root string) ([]string, error) { + // If sources contain wildcards, we first need to resolve them to actual 
paths + if !ContainsWildcards(srcs) { + return srcs, nil + } + logrus.Infof("Resolving srcs %v...", srcs) + files, err := RelativeFiles("", root) + if err != nil { + return nil, errors.Wrap(err, "resolving sources") + } + resolved, err := matchSources(srcs, files) + if err != nil { + return nil, errors.Wrap(err, "matching sources") + } + logrus.Debugf("Resolved sources to %v", resolved) + return resolved, nil +} + +// matchSources returns a list of sources that match wildcards +func matchSources(srcs, files []string) ([]string, error) { + var matchedSources []string + for _, src := range srcs { + if IsSrcRemoteFileURL(src) { + matchedSources = append(matchedSources, src) + continue + } + src = filepath.Clean(src) + for _, file := range files { + if filepath.IsAbs(src) { + file = filepath.Join(config.RootDir, file) + } + matched, err := filepath.Match(src, file) + if err != nil { + return nil, err + } + if matched || src == file { + matchedSources = append(matchedSources, file) + } + } + } + return matchedSources, nil +} + +func IsDestDir(path string) bool { + // try to stat the path + fileInfo, err := os.Stat(path) + if err != nil { + // fall back to string-based determination + return strings.HasSuffix(path, pathSeparator) || path == "." + } + // if it's a real path, check the fs response + return fileInfo.IsDir() +} + +// DestinationFilepath returns the destination filepath from the build context to the image filesystem +// If source is a file: +// If dest is a dir, copy it to /dest/relpath +// If dest is a file, copy directly to dest +// If source is a dir: +// Assume dest is also a dir, and copy to dest/ +// If dest is not an absolute filepath, add /cwd to the beginning +func DestinationFilepath(src, dest, cwd string) (string, error) { + _, srcFileName := filepath.Split(src) + newDest := dest + + if !filepath.IsAbs(newDest) { + newDest = filepath.Join(cwd, newDest) + // join call clean on all results. 
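+		// NOTE (editor): e.g. src="app/main.go", dest="bin/", cwd="/work":
+		// filepath.Join returns "/work/bin" (Join cleans the trailing slash),
+		// so the separator is restored below; IsDestDir then sees a directory
+		// and the final result is "/work/bin/main.go". (Values are
+		// hypothetical examples.)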
+ if strings.HasSuffix(dest, pathSeparator) || strings.HasSuffix(dest, ".") { + newDest += pathSeparator + } + } + if IsDestDir(newDest) { + newDest = filepath.Join(newDest, srcFileName) + } + + if len(srcFileName) <= 0 && !strings.HasSuffix(newDest, pathSeparator) { + newDest += pathSeparator + } + + return newDest, nil +} + +// URLDestinationFilepath gives the destination a file from a remote URL should be saved to +func URLDestinationFilepath(rawurl, dest, cwd string, envs []string) (string, error) { + if !IsDestDir(dest) { + if !filepath.IsAbs(dest) { + return filepath.Join(cwd, dest), nil + } + return dest, nil + } + urlBase := filepath.Base(rawurl) + urlBase, err := ResolveEnvironmentReplacement(urlBase, envs, true) + if err != nil { + return "", err + } + destPath := filepath.Join(dest, urlBase) + + if !filepath.IsAbs(dest) { + destPath = filepath.Join(cwd, destPath) + } + return destPath, nil +} + +func IsSrcsValid(srcsAndDest instructions.SourcesAndDest, resolvedSources []string, fileContext FileContext) error { + srcs := srcsAndDest[:len(srcsAndDest)-1] + dest := srcsAndDest[len(srcsAndDest)-1] + + if !ContainsWildcards(srcs) { + totalSrcs := 0 + for _, src := range srcs { + if fileContext.ExcludesFile(src) { + continue + } + totalSrcs++ + } + if totalSrcs > 1 && !IsDestDir(dest) { + return errors.New("when specifying multiple sources in a COPY command, destination must be a directory and end in '/'") + } + } + + // If there is only one source and it's a directory, docker assumes the dest is a directory + if len(resolvedSources) == 1 { + if IsSrcRemoteFileURL(resolvedSources[0]) { + return nil + } + path := filepath.Join(fileContext.Root, resolvedSources[0]) + fi, err := os.Lstat(path) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("failed to get fileinfo for %v", path)) + } + if fi.IsDir() { + return nil + } + } + + totalFiles := 0 + for _, src := range resolvedSources { + if IsSrcRemoteFileURL(src) { + totalFiles++ + continue + } + src = filepath.Clean(src) + files, err := RelativeFiles(src, fileContext.Root) + if err != nil { + return errors.Wrap(err, "failed to get relative files") + } + for _, file := range files { + if fileContext.ExcludesFile(file) { + continue + } + totalFiles++ + } + } + if totalFiles == 0 { + return errors.New("copy failed: no source files specified") + } + // If there are wildcards, and the destination is a file, there must be exactly one file to copy over, + // Otherwise, return an error + if !IsDestDir(dest) && totalFiles > 1 { + return errors.New("when specifying multiple sources in a COPY command, destination must be a directory and end in '/'") + } + return nil +} + +func IsSrcRemoteFileURL(rawurl string) bool { + _, err := url.ParseRequestURI(rawurl) + if err != nil { + return false + } + _, err = http.Get(rawurl) + return err == nil +} + +func UpdateConfigEnv(envVars []instructions.KeyValuePair, config *v1.Config, replacementEnvs []string) error { + newEnvs := make([]instructions.KeyValuePair, len(envVars)) + for index, pair := range envVars { + expandedKey, err := ResolveEnvironmentReplacement(pair.Key, replacementEnvs, false) + if err != nil { + return err + } + expandedValue, err := ResolveEnvironmentReplacement(pair.Value, replacementEnvs, false) + if err != nil { + return err + } + newEnvs[index] = instructions.KeyValuePair{ + Key: expandedKey, + Value: expandedValue, + } + } + + // First, convert config.Env array to []instruction.KeyValuePair + var kvps []instructions.KeyValuePair + for _, env := range config.Env { + entry := 
strings.SplitN(env, "=", 2) + kvps = append(kvps, instructions.KeyValuePair{ + Key: entry[0], + Value: entry[1], + }) + } + // Iterate through new environment variables, and replace existing keys + // We can't use a map because we need to preserve the order of the environment variables +Loop: + for _, newEnv := range newEnvs { + for index, kvp := range kvps { + // If key exists, replace the KeyValuePair... + if kvp.Key == newEnv.Key { + logrus.Debugf("Replacing environment variable %v with %v in config", kvp, newEnv) + kvps[index] = newEnv + continue Loop + } + } + // ... Else, append it as a new env variable + kvps = append(kvps, newEnv) + } + // Convert back to array and set in config + envArray := []string{} + for _, kvp := range kvps { + entry := kvp.Key + "=" + kvp.Value + envArray = append(envArray, entry) + } + config.Env = envArray + return nil +} + +func GetUserGroup(chownStr string, env []string) (int64, int64, error) { + if chownStr == "" { + return DoNotChangeUID, DoNotChangeGID, nil + } + + chown, err := ResolveEnvironmentReplacement(chownStr, env, false) + if err != nil { + return -1, -1, err + } + + uid32, gid32, err := getUIDAndGID(chown, true) + if err != nil { + return -1, -1, err + } + + return int64(uid32), int64(gid32), nil +} + +// Extract user and group id from a string formatted 'user:group'. +// If fallbackToUID is set, the gid is equal to uid if the group is not specified +// otherwise gid is set to zero. +func GetUIDAndGIDFromString(userGroupString string, fallbackToUID bool) (uint32, uint32, error) { + userAndGroup := strings.Split(userGroupString, ":") + userStr := userAndGroup[0] + var groupStr string + if len(userAndGroup) > 1 { + groupStr = userAndGroup[1] + } + + if reflect.TypeOf(userStr).String() == "int" { + return 0, 0, nil + } + + uidStr, gidStr, err := GetUserFromUsername(userStr, groupStr, fallbackToUID) + if err != nil { + return 0, 0, err + } + + // uid and gid need to be fit into uint32 + uid64, err := strconv.ParseUint(uidStr, 10, 32) + if err != nil { + return 0, 0, err + } + + gid64, err := strconv.ParseUint(gidStr, 10, 32) + if err != nil { + return 0, 0, err + } + + return uint32(uid64), uint32(gid64), nil +} + +func GetUserFromUsername(userStr string, groupStr string, fallbackToUID bool) (string, string, error) { + // Lookup by username + userObj, err := Lookup(userStr) + if err != nil { + return "", "", err + } + + // Same dance with groups + var group *user.Group + if groupStr != "" { + group, err = user.LookupGroup(groupStr) + if err != nil { + if _, ok := err.(user.UnknownGroupError); !ok { + return "", "", err + } + group, err = user.LookupGroupId(groupStr) + if err != nil { + return "", "", err + } + } + } + + uid := userObj.Uid + gid := "0" + if fallbackToUID { + gid = userObj.Gid + } + if group != nil { + gid = group.Gid + } + + return uid, gid, nil +} + +func Lookup(userStr string) (*user.User, error) { + userObj, err := user.Lookup(userStr) + if err != nil { + if _, ok := err.(user.UnknownUserError); !ok { + return nil, err + } + + // Lookup by id + u, e := user.LookupId(userStr) + if e != nil { + return nil, err + } + + userObj = u + } + + return userObj, nil +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/fs_util.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/fs_util.go new file mode 100644 index 000000000..d5b472559 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/fs_util.go @@ -0,0 +1,1042 @@ +/* +Copyright 2018 Google LLC + 
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"archive/tar"
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"net/http"
+	"os"
+	"path/filepath"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/GoogleContainerTools/kaniko/pkg/config"
+	"github.com/GoogleContainerTools/kaniko/pkg/timing"
+	"github.com/docker/docker/builder/dockerignore"
+	"github.com/docker/docker/pkg/fileutils"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+	"github.com/karrick/godirwalk"
+	otiai10Cpy "github.com/otiai10/copy"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+const DoNotChangeUID = -1
+const DoNotChangeGID = -1
+
+const (
+	snapshotTimeout = "SNAPSHOT_TIMEOUT_DURATION"
+	defaultTimeout = "90m"
+)
+
+type IgnoreListEntry struct {
+	Path string
+	PrefixMatchOnly bool
+}
+
+var defaultIgnoreList = []IgnoreListEntry{
+	{
+		Path: config.KanikoDir,
+		PrefixMatchOnly: false,
+	},
+	{
+		// similarly, we ignore /etc/mtab, since there is no way to know if the file was mounted or came
+		// from the base image
+		Path: "/etc/mtab",
+		PrefixMatchOnly: false,
+	},
+	{
+		// we ignore /tmp/apt-key-gpghome, since the apt keys are added temporarily in this directory
+		Path: "/tmp/apt-key-gpghome",
+		PrefixMatchOnly: true,
+	},
+}
+
+var ignorelist = append([]IgnoreListEntry{}, defaultIgnoreList...)
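+
+// NOTE (editor): PrefixMatchOnly is interpreted by HasFilepathPrefix further
+// down in this file: with PrefixMatchOnly: true, the /tmp/apt-key-gpghome
+// entry ignores paths strictly under that directory (e.g.
+// /tmp/apt-key-gpghome/key.asc) but not the exact path itself, while
+// PrefixMatchOnly: false also matches the exact path.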
+ +var volumes = []string{} + +type FileContext struct { + Root string + ExcludedFiles []string +} + +type ExtractFunction func(string, *tar.Header, io.Reader) error + +type FSConfig struct { + includeWhiteout bool + extractFunc ExtractFunction +} + +type FSOpt func(*FSConfig) + +func IgnoreList() []IgnoreListEntry { + return ignorelist +} + +func AddToIgnoreList(entry IgnoreListEntry) { + ignorelist = append(ignorelist, entry) +} + +func AddToDefaultIgnoreList(entry IgnoreListEntry) { + defaultIgnoreList = append(defaultIgnoreList, entry) +} + +func IncludeWhiteout() FSOpt { + return func(opts *FSConfig) { + opts.includeWhiteout = true + } +} + +func ExtractFunc(extractFunc ExtractFunction) FSOpt { + return func(opts *FSConfig) { + opts.extractFunc = extractFunc + } +} + +// GetFSFromImage extracts the layers of img to root +// It returns a list of all files extracted +func GetFSFromImage(root string, img v1.Image, extract ExtractFunction) ([]string, error) { + if img == nil { + return nil, errors.New("image cannot be nil") + } + + layers, err := img.Layers() + if err != nil { + return nil, err + } + + return GetFSFromLayers(root, layers, ExtractFunc(extract)) +} + +func GetFSFromLayers(root string, layers []v1.Layer, opts ...FSOpt) ([]string, error) { + volumes = []string{} + cfg := new(FSConfig) + if err := InitIgnoreList(true); err != nil { + return nil, errors.Wrap(err, "initializing filesystem ignore list") + } + logrus.Debugf("Ignore list: %v", ignorelist) + + for _, opt := range opts { + opt(cfg) + } + + if cfg.extractFunc == nil { + return nil, errors.New("must supply an extract function") + } + + extractedFiles := []string{} + for i, l := range layers { + if mediaType, err := l.MediaType(); err == nil { + logrus.Tracef("Extracting layer %d of media type %s", i, mediaType) + } else { + logrus.Tracef("Extracting layer %d", i) + } + + r, err := l.Uncompressed() + if err != nil { + return nil, err + } + defer r.Close() + + tr := tar.NewReader(r) + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + + if err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("error reading tar %d", i)) + } + + path := filepath.Join(root, filepath.Clean(hdr.Name)) + base := filepath.Base(path) + dir := filepath.Dir(path) + + if strings.HasPrefix(base, ".wh.") { + logrus.Debugf("Whiting out %s", path) + + name := strings.TrimPrefix(base, ".wh.") + path := filepath.Join(dir, name) + + if CheckIgnoreList(path) { + logrus.Debugf("Not deleting %s, as it's ignored", path) + continue + } + if childDirInIgnoreList(path) { + logrus.Debugf("Not deleting %s, as it contains a ignored path", path) + continue + } + + if err := os.RemoveAll(path); err != nil { + return nil, errors.Wrapf(err, "removing whiteout %s", hdr.Name) + } + + if !cfg.includeWhiteout { + logrus.Debug("not including whiteout files") + continue + } + + } + + if err := cfg.extractFunc(root, hdr, tr); err != nil { + return nil, err + } + + extractedFiles = append(extractedFiles, filepath.Join(root, filepath.Clean(hdr.Name))) + } + } + return extractedFiles, nil +} + +// DeleteFilesystem deletes the extracted image file system +func DeleteFilesystem() error { + logrus.Info("Deleting filesystem...") + return filepath.Walk(config.RootDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + // ignore errors when deleting. 
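+		// NOTE (editor): deletion is best-effort; for example, a path may
+		// already be gone because its parent was removed earlier in this walk.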
+ return nil + } + + if CheckIgnoreList(path) { + if !isExist(path) { + logrus.Debugf("Path %s ignored, but not exists", path) + return nil + } + if info.IsDir() { + return filepath.SkipDir + } + logrus.Debugf("Not deleting %s, as it's ignored", path) + return nil + } + if childDirInIgnoreList(path) { + logrus.Debugf("Not deleting %s, as it contains a ignored path", path) + return nil + } + if path == config.RootDir { + return nil + } + return os.RemoveAll(path) + }) +} + +// isExists returns true if path exists +func isExist(path string) bool { + if _, err := os.Stat(path); err == nil { + return true + } + return false +} + +// childDirInIgnoreList returns true if there is a child file or directory of the path in the ignorelist +func childDirInIgnoreList(path string) bool { + for _, d := range ignorelist { + if HasFilepathPrefix(d.Path, path, d.PrefixMatchOnly) { + return true + } + } + return false +} + +// unTar returns a list of files that have been extracted from the tar archive at r to the path at dest +func unTar(r io.Reader, dest string) ([]string, error) { + var extractedFiles []string + tr := tar.NewReader(r) + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + if err := ExtractFile(dest, hdr, tr); err != nil { + return nil, err + } + extractedFiles = append(extractedFiles, filepath.Join(dest, filepath.Clean(hdr.Name))) + } + return extractedFiles, nil +} + +func ExtractFile(dest string, hdr *tar.Header, tr io.Reader) error { + path := filepath.Join(dest, filepath.Clean(hdr.Name)) + base := filepath.Base(path) + dir := filepath.Dir(path) + mode := hdr.FileInfo().Mode() + uid := hdr.Uid + gid := hdr.Gid + + abs, err := filepath.Abs(path) + if err != nil { + return err + } + + if CheckIgnoreList(abs) && !checkIgnoreListRoot(dest) { + logrus.Debugf("Not adding %s because it is ignored", path) + return nil + } + switch hdr.Typeflag { + case tar.TypeReg: + logrus.Tracef("creating file %s", path) + + // It's possible a file is in the tar before its directory, + // or a file was copied over a directory prior to now + fi, err := os.Stat(dir) + if os.IsNotExist(err) || !fi.IsDir() { + logrus.Debugf("base %s for file %s does not exist. Creating.", base, path) + + if err := os.MkdirAll(dir, 0755); err != nil { + return err + } + } + + // Check if something already exists at path (symlinks etc.) + // If so, delete it + if FilepathExists(path) { + if err := os.RemoveAll(path); err != nil { + return errors.Wrapf(err, "error removing %s to make way for new file.", path) + } + } + + currFile, err := os.Create(path) + if err != nil { + return err + } + + if _, err = io.Copy(currFile, tr); err != nil { + return err + } + + if err = setFilePermissions(path, mode, uid, gid); err != nil { + return err + } + + if err = setFileTimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return err + } + + currFile.Close() + case tar.TypeDir: + logrus.Tracef("creating dir %s", path) + if err := mkdirAllWithPermissions(path, mode, int64(uid), int64(gid)); err != nil { + return err + } + + case tar.TypeLink: + logrus.Tracef("link from %s to %s", hdr.Linkname, path) + abs, err := filepath.Abs(hdr.Linkname) + if err != nil { + return err + } + if CheckIgnoreList(abs) { + logrus.Tracef("skipping symlink from %s to %s because %s is ignored", hdr.Linkname, path, hdr.Linkname) + return nil + } + // The base directory for a link may not exist before it is created. 
+ if err := os.MkdirAll(dir, 0755); err != nil { + return err + } + // Check if something already exists at path + // If so, delete it + if FilepathExists(path) { + if err := os.RemoveAll(path); err != nil { + return errors.Wrapf(err, "error removing %s to make way for new link", hdr.Name) + } + } + link := filepath.Clean(filepath.Join(dest, hdr.Linkname)) + if err := os.Link(link, path); err != nil { + return err + } + + case tar.TypeSymlink: + logrus.Tracef("symlink from %s to %s", hdr.Linkname, path) + // The base directory for a symlink may not exist before it is created. + if err := os.MkdirAll(dir, 0755); err != nil { + return err + } + // Check if something already exists at path + // If so, delete it + if FilepathExists(path) { + if err := os.RemoveAll(path); err != nil { + return errors.Wrapf(err, "error removing %s to make way for new symlink", hdr.Name) + } + } + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + } + return nil +} + +func IsInProvidedIgnoreList(path string, wl []IgnoreListEntry) bool { + for _, entry := range wl { + if !entry.PrefixMatchOnly && path == entry.Path { + return true + } + } + + return false +} + +func IsInIgnoreList(path string) bool { + return IsInProvidedIgnoreList(path, ignorelist) +} + +func CheckProvidedIgnoreList(path string, wl []IgnoreListEntry) bool { + for _, wl := range ignorelist { + if HasFilepathPrefix(path, wl.Path, wl.PrefixMatchOnly) { + return true + } + } + + return false +} + +func CheckIgnoreList(path string) bool { + return CheckProvidedIgnoreList(path, ignorelist) +} + +func checkIgnoreListRoot(root string) bool { + if root == config.RootDir { + return false + } + return CheckIgnoreList(root) +} + +// Get ignorelist from roots of mounted files +// Each line of /proc/self/mountinfo is in the form: +// 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue +// (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) +// Where (5) is the mount point relative to the process's root +// From: https://www.kernel.org/doc/Documentation/filesystems/proc.txt +func DetectFilesystemIgnoreList(path string) error { + logrus.Trace("Detecting filesystem ignore list") + f, err := os.Open(path) + if err != nil { + return err + } + defer f.Close() + reader := bufio.NewReader(f) + for { + line, err := reader.ReadString('\n') + logrus.Tracef("Read the following line from %s: %s", path, line) + if err != nil && err != io.EOF { + return err + } + lineArr := strings.Split(line, " ") + if len(lineArr) < 5 { + if err == io.EOF { + logrus.Tracef("Reached end of file %s", path) + break + } + continue + } + if lineArr[4] != config.RootDir { + logrus.Tracef("Adding ignore list entry %s from line: %s", lineArr[4], line) + ignorelist = append(ignorelist, IgnoreListEntry{ + Path: lineArr[4], + PrefixMatchOnly: false, + }) + } + if err == io.EOF { + logrus.Tracef("Reached end of file %s", path) + break + } + } + return nil +} + +// RelativeFiles returns a list of all files at the filepath relative to root +func RelativeFiles(fp string, root string) ([]string, error) { + var files []string + fullPath := filepath.Join(root, fp) + logrus.Debugf("Getting files and contents at root %s for %s", root, fullPath) + err := filepath.Walk(fullPath, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if CheckIgnoreList(path) && !HasFilepathPrefix(path, root, false) { + return nil + } + relPath, err := filepath.Rel(root, path) + if err != nil { + return err + } + files = append(files, relPath) + return 
nil
+	})
+	return files, err
+}
+
+// ParentDirectories returns a list of paths to all parent directories
+// Ex. /some/temp/dir -> [/, /some, /some/temp]
+func ParentDirectories(path string) []string {
+	dir := filepath.Clean(path)
+	var paths []string
+	for {
+		if dir == filepath.Clean(config.RootDir) || dir == "" || dir == "." {
+			break
+		}
+		dir, _ = filepath.Split(dir)
+		dir = filepath.Clean(dir)
+		paths = append([]string{dir}, paths...)
+	}
+	if len(paths) == 0 {
+		paths = []string{config.RootDir}
+	}
+	return paths
+}
+
+// ParentDirectoriesWithoutLeadingSlash returns a list of paths to all parent directories,
+// where the subdirectories do not contain a leading /
+// Ex. /some/temp/dir -> [/, some, some/temp]
+func ParentDirectoriesWithoutLeadingSlash(path string) []string {
+	path = filepath.Clean(path)
+	dirs := strings.Split(path, "/")
+	dirPath := ""
+	paths := []string{config.RootDir}
+	for index, dir := range dirs {
+		if dir == "" || index == (len(dirs)-1) {
+			continue
+		}
+		dirPath = filepath.Join(dirPath, dir)
+		paths = append(paths, dirPath)
+	}
+	return paths
+}
+
+// FilepathExists returns true if the path exists
+func FilepathExists(path string) bool {
+	_, err := os.Lstat(path)
+	return !os.IsNotExist(err)
+}
+
+// CreateFile creates a file at path and copies over contents from the reader
+func CreateFile(path string, reader io.Reader, perm os.FileMode, uid uint32, gid uint32) error {
+	// Create directory path if it doesn't exist
+	if err := createParentDirectory(path); err != nil {
+		return errors.Wrap(err, "creating parent dir")
+	}
+
+	dest, err := os.Create(path)
+	if err != nil {
+		return errors.Wrap(err, "creating file")
+	}
+	defer dest.Close()
+	if _, err := io.Copy(dest, reader); err != nil {
+		return errors.Wrap(err, "copying file")
+	}
+	return setFilePermissions(path, perm, int(uid), int(gid))
+}
+
+// AddVolumePathToIgnoreList adds the given path to the volume ignorelist.
+func AddVolumePathToIgnoreList(path string) {
+	logrus.Infof("adding volume %s to ignorelist", path)
+	ignorelist = append(ignorelist, IgnoreListEntry{
+		Path: path,
+		PrefixMatchOnly: true,
+	})
+	volumes = append(volumes, path)
+}
+
+// DownloadFileToDest downloads the file at rawurl to the given dest for the ADD command
+// From add command docs:
+// 1. If the source is a remote file URL:
+// - destination will have permissions of 0600
+// - If remote file has HTTP Last-Modified header, we set the mtime of the file to that timestamp
+func DownloadFileToDest(rawurl, dest string, uid, gid int64) error {
+	resp, err := http.Get(rawurl)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode >= 400 {
+		return fmt.Errorf("invalid response status %d", resp.StatusCode)
+	}
+
+	if err := CreateFile(dest, resp.Body, 0600, uint32(uid), uint32(gid)); err != nil {
+		return err
+	}
+	mTime := time.Time{}
+	lastMod := resp.Header.Get("Last-Modified")
+	if lastMod != "" {
+		if parsedMTime, err := http.ParseTime(lastMod); err == nil {
+			mTime = parsedMTime
+		}
+	}
+	return os.Chtimes(dest, mTime, mTime)
+}
+
+// DetermineTargetFileOwnership returns the user provided uid/gid combination.
+// If they are set to -1, the uid/gid from the original file is used.
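+// For example (editor's note): uid=-1, gid=1000 on a file owned by 33:33
+// yields (33, 1000): the uid falls back to the file's owner, the gid is kept.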
+func DetermineTargetFileOwnership(fi os.FileInfo, uid, gid int64) (int64, int64) {
+	if uid <= DoNotChangeUID {
+		uid = int64(fi.Sys().(*syscall.Stat_t).Uid)
+	}
+	if gid <= DoNotChangeGID {
+		gid = int64(fi.Sys().(*syscall.Stat_t).Gid)
+	}
+	return uid, gid
+}
+
+// CopyDir copies the file or directory at src to dest
+// It returns a list of files it copied over
+func CopyDir(src, dest string, context FileContext, uid, gid int64) ([]string, error) {
+	files, err := RelativeFiles("", src)
+	if err != nil {
+		return nil, errors.Wrap(err, "copying dir")
+	}
+	var copiedFiles []string
+	for _, file := range files {
+		fullPath := filepath.Join(src, file)
+		fi, err := os.Lstat(fullPath)
+		if err != nil {
+			return nil, errors.Wrap(err, "copying dir")
+		}
+		if context.ExcludesFile(fullPath) {
+			logrus.Debugf("%s found in .dockerignore, ignoring", src)
+			continue
+		}
+		destPath := filepath.Join(dest, file)
+		if fi.IsDir() {
+			logrus.Tracef("Creating directory %s", destPath)
+
+			mode := fi.Mode()
+			uid, gid = DetermineTargetFileOwnership(fi, uid, gid)
+			if err := mkdirAllWithPermissions(destPath, mode, uid, gid); err != nil {
+				return nil, err
+			}
+		} else if IsSymlink(fi) {
+			// If file is a symlink, we want to create the same relative symlink
+			if _, err := CopySymlink(fullPath, destPath, context); err != nil {
+				return nil, err
+			}
+		} else {
+			// ... Else, we want to copy over a file
+			if _, err := CopyFile(fullPath, destPath, context, uid, gid); err != nil {
+				return nil, err
+			}
+		}
+		copiedFiles = append(copiedFiles, destPath)
+	}
+	return copiedFiles, nil
+}
+
+// CopySymlink copies the symlink at src to dest.
+func CopySymlink(src, dest string, context FileContext) (bool, error) {
+	if context.ExcludesFile(src) {
+		logrus.Debugf("%s found in .dockerignore, ignoring", src)
+		return true, nil
+	}
+	if FilepathExists(dest) {
+		if err := os.RemoveAll(dest); err != nil {
+			return false, err
+		}
+	}
+	if err := createParentDirectory(dest); err != nil {
+		return false, err
+	}
+	link, err := os.Readlink(src)
+	if err != nil {
+		logrus.Debugf("could not read link for %s", src)
+	}
+	return false, os.Symlink(link, dest)
+}
+
+// CopyFile copies the file at src to dest
+func CopyFile(src, dest string, context FileContext, uid, gid int64) (bool, error) {
+	if context.ExcludesFile(src) {
+		logrus.Debugf("%s found in .dockerignore, ignoring", src)
+		return true, nil
+	}
+	if src == dest {
+		// This is a no-op. Move on, but don't list it as ignored.
+		// We have to make sure we do this so we don't overwrite our own file.
+		// See issue #904 for an example.
+ return false, nil + } + fi, err := os.Stat(src) + if err != nil { + return false, err + } + logrus.Debugf("Copying file %s to %s", src, dest) + srcFile, err := os.Open(src) + if err != nil { + return false, err + } + defer srcFile.Close() + uid, gid = DetermineTargetFileOwnership(fi, uid, gid) + return false, CreateFile(dest, srcFile, fi.Mode(), uint32(uid), uint32(gid)) +} + +func NewFileContextFromDockerfile(dockerfilePath, buildcontext string) (FileContext, error) { + fileContext := FileContext{Root: buildcontext} + excludedFiles, err := getExcludedFiles(dockerfilePath, buildcontext) + if err != nil { + return fileContext, err + } + fileContext.ExcludedFiles = excludedFiles + return fileContext, nil +} + +// getExcludedFiles returns a list of files to exclude from the .dockerignore +func getExcludedFiles(dockerfilePath, buildcontext string) ([]string, error) { + path := dockerfilePath + ".dockerignore" + if !FilepathExists(path) { + path = filepath.Join(buildcontext, ".dockerignore") + } + if !FilepathExists(path) { + return nil, nil + } + logrus.Infof("Using dockerignore file: %v", path) + contents, err := ioutil.ReadFile(path) + if err != nil { + return nil, errors.Wrap(err, "parsing .dockerignore") + } + reader := bytes.NewBuffer(contents) + return dockerignore.ReadAll(reader) +} + +// ExcludesFile returns true if the file context specified this file should be ignored. +// Usually this is specified via .dockerignore +func (c FileContext) ExcludesFile(path string) bool { + if HasFilepathPrefix(path, c.Root, false) { + var err error + path, err = filepath.Rel(c.Root, path) + if err != nil { + logrus.Errorf("unable to get relative path, including %s in build: %v", path, err) + return false + } + } + match, err := fileutils.Matches(path, c.ExcludedFiles) + if err != nil { + logrus.Errorf("error matching, including %s in build: %v", path, err) + return false + } + return match +} + +// HasFilepathPrefix checks if the given file path begins with prefix +func HasFilepathPrefix(path, prefix string, prefixMatchOnly bool) bool { + prefix = filepath.Clean(prefix) + prefixArray := strings.Split(prefix, "/") + path = filepath.Clean(path) + pathArray := strings.SplitN(path, "/", len(prefixArray)+1) + + if len(pathArray) < len(prefixArray) { + return false + } + if prefixMatchOnly && len(pathArray) == len(prefixArray) { + return false + } + + for index := range prefixArray { + m, err := filepath.Match(prefixArray[index], pathArray[index]) + if err != nil { + return false + } + if !m { + return false + } + } + return true +} + +func Volumes() []string { + return volumes +} + +func mkdirAllWithPermissions(path string, mode os.FileMode, uid, gid int64) error { + // Check if a file already exists on the path, if yes then delete it + info, err := os.Stat(path) + if !os.IsNotExist(err) && !info.IsDir() { + logrus.Tracef("removing file because it needs to be a directory %s", path) + if err := os.Remove(path); err != nil { + return errors.Wrapf(err, "error removing %s to make way for new directory.", path) + } + } + + if err := os.MkdirAll(path, mode); err != nil { + return err + } + if uid > math.MaxUint32 || gid > math.MaxUint32 { + // due to https://github.com/golang/go/issues/8537 + return errors.New(fmt.Sprintf("Numeric User-ID or Group-ID greater than %v are not properly supported.", uint64(math.MaxUint32))) + } + if err := os.Chown(path, int(uid), int(gid)); err != nil { + return err + } + // In some cases, MkdirAll doesn't change the permissions, so run Chmod + // Must chmod after chown because 
chown resets the file mode.
+	return os.Chmod(path, mode)
+}
+
+func setFilePermissions(path string, mode os.FileMode, uid, gid int) error {
+	if err := os.Chown(path, uid, gid); err != nil {
+		return err
+	}
+	// manually set permissions on file, since the default umask (022) will interfere
+	// Must chmod after chown because chown resets the file mode.
+	return os.Chmod(path, mode)
+}
+
+func setFileTimes(path string, aTime, mTime time.Time) error {
+	// The zero value of time.Time is not a valid argument to os.Chtimes as it cannot be
+	// converted into a valid argument to the syscall that os.Chtimes uses. If mTime or
+	// aTime are zero we convert them to the zero value for Unix Epoch.
+	if mTime.IsZero() {
+		logrus.Tracef("mod time for %s is zero, converting to zero for epoch", path)
+		mTime = time.Unix(0, 0)
+	}
+
+	if aTime.IsZero() {
+		logrus.Tracef("access time for %s is zero, converting to zero for epoch", path)
+		aTime = time.Unix(0, 0)
+	}
+
+	// We set AccessTime because it's a required arg but we only care about
+	// ModTime. The file will get accessed again so AccessTime will change.
+	if err := os.Chtimes(path, aTime, mTime); err != nil {
+		return errors.Wrapf(
+			err,
+			"couldn't modify times: atime %v mtime %v",
+			aTime,
+			mTime,
+		)
+	}
+
+	return nil
+}
+
+// CreateTargetTarfile creates the target tar file for downloading the context file,
+// creating its parent directory if it does not exist.
+func CreateTargetTarfile(tarpath string) (*os.File, error) {
+	baseDir := filepath.Dir(tarpath)
+	if _, err := os.Lstat(baseDir); os.IsNotExist(err) {
+		logrus.Debugf("baseDir %s for file %s does not exist. Creating.", baseDir, tarpath)
+		if err := os.MkdirAll(baseDir, 0755); err != nil {
+			return nil, err
+		}
+	}
+	return os.Create(tarpath)
+}
+
+// IsSymlink returns true if the file is a symlink
+func IsSymlink(fi os.FileInfo) bool {
+	return fi.Mode()&os.ModeSymlink != 0
+}
+
+var ErrNotSymLink = fmt.Errorf("not a symlink")
+
+func GetSymLink(path string) (string, error) {
+	if err := getSymlink(path); err != nil {
+		return "", err
+	}
+	return os.Readlink(path)
+}
+
+func EvalSymLink(path string) (string, error) {
+	if err := getSymlink(path); err != nil {
+		return "", err
+	}
+	return filepath.EvalSymlinks(path)
+}
+
+func getSymlink(path string) error {
+	fi, err := os.Lstat(path)
+	if err != nil {
+		return err
+	}
+	if !IsSymlink(fi) {
+		return ErrNotSymLink
+	}
+	return nil
+}
+
+// For cross-stage dependencies kaniko must persist the referenced path so that it can be used in
+// the dependent stage. For symlinks we copy the target path, because copying the symlink would
+// result in a dead link.
+func CopyFileOrSymlink(src string, destDir string, root string) error {
+	destFile := filepath.Join(destDir, src)
+	src = filepath.Join(root, src)
+	// Only treat src as a symlink when the Lstat succeeded; otherwise fall
+	// through and let the copy surface the error.
+	if fi, err := os.Lstat(src); err == nil && IsSymlink(fi) {
+		link, err := os.Readlink(src)
+		if err != nil {
+			return errors.Wrap(err, "copying file or symlink")
+		}
+		if err := createParentDirectory(destFile); err != nil {
+			return err
+		}
+		return os.Symlink(link, destFile)
+	}
+	return otiai10Cpy.Copy(src, destFile)
+}
+
+func createParentDirectory(path string) error {
+	baseDir := filepath.Dir(path)
+	if info, err := os.Lstat(baseDir); os.IsNotExist(err) {
+		logrus.Tracef("baseDir %s for file %s does not exist.
Creating.", baseDir, path) + if err := os.MkdirAll(baseDir, 0755); err != nil { + return err + } + } else if IsSymlink(info) { + logrus.Infof("destination cannot be a symlink %v", baseDir) + return errors.New("destination cannot be a symlink") + } + return nil +} + +// InitIgnoreList will initialize the ignore list using: +// - defaultIgnoreList +// - mounted paths via DetectFilesystemIgnoreList() +func InitIgnoreList(detectFilesystem bool) error { + logrus.Trace("Initializing ignore list") + ignorelist = append([]IgnoreListEntry{}, defaultIgnoreList...) + + if detectFilesystem { + if err := DetectFilesystemIgnoreList(config.IgnoreListPath); err != nil { + return errors.Wrap(err, "checking filesystem mount paths for ignore list") + } + } + + return nil +} + +type walkFSResult struct { + filesAdded []string + existingPaths map[string]struct{} +} + +// WalkFS given a directory and list of existing files, +// returns a list of changed filed determined by changeFunc and a list +// of deleted files. +// It timesout after 90 mins. Can be configured via setting an environment variable +// SNAPSHOT_TIMEOUT in the kaniko pod definition. +func WalkFS(dir string, existingPaths map[string]struct{}, changeFunc func(string) (bool, error)) ([]string, map[string]struct{}) { + timeOutStr := os.Getenv(snapshotTimeout) + if timeOutStr == "" { + logrus.Tracef("%s environment not set. Using default snapshot timeout %s", snapshotTimeout, defaultTimeout) + timeOutStr = defaultTimeout + } + timeOut, err := time.ParseDuration(timeOutStr) + if err != nil { + logrus.Fatalf("could not parse duration %s", timeOutStr) + } + timer := timing.Start("Walking filesystem with timeout") + ch := make(chan walkFSResult, 1) + + go func() { + ch <- gowalkDir(dir, existingPaths, changeFunc) + }() + + // Listen on our channel AND a timeout channel - which ever happens first. 
+	select {
+	case res := <-ch:
+		timing.DefaultRun.Stop(timer)
+		return res.filesAdded, res.existingPaths
+	case <-time.After(timeOut):
+		timing.DefaultRun.Stop(timer)
+		logrus.Fatalf("timed out snapshotting FS in %s", timeOutStr)
+		return nil, nil
+	}
+}
+
+func gowalkDir(dir string, existingPaths map[string]struct{}, changeFunc func(string) (bool, error)) walkFSResult {
+	foundPaths := make([]string, 0)
+	godirwalk.Walk(dir, &godirwalk.Options{
+		Callback: func(path string, ent *godirwalk.Dirent) error {
+			logrus.Tracef("Analyzing path %s", path)
+			if IsInIgnoreList(path) {
+				if IsDestDir(path) {
+					logrus.Tracef("Skipping paths under %s, as it is an ignored directory", path)
+					return filepath.SkipDir
+				}
+				return nil
+			}
+			delete(existingPaths, path)
+			if t, err := changeFunc(path); err != nil {
+				return err
+			} else if t {
+				foundPaths = append(foundPaths, path)
+			}
+			return nil
+		},
+		Unsorted: true,
+	},
+	)
+	return walkFSResult{foundPaths, existingPaths}
+}
+
+// GetFSInfoMap walks the given directory and returns a map of FileInfo for the
+// files that are new or changed relative to the existing map, together with the
+// list of their paths.
+func GetFSInfoMap(dir string, existing map[string]os.FileInfo) (map[string]os.FileInfo, []string) {
+	fileMap := map[string]os.FileInfo{}
+	foundPaths := []string{}
+	timer := timing.Start("Walking filesystem with Stat")
+	godirwalk.Walk(dir, &godirwalk.Options{
+		Callback: func(path string, ent *godirwalk.Dirent) error {
+			if CheckIgnoreList(path) {
+				if IsDestDir(path) {
+					logrus.Tracef("Skipping paths under %s, as it is an ignored directory", path)
+					return filepath.SkipDir
+				}
+				return nil
+			}
+			if fi, err := os.Lstat(path); err == nil {
+				if fiPrevious, ok := existing[path]; ok {
+					// check if file changed
+					if !isSame(fiPrevious, fi) {
+						fileMap[path] = fi
+						foundPaths = append(foundPaths, path)
+					}
+				} else {
+					// new path
+					fileMap[path] = fi
+					foundPaths = append(foundPaths, path)
+				}
+			}
+			return nil
+		},
+		Unsorted: true,
+	},
+	)
+	timing.DefaultRun.Stop(timer)
+	return fileMap, foundPaths
+}
+
+func isSame(fi1, fi2 os.FileInfo) bool {
+	return fi1.Mode() == fi2.Mode() &&
+		// file modification time
+		fi1.ModTime() == fi2.ModTime() &&
+		// file size
+		fi1.Size() == fi2.Size() &&
+		// file user id
+		uint64(fi1.Sys().(*syscall.Stat_t).Uid) == uint64(fi2.Sys().(*syscall.Stat_t).Uid) &&
+		// file group id
+		uint64(fi1.Sys().(*syscall.Stat_t).Gid) == uint64(fi2.Sys().(*syscall.Stat_t).Gid)
+}
diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/groupids_cgo.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/groupids_cgo.go
new file mode 100644
index 000000000..376762e07
--- /dev/null
+++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/groupids_cgo.go
@@ -0,0 +1,30 @@
+//go:build (linux || darwin) && cgo
+// +build linux darwin
+// +build cgo
+
+/*
+Copyright 2020 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"os/user"
+)
+
+// groupIDs returns all of the group IDs a user is a member of
+func groupIDs(u *user.User) ([]string, error) {
+	return u.GroupIds()
+}
diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/groupids_fallback.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/groupids_fallback.go
new file mode 100644
index 000000000..41552674d
--- /dev/null
+++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/groupids_fallback.go
@@ -0,0 +1,97 @@
+//go:build (linux || darwin) && !cgo
+// +build linux darwin
+// +build !cgo
+
+/*
+Copyright 2020 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+	"os"
+	"os/user"
+	"strconv"
+	"strings"
+
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+var groupFile = "/etc/group"
+
+type group struct {
+	id      string   // group ID
+	name    string   // group name
+	members []string // usernames of the group's members
+}
+
+// groupIDs returns all of the group IDs a user is a member of
+func groupIDs(u *user.User) ([]string, error) {
+	logrus.Infof("performing slow lookup of group ids for %s", u.Username)
+
+	f, err := os.Open(groupFile)
+	if err != nil {
+		return nil, errors.Wrap(err, "open")
+	}
+	defer f.Close()
+
+	gids := []string{u.Gid}
+
+	for _, g := range localGroups(f) {
+		for _, m := range g.members {
+			if m == u.Username {
+				gids = append(gids, g.id)
+			}
+		}
+	}
+
+	return gids, nil
+}
+
+// localGroups parses a reader in /etc/group form, returning parsed group data.
+// It is based on src/os/user/lookup_unix.go, but extended to include group members.
+func localGroups(r io.Reader) []*group {
+	var groups []*group
+
+	bs := bufio.NewScanner(r)
+	for bs.Scan() {
+		line := bs.Bytes()
+
+		// There's no spec for /etc/passwd or /etc/group, but we try to follow
+		// the same rules as the glibc parser, which allows comments and blank
+		// space at the beginning of a line.
+		line = bytes.TrimSpace(line)
+		if len(line) == 0 || line[0] == '#' {
+			continue
+		}
+
+		// wheel:*:0:root,anotherGrp
+		parts := strings.SplitN(string(line), ":", 4)
+		if len(parts) < 4 {
+			// malformed line; skip it rather than index out of range
+			continue
+		}
+		if _, err := strconv.Atoi(parts[2]); err != nil {
+			continue
+		}
+
+		groups = append(groups, &group{name: parts[0], id: parts[2], members: strings.Split(parts[3], ",")})
+	}
+	return groups
+}
diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/syscall_credentials.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/syscall_credentials.go
new file mode 100644
index 000000000..f0a2a3406
--- /dev/null
+++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/syscall_credentials.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2020 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "strconv" + "syscall" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +func SyscallCredentials(userStr string) (*syscall.Credential, error) { + uid, gid, err := GetUIDAndGIDFromString(userStr, true) + if err != nil { + return nil, errors.Wrap(err, "get uid/gid") + } + + u, err := Lookup(userStr) + if err != nil { + return nil, errors.Wrap(err, "lookup") + } + logrus.Infof("util.Lookup returned: %+v", u) + + var groups []uint32 + + gidStr, err := groupIDs(u) + if err != nil { + return nil, errors.Wrap(err, "group ids for user") + } + + for _, g := range gidStr { + i, err := strconv.ParseUint(g, 10, 32) + if err != nil { + return nil, errors.Wrap(err, "parseuint") + } + + groups = append(groups, uint32(i)) + } + + return &syscall.Credential{ + Uid: uid, + Gid: gid, + Groups: groups, + }, nil +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/tar_util.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/tar_util.go new file mode 100644 index 000000000..ba9dc8738 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/tar_util.go @@ -0,0 +1,249 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "archive/tar" + "compress/bzip2" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/GoogleContainerTools/kaniko/pkg/config" + "github.com/docker/docker/pkg/archive" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Tar knows how to write files to a tar file. +type Tar struct { + hardlinks map[uint64]string + w *tar.Writer +} + +// NewTar will create an instance of Tar that can write files to the writer at f. +func NewTar(f io.Writer) Tar { + w := tar.NewWriter(f) + return Tar{ + w: w, + hardlinks: map[uint64]string{}, + } +} + +// Close will close any open streams used by Tar. +func (t *Tar) Close() { + t.w.Close() +} + +// AddFileToTar adds the file at path p to the tar +func (t *Tar) AddFileToTar(p string) error { + i, err := os.Lstat(p) + if err != nil { + return fmt.Errorf("Failed to get file info for %s: %s", p, err) + } + linkDst := "" + if i.Mode()&os.ModeSymlink != 0 { + var err error + linkDst, err = os.Readlink(p) + if err != nil { + return err + } + } + if i.Mode()&os.ModeSocket != 0 { + logrus.Infof("ignoring socket %s, not adding to tar", i.Name()) + return nil + } + hdr, err := tar.FileInfoHeader(i, linkDst) + if err != nil { + return err + } + + if p == config.RootDir { + // allow entry for / to preserve permission changes etc. 
(currently ignored anyway by Docker runtime) + hdr.Name = "/" + } else { + // Docker uses no leading / in the tarball + hdr.Name = strings.TrimPrefix(p, config.RootDir) + hdr.Name = strings.TrimLeft(hdr.Name, "/") + } + if hdr.Typeflag == tar.TypeDir && !strings.HasSuffix(hdr.Name, "/") { + hdr.Name = hdr.Name + "/" + } + // rootfs may not have been extracted when using cache, preventing uname/gname from resolving + // this makes this layer unnecessarily differ from a cached layer which does contain this information + hdr.Uname = "" + hdr.Gname = "" + + hardlink, linkDst := t.checkHardlink(p, i) + if hardlink { + hdr.Linkname = linkDst + hdr.Typeflag = tar.TypeLink + hdr.Size = 0 + } + if err := t.w.WriteHeader(hdr); err != nil { + return err + } + if !(i.Mode().IsRegular()) || hardlink { + return nil + } + r, err := os.Open(p) + if err != nil { + return err + } + defer r.Close() + if _, err := io.Copy(t.w, r); err != nil { + return err + } + return nil +} + +func (t *Tar) Whiteout(p string) error { + dir := filepath.Dir(p) + name := ".wh." + filepath.Base(p) + + th := &tar.Header{ + // Docker uses no leading / in the tarball + Name: strings.TrimLeft(filepath.Join(dir, name), "/"), + Size: 0, + } + if err := t.w.WriteHeader(th); err != nil { + return err + } + + return nil +} + +// Returns true if path is hardlink, and the link destination +func (t *Tar) checkHardlink(p string, i os.FileInfo) (bool, string) { + hardlink := false + linkDst := "" + stat := getSyscallStatT(i) + if stat != nil { + nlinks := stat.Nlink + if nlinks > 1 { + inode := stat.Ino + if original, exists := t.hardlinks[inode]; exists && original != p { + hardlink = true + logrus.Debugf("%s inode exists in hardlinks map, linking to %s", p, original) + linkDst = original + } else { + t.hardlinks[inode] = p + } + } + } + return hardlink, linkDst +} + +func getSyscallStatT(i os.FileInfo) *syscall.Stat_t { + if sys := i.Sys(); sys != nil { + if stat, ok := sys.(*syscall.Stat_t); ok { + return stat + } + } + return nil +} + +// UnpackLocalTarArchive unpacks the tar archive at path to the directory dest +// Returns the files extracted from the tar archive +func UnpackLocalTarArchive(path, dest string) ([]string, error) { + // First, we need to check if the path is a local tar archive + if compressed, compressionLevel := fileIsCompressedTar(path); compressed { + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + if compressionLevel == archive.Gzip { + return nil, UnpackCompressedTar(path, dest) + } else if compressionLevel == archive.Bzip2 { + bzr := bzip2.NewReader(file) + return unTar(bzr, dest) + } + } + if fileIsUncompressedTar(path) { + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + return unTar(file, dest) + } + return nil, errors.New("path does not lead to local tar archive") +} + +//IsFileLocalTarArchive returns true if the file is a local tar archive +func IsFileLocalTarArchive(src string) bool { + compressed, _ := fileIsCompressedTar(src) + uncompressed := fileIsUncompressedTar(src) + return compressed || uncompressed +} + +func fileIsCompressedTar(src string) (bool, archive.Compression) { + r, err := os.Open(src) + if err != nil { + return false, -1 + } + defer r.Close() + buf, err := ioutil.ReadAll(r) + if err != nil { + return false, -1 + } + compressionLevel := archive.DetectCompression(buf) + return (compressionLevel > 0), compressionLevel +} + +func fileIsUncompressedTar(src string) bool { + r, err := os.Open(src) + if err != nil { 
+ return false + } + defer r.Close() + fi, err := os.Lstat(src) + if err != nil { + return false + } + if fi.Size() == 0 { + return false + } + tr := tar.NewReader(r) + if tr == nil { + return false + } + _, err = tr.Next() + return err == nil +} + +// UnpackCompressedTar unpacks the compressed tar at path to dir +func UnpackCompressedTar(path, dir string) error { + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + gzr, err := gzip.NewReader(file) + if err != nil { + return err + } + defer gzr.Close() + _, err = unTar(gzr, dir) + return err +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/transport_util.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/transport_util.go new file mode 100644 index 000000000..61ca2aeba --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/transport_util.go @@ -0,0 +1,82 @@ +/* +Copyright 2020 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "crypto/tls" + "crypto/x509" + + "io/ioutil" + "net/http" + + "github.com/GoogleContainerTools/kaniko/pkg/config" + "github.com/sirupsen/logrus" +) + +type CertPool interface { + value() *x509.CertPool + append(path string) error +} + +type X509CertPool struct { + inner x509.CertPool +} + +func (p *X509CertPool) value() *x509.CertPool { + return &p.inner +} + +func (p *X509CertPool) append(path string) error { + pem, err := ioutil.ReadFile(path) + if err != nil { + return err + } + p.inner.AppendCertsFromPEM(pem) + return nil +} + +var systemCertLoader CertPool + +func init() { + systemCertPool, err := x509.SystemCertPool() + if err != nil { + logrus.Warn("Failed to load system cert pool. Loading empty one instead.") + systemCertPool = x509.NewCertPool() + } + systemCertLoader = &X509CertPool{ + inner: *systemCertPool, + } +} + +func MakeTransport(opts config.RegistryOptions, registryName string) http.RoundTripper { + // Create a transport to set our user-agent. 
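+	// (Hypothetical usage sketch, not part of this change: the returned
+	// RoundTripper is meant to back an HTTP client that talks to the registry,
+	// e.g.
+	//
+	//	client := &http.Client{Transport: MakeTransport(opts, "index.docker.io")}
+	//
+	// where opts and the registry name above are placeholders.)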
+ var tr http.RoundTripper = http.DefaultTransport.(*http.Transport).Clone() + if opts.SkipTLSVerify || opts.SkipTLSVerifyRegistries.Contains(registryName) { + tr.(*http.Transport).TLSClientConfig = &tls.Config{ + InsecureSkipVerify: true, + } + } else if certificatePath := opts.RegistriesCertificates[registryName]; certificatePath != "" { + if err := systemCertLoader.append(certificatePath); err != nil { + logrus.WithError(err).Warnf("Failed to load certificate %s for %s\n", certificatePath, registryName) + } else { + tr.(*http.Transport).TLSClientConfig = &tls.Config{ + RootCAs: systemCertLoader.value(), + } + } + } + return tr +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/util.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/util.go new file mode 100644 index 000000000..600f3d1bb --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/util/util.go @@ -0,0 +1,174 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "crypto/md5" + "crypto/sha256" + "encoding/hex" + "io" + "io/ioutil" + "math" + "os" + "strconv" + "sync" + "syscall" + "time" + + "github.com/minio/highwayhash" + "github.com/sirupsen/logrus" +) + +// Hasher returns a hash function, used in snapshotting to determine if a file has changed +func Hasher() func(string) (string, error) { + pool := sync.Pool{ + New: func() interface{} { + b := make([]byte, highwayhash.Size*10*1024) + return &b + }, + } + key := make([]byte, highwayhash.Size) + hasher := func(p string) (string, error) { + h, _ := highwayhash.New(key) + fi, err := os.Lstat(p) + if err != nil { + return "", err + } + h.Write([]byte(fi.Mode().String())) + h.Write([]byte(fi.ModTime().String())) + + h.Write([]byte(strconv.FormatUint(uint64(fi.Sys().(*syscall.Stat_t).Uid), 36))) + h.Write([]byte(",")) + h.Write([]byte(strconv.FormatUint(uint64(fi.Sys().(*syscall.Stat_t).Gid), 36))) + + if fi.Mode().IsRegular() { + f, err := os.Open(p) + if err != nil { + return "", err + } + defer f.Close() + buf := pool.Get().(*[]byte) + defer pool.Put(buf) + if _, err := io.CopyBuffer(h, f, *buf); err != nil { + return "", err + } + } + + return hex.EncodeToString(h.Sum(nil)), nil + } + return hasher +} + +// CacheHasher takes into account everything the regular hasher does except for mtime +func CacheHasher() func(string) (string, error) { + hasher := func(p string) (string, error) { + h := md5.New() + fi, err := os.Lstat(p) + if err != nil { + return "", err + } + h.Write([]byte(fi.Mode().String())) + + h.Write([]byte(strconv.FormatUint(uint64(fi.Sys().(*syscall.Stat_t).Uid), 36))) + h.Write([]byte(",")) + h.Write([]byte(strconv.FormatUint(uint64(fi.Sys().(*syscall.Stat_t).Gid), 36))) + + if fi.Mode().IsRegular() { + f, err := os.Open(p) + if err != nil { + return "", err + } + defer f.Close() + if _, err := io.Copy(h, f); err != nil { + return "", err + } + } + + return hex.EncodeToString(h.Sum(nil)), nil + } + return hasher +} + +// MtimeHasher returns a 
hash function, which only looks at mtime to determine if a file has changed. +// Note that the mtime can lag, so it's possible that a file will have changed but the mtime may look the same. +func MtimeHasher() func(string) (string, error) { + hasher := func(p string) (string, error) { + h := md5.New() + fi, err := os.Lstat(p) + if err != nil { + return "", err + } + h.Write([]byte(fi.ModTime().String())) + return hex.EncodeToString(h.Sum(nil)), nil + } + return hasher +} + +// RedoHasher returns a hash function, which looks at mtime, size, filemode, owner uid and gid +// Note that the mtime can lag, so it's possible that a file will have changed but the mtime may look the same. +func RedoHasher() func(string) (string, error) { + hasher := func(p string) (string, error) { + h := md5.New() + fi, err := os.Lstat(p) + if err != nil { + return "", err + } + h.Write([]byte(fi.Mode().String())) + h.Write([]byte(fi.ModTime().String())) + h.Write([]byte(strconv.FormatInt(fi.Size(), 16))) + h.Write([]byte(strconv.FormatUint(uint64(fi.Sys().(*syscall.Stat_t).Uid), 36))) + h.Write([]byte(",")) + h.Write([]byte(strconv.FormatUint(uint64(fi.Sys().(*syscall.Stat_t).Gid), 36))) + + return hex.EncodeToString(h.Sum(nil)), nil + } + return hasher +} + +// SHA256 returns the shasum of the contents of r +func SHA256(r io.Reader) (string, error) { + hasher := sha256.New() + _, err := io.Copy(hasher, r) + if err != nil { + return "", err + } + return hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))), nil +} + +// GetInputFrom returns Reader content +func GetInputFrom(r io.Reader) ([]byte, error) { + output, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + return output, nil +} + +type retryFunc func() error + +// Retry retries an operation +func Retry(operation retryFunc, retryCount int, initialDelayMilliseconds int) error { + err := operation() + for i := 0; err != nil && i < retryCount; i++ { + sleepDuration := time.Millisecond * time.Duration(int(math.Pow(2, float64(i)))*initialDelayMilliseconds) + logrus.Warnf("Retrying operation after %s due to %v", sleepDuration, err) + time.Sleep(sleepDuration) + err = operation() + } + + return err +} diff --git a/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/version/version.go b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/version/version.go new file mode 100644 index 000000000..8d388d5b8 --- /dev/null +++ b/extender/code/vendor/github.com/GoogleContainerTools/kaniko/pkg/version/version.go @@ -0,0 +1,24 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package version + +// Set with LDFLAGS +var version = "unset" + +func Version() string { + return version +} diff --git a/extender/code/vendor/github.com/Microsoft/go-winio/.gitignore b/extender/code/vendor/github.com/Microsoft/go-winio/.gitignore new file mode 100644 index 000000000..b883f1fdc --- /dev/null +++ b/extender/code/vendor/github.com/Microsoft/go-winio/.gitignore @@ -0,0 +1 @@ +*.exe diff --git a/extender/code/vendor/github.com/Microsoft/go-winio/LICENSE b/extender/code/vendor/github.com/Microsoft/go-winio/LICENSE new file mode 100644 index 000000000..b8b569d77 --- /dev/null +++ b/extender/code/vendor/github.com/Microsoft/go-winio/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/extender/code/vendor/github.com/Microsoft/go-winio/README.md b/extender/code/vendor/github.com/Microsoft/go-winio/README.md new file mode 100644 index 000000000..568001057 --- /dev/null +++ b/extender/code/vendor/github.com/Microsoft/go-winio/README.md @@ -0,0 +1,22 @@ +# go-winio + +This repository contains utilities for efficiently performing Win32 IO operations in +Go. Currently, this is focused on accessing named pipes and other file handles, and +for using named pipes as a net transport. + +This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go +to reuse the thread to schedule another goroutine. This limits support to Windows Vista and +newer operating systems. This is similar to the implementation of network sockets in Go's net +package. + +Please see the LICENSE file for licensing information. + +This project has adopted the [Microsoft Open Source Code of +Conduct](https://opensource.microsoft.com/codeofconduct/). For more information +see the [Code of Conduct +FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact +[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional +questions or comments. + +Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe +for another named pipe implementation. 
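+
+## Example
+
+A minimal, illustrative sketch of dialing a named pipe as a `net.Conn` (the
+pipe name is a placeholder and error handling is elided):
+
+```go
+conn, err := winio.DialPipe(`\\.\pipe\example`, nil)
+if err != nil {
+	// handle error
+}
+defer conn.Close()
+```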
diff --git a/extender/code/vendor/github.com/Microsoft/go-winio/backup.go b/extender/code/vendor/github.com/Microsoft/go-winio/backup.go new file mode 100644 index 000000000..2be34af43 --- /dev/null +++ b/extender/code/vendor/github.com/Microsoft/go-winio/backup.go @@ -0,0 +1,280 @@ +// +build windows + +package winio + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "syscall" + "unicode/utf16" +) + +//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead +//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite + +const ( + BackupData = uint32(iota + 1) + BackupEaData + BackupSecurity + BackupAlternateData + BackupLink + BackupPropertyData + BackupObjectId + BackupReparseData + BackupSparseBlock + BackupTxfsData +) + +const ( + StreamSparseAttributes = uint32(8) +) + +const ( + WRITE_DAC = 0x40000 + WRITE_OWNER = 0x80000 + ACCESS_SYSTEM_SECURITY = 0x1000000 +) + +// BackupHeader represents a backup stream of a file. +type BackupHeader struct { + Id uint32 // The backup stream ID + Attributes uint32 // Stream attributes + Size int64 // The size of the stream in bytes + Name string // The name of the stream (for BackupAlternateData only). + Offset int64 // The offset of the stream in the file (for BackupSparseBlock only). +} + +type win32StreamId struct { + StreamId uint32 + Attributes uint32 + Size uint64 + NameSize uint32 +} + +// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series +// of BackupHeader values. +type BackupStreamReader struct { + r io.Reader + bytesLeft int64 +} + +// NewBackupStreamReader produces a BackupStreamReader from any io.Reader. +func NewBackupStreamReader(r io.Reader) *BackupStreamReader { + return &BackupStreamReader{r, 0} +} + +// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if +// it was not completely read. +func (r *BackupStreamReader) Next() (*BackupHeader, error) { + if r.bytesLeft > 0 { + if s, ok := r.r.(io.Seeker); ok { + // Make sure Seek on io.SeekCurrent sometimes succeeds + // before trying the actual seek. + if _, err := s.Seek(0, io.SeekCurrent); err == nil { + if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil { + return nil, err + } + r.bytesLeft = 0 + } + } + if _, err := io.Copy(ioutil.Discard, r); err != nil { + return nil, err + } + } + var wsi win32StreamId + if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil { + return nil, err + } + hdr := &BackupHeader{ + Id: wsi.StreamId, + Attributes: wsi.Attributes, + Size: int64(wsi.Size), + } + if wsi.NameSize != 0 { + name := make([]uint16, int(wsi.NameSize/2)) + if err := binary.Read(r.r, binary.LittleEndian, name); err != nil { + return nil, err + } + hdr.Name = syscall.UTF16ToString(name) + } + if wsi.StreamId == BackupSparseBlock { + if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { + return nil, err + } + hdr.Size -= 8 + } + r.bytesLeft = hdr.Size + return hdr, nil +} + +// Read reads from the current backup stream. 
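+// It returns io.EOF once the current stream has been exhausted; call Next to
+// advance to the following stream.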
+func (r *BackupStreamReader) Read(b []byte) (int, error) { + if r.bytesLeft == 0 { + return 0, io.EOF + } + if int64(len(b)) > r.bytesLeft { + b = b[:r.bytesLeft] + } + n, err := r.r.Read(b) + r.bytesLeft -= int64(n) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } else if r.bytesLeft == 0 && err == nil { + err = io.EOF + } + return n, err +} + +// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API. +type BackupStreamWriter struct { + w io.Writer + bytesLeft int64 +} + +// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer. +func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter { + return &BackupStreamWriter{w, 0} +} + +// WriteHeader writes the next backup stream header and prepares for calls to Write(). +func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error { + if w.bytesLeft != 0 { + return fmt.Errorf("missing %d bytes", w.bytesLeft) + } + name := utf16.Encode([]rune(hdr.Name)) + wsi := win32StreamId{ + StreamId: hdr.Id, + Attributes: hdr.Attributes, + Size: uint64(hdr.Size), + NameSize: uint32(len(name) * 2), + } + if hdr.Id == BackupSparseBlock { + // Include space for the int64 block offset + wsi.Size += 8 + } + if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil { + return err + } + if len(name) != 0 { + if err := binary.Write(w.w, binary.LittleEndian, name); err != nil { + return err + } + } + if hdr.Id == BackupSparseBlock { + if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil { + return err + } + } + w.bytesLeft = hdr.Size + return nil +} + +// Write writes to the current backup stream. +func (w *BackupStreamWriter) Write(b []byte) (int, error) { + if w.bytesLeft < int64(len(b)) { + return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft) + } + n, err := w.w.Write(b) + w.bytesLeft -= int64(n) + return n, err +} + +// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API. +type BackupFileReader struct { + f *os.File + includeSecurity bool + ctx uintptr +} + +// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true, +// Read will attempt to read the security descriptor of the file. +func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader { + r := &BackupFileReader{f, includeSecurity, 0} + return r +} + +// Read reads a backup stream from the file by calling the Win32 API BackupRead(). +func (r *BackupFileReader) Read(b []byte) (int, error) { + var bytesRead uint32 + err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) + if err != nil { + return 0, &os.PathError{"BackupRead", r.f.Name(), err} + } + runtime.KeepAlive(r.f) + if bytesRead == 0 { + return 0, io.EOF + } + return int(bytesRead), nil +} + +// Close frees Win32 resources associated with the BackupFileReader. It does not close +// the underlying file. +func (r *BackupFileReader) Close() error { + if r.ctx != 0 { + backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) + runtime.KeepAlive(r.f) + r.ctx = 0 + } + return nil +} + +// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API. +type BackupFileWriter struct { + f *os.File + includeSecurity bool + ctx uintptr +} + +// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true, +// Write() will attempt to restore the security descriptor from the stream. 
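+// The writer must be closed to release the Win32 backup context; Close does
+// not close the underlying file.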
+func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter { + w := &BackupFileWriter{f, includeSecurity, 0} + return w +} + +// Write restores a portion of the file using the provided backup stream. +func (w *BackupFileWriter) Write(b []byte) (int, error) { + var bytesWritten uint32 + err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) + if err != nil { + return 0, &os.PathError{"BackupWrite", w.f.Name(), err} + } + runtime.KeepAlive(w.f) + if int(bytesWritten) != len(b) { + return int(bytesWritten), errors.New("not all bytes could be written") + } + return len(b), nil +} + +// Close frees Win32 resources associated with the BackupFileWriter. It does not +// close the underlying file. +func (w *BackupFileWriter) Close() error { + if w.ctx != 0 { + backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) + runtime.KeepAlive(w.f) + w.ctx = 0 + } + return nil +} + +// OpenForBackup opens a file or directory, potentially skipping access checks if the backup +// or restore privileges have been acquired. +// +// If the file opened was a directory, it cannot be used with Readdir(). +func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) { + winPath, err := syscall.UTF16FromString(path) + if err != nil { + return nil, err + } + h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0) + if err != nil { + err = &os.PathError{Op: "open", Path: path, Err: err} + return nil, err + } + return os.NewFile(uintptr(h), path), nil +} diff --git a/extender/code/vendor/github.com/Microsoft/go-winio/ea.go b/extender/code/vendor/github.com/Microsoft/go-winio/ea.go new file mode 100644 index 000000000..4051c1b33 --- /dev/null +++ b/extender/code/vendor/github.com/Microsoft/go-winio/ea.go @@ -0,0 +1,137 @@ +package winio + +import ( + "bytes" + "encoding/binary" + "errors" +) + +type fileFullEaInformation struct { + NextEntryOffset uint32 + Flags uint8 + NameLength uint8 + ValueLength uint16 +} + +var ( + fileFullEaInformationSize = binary.Size(&fileFullEaInformation{}) + + errInvalidEaBuffer = errors.New("invalid extended attribute buffer") + errEaNameTooLarge = errors.New("extended attribute name too large") + errEaValueTooLarge = errors.New("extended attribute value too large") +) + +// ExtendedAttribute represents a single Windows EA. +type ExtendedAttribute struct { + Name string + Value []byte + Flags uint8 +} + +func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { + var info fileFullEaInformation + err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) + if err != nil { + err = errInvalidEaBuffer + return + } + + nameOffset := fileFullEaInformationSize + nameLen := int(info.NameLength) + valueOffset := nameOffset + int(info.NameLength) + 1 + valueLen := int(info.ValueLength) + nextOffset := int(info.NextEntryOffset) + if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) { + err = errInvalidEaBuffer + return + } + + ea.Name = string(b[nameOffset : nameOffset+nameLen]) + ea.Value = b[valueOffset : valueOffset+valueLen] + ea.Flags = info.Flags + if info.NextEntryOffset != 0 { + nb = b[info.NextEntryOffset:] + } + return +} + +// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION +// buffer retrieved from BackupRead, ZwQueryEaFile, etc. 
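+//
+// (Illustrative round-trip with EncodeExtendedAttributes, assuming buf holds
+// such a buffer; not part of this change:
+//
+//	eas, _ := DecodeExtendedAttributes(buf)
+//	encoded, _ := EncodeExtendedAttributes(eas)
+// )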
+func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { + for len(b) != 0 { + ea, nb, err := parseEa(b) + if err != nil { + return nil, err + } + + eas = append(eas, ea) + b = nb + } + return +} + +func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { + if int(uint8(len(ea.Name))) != len(ea.Name) { + return errEaNameTooLarge + } + if int(uint16(len(ea.Value))) != len(ea.Value) { + return errEaValueTooLarge + } + entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value)) + withPadding := (entrySize + 3) &^ 3 + nextOffset := uint32(0) + if !last { + nextOffset = withPadding + } + info := fileFullEaInformation{ + NextEntryOffset: nextOffset, + Flags: ea.Flags, + NameLength: uint8(len(ea.Name)), + ValueLength: uint16(len(ea.Value)), + } + + err := binary.Write(buf, binary.LittleEndian, &info) + if err != nil { + return err + } + + _, err = buf.Write([]byte(ea.Name)) + if err != nil { + return err + } + + err = buf.WriteByte(0) + if err != nil { + return err + } + + _, err = buf.Write(ea.Value) + if err != nil { + return err + } + + _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize]) + if err != nil { + return err + } + + return nil +} + +// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION +// buffer for use with BackupWrite, ZwSetEaFile, etc. +func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) { + var buf bytes.Buffer + for i := range eas { + last := false + if i == len(eas)-1 { + last = true + } + + err := writeEa(&buf, &eas[i], last) + if err != nil { + return nil, err + } + } + return buf.Bytes(), nil +} diff --git a/extender/code/vendor/github.com/Microsoft/go-winio/file.go b/extender/code/vendor/github.com/Microsoft/go-winio/file.go new file mode 100644 index 000000000..0385e4108 --- /dev/null +++ b/extender/code/vendor/github.com/Microsoft/go-winio/file.go @@ -0,0 +1,323 @@ +// +build windows + +package winio + +import ( + "errors" + "io" + "runtime" + "sync" + "sync/atomic" + "syscall" + "time" +) + +//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx +//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort +//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus +//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes +//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult + +type atomicBool int32 + +func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } +func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } +func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } +func (b *atomicBool) swap(new bool) bool { + var newInt int32 + if new { + newInt = 1 + } + return atomic.SwapInt32((*int32)(b), newInt) == 1 +} + +const ( + cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 + cFILE_SKIP_SET_EVENT_ON_HANDLE = 2 +) + +var ( + ErrFileClosed = errors.New("file has already been closed") + ErrTimeout = &timeoutError{} +) + +type timeoutError struct{} + +func (e *timeoutError) Error() string { return "i/o timeout" } +func (e *timeoutError) Timeout() bool { return true } +func (e *timeoutError) Temporary() bool { return true } + 
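+// timeoutError deliberately provides the Error, Timeout and Temporary method
+// set of the net.Error interface, so ErrTimeout can be handled like any other
+// network timeout error.
+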
+type timeoutChan chan struct{} + +var ioInitOnce sync.Once +var ioCompletionPort syscall.Handle + +// ioResult contains the result of an asynchronous IO operation +type ioResult struct { + bytes uint32 + err error +} + +// ioOperation represents an outstanding asynchronous Win32 IO +type ioOperation struct { + o syscall.Overlapped + ch chan ioResult +} + +func initIo() { + h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff) + if err != nil { + panic(err) + } + ioCompletionPort = h + go ioCompletionProcessor(h) +} + +// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. +// It takes ownership of this handle and will close it if it is garbage collected. +type win32File struct { + handle syscall.Handle + wg sync.WaitGroup + wgLock sync.RWMutex + closing atomicBool + socket bool + readDeadline deadlineHandler + writeDeadline deadlineHandler +} + +type deadlineHandler struct { + setLock sync.Mutex + channel timeoutChan + channelLock sync.RWMutex + timer *time.Timer + timedout atomicBool +} + +// makeWin32File makes a new win32File from an existing file handle +func makeWin32File(h syscall.Handle) (*win32File, error) { + f := &win32File{handle: h} + ioInitOnce.Do(initIo) + _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) + if err != nil { + return nil, err + } + err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE) + if err != nil { + return nil, err + } + f.readDeadline.channel = make(timeoutChan) + f.writeDeadline.channel = make(timeoutChan) + return f, nil +} + +func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { + // If we return the result of makeWin32File directly, it can result in an + // interface-wrapped nil, rather than a nil interface value. + f, err := makeWin32File(h) + if err != nil { + return nil, err + } + return f, nil +} + +// closeHandle closes the resources associated with a Win32 handle +func (f *win32File) closeHandle() { + f.wgLock.Lock() + // Atomically set that we are closing, releasing the resources only once. + if !f.closing.swap(true) { + f.wgLock.Unlock() + // cancel all IO and wait for it to complete + cancelIoEx(f.handle, nil) + f.wg.Wait() + // at this point, no new IO can start + syscall.Close(f.handle) + f.handle = 0 + } else { + f.wgLock.Unlock() + } +} + +// Close closes a win32File. +func (f *win32File) Close() error { + f.closeHandle() + return nil +} + +// prepareIo prepares for a new IO operation. +// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. +func (f *win32File) prepareIo() (*ioOperation, error) { + f.wgLock.RLock() + if f.closing.isSet() { + f.wgLock.RUnlock() + return nil, ErrFileClosed + } + f.wg.Add(1) + f.wgLock.RUnlock() + c := &ioOperation{} + c.ch = make(chan ioResult) + return c, nil +} + +// ioCompletionProcessor processes completed async IOs forever +func ioCompletionProcessor(h syscall.Handle) { + for { + var bytes uint32 + var key uintptr + var op *ioOperation + err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE) + if op == nil { + panic(err) + } + op.ch <- ioResult{bytes, err} + } +} + +// asyncIo processes the return value from ReadFile or WriteFile, blocking until +// the operation has actually completed. 
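+// The operation context c must come from a prior prepareIo call, and the
+// caller remains responsible for calling f.wg.Done once the IO is finished.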
+func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { + if err != syscall.ERROR_IO_PENDING { + return int(bytes), err + } + + if f.closing.isSet() { + cancelIoEx(f.handle, &c.o) + } + + var timeout timeoutChan + if d != nil { + d.channelLock.Lock() + timeout = d.channel + d.channelLock.Unlock() + } + + var r ioResult + select { + case r = <-c.ch: + err = r.err + if err == syscall.ERROR_OPERATION_ABORTED { + if f.closing.isSet() { + err = ErrFileClosed + } + } else if err != nil && f.socket { + // err is from Win32. Query the overlapped structure to get the winsock error. + var bytes, flags uint32 + err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags) + } + case <-timeout: + cancelIoEx(f.handle, &c.o) + r = <-c.ch + err = r.err + if err == syscall.ERROR_OPERATION_ABORTED { + err = ErrTimeout + } + } + + // runtime.KeepAlive is needed, as c is passed via native + // code to ioCompletionProcessor, c must remain alive + // until the channel read is complete. + runtime.KeepAlive(c) + return int(r.bytes), err +} + +// Read reads from a file handle. +func (f *win32File) Read(b []byte) (int, error) { + c, err := f.prepareIo() + if err != nil { + return 0, err + } + defer f.wg.Done() + + if f.readDeadline.timedout.isSet() { + return 0, ErrTimeout + } + + var bytes uint32 + err = syscall.ReadFile(f.handle, b, &bytes, &c.o) + n, err := f.asyncIo(c, &f.readDeadline, bytes, err) + runtime.KeepAlive(b) + + // Handle EOF conditions. + if err == nil && n == 0 && len(b) != 0 { + return 0, io.EOF + } else if err == syscall.ERROR_BROKEN_PIPE { + return 0, io.EOF + } else { + return n, err + } +} + +// Write writes to a file handle. +func (f *win32File) Write(b []byte) (int, error) { + c, err := f.prepareIo() + if err != nil { + return 0, err + } + defer f.wg.Done() + + if f.writeDeadline.timedout.isSet() { + return 0, ErrTimeout + } + + var bytes uint32 + err = syscall.WriteFile(f.handle, b, &bytes, &c.o) + n, err := f.asyncIo(c, &f.writeDeadline, bytes, err) + runtime.KeepAlive(b) + return n, err +} + +func (f *win32File) SetReadDeadline(deadline time.Time) error { + return f.readDeadline.set(deadline) +} + +func (f *win32File) SetWriteDeadline(deadline time.Time) error { + return f.writeDeadline.set(deadline) +} + +func (f *win32File) Flush() error { + return syscall.FlushFileBuffers(f.handle) +} + +func (f *win32File) Fd() uintptr { + return uintptr(f.handle) +} + +func (d *deadlineHandler) set(deadline time.Time) error { + d.setLock.Lock() + defer d.setLock.Unlock() + + if d.timer != nil { + if !d.timer.Stop() { + <-d.channel + } + d.timer = nil + } + d.timedout.setFalse() + + select { + case <-d.channel: + d.channelLock.Lock() + d.channel = make(chan struct{}) + d.channelLock.Unlock() + default: + } + + if deadline.IsZero() { + return nil + } + + timeoutIO := func() { + d.timedout.setTrue() + close(d.channel) + } + + now := time.Now() + duration := deadline.Sub(now) + if deadline.After(now) { + // Deadline is in the future, set a timer to wait + d.timer = time.AfterFunc(duration, timeoutIO) + } else { + // Deadline is in the past. Cancel all pending IO now. 
+ timeoutIO() + } + return nil +} diff --git a/extender/code/vendor/github.com/Microsoft/go-winio/fileinfo.go b/extender/code/vendor/github.com/Microsoft/go-winio/fileinfo.go new file mode 100644 index 000000000..ada2fbab6 --- /dev/null +++ b/extender/code/vendor/github.com/Microsoft/go-winio/fileinfo.go @@ -0,0 +1,61 @@ +// +build windows + +package winio + +import ( + "os" + "runtime" + "syscall" + "unsafe" +) + +//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx +//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle + +const ( + fileBasicInfo = 0 + fileIDInfo = 0x12 +) + +// FileBasicInfo contains file access time and file attributes information. +type FileBasicInfo struct { + CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime + FileAttributes uint32 + pad uint32 // padding +} + +// GetFileBasicInfo retrieves times and attributes for a file. +func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { + bi := &FileBasicInfo{} + if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return bi, nil +} + +// SetFileBasicInfo sets times and attributes for a file. +func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error { + if err := setFileInformationByHandle(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return nil +} + +// FileIDInfo contains the volume serial number and file ID for a file. This pair should be +// unique on a system. +type FileIDInfo struct { + VolumeSerialNumber uint64 + FileID [16]byte +} + +// GetFileID retrieves the unique (volume, file ID) pair for a file. 
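+// On NTFS, hard links to the same file report the same pair, which makes it
+// useful for detecting aliased paths.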
+func GetFileID(f *os.File) (*FileIDInfo, error) { + fileID := &FileIDInfo{} + if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileIDInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil { + return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return fileID, nil +} diff --git a/extender/code/vendor/github.com/Microsoft/go-winio/go.mod b/extender/code/vendor/github.com/Microsoft/go-winio/go.mod new file mode 100644 index 000000000..b3846826b --- /dev/null +++ b/extender/code/vendor/github.com/Microsoft/go-winio/go.mod @@ -0,0 +1,9 @@ +module github.com/Microsoft/go-winio + +go 1.12 + +require ( + github.com/pkg/errors v0.8.1 + github.com/sirupsen/logrus v1.4.1 + golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b +) diff --git a/extender/code/vendor/github.com/Microsoft/go-winio/go.sum b/extender/code/vendor/github.com/Microsoft/go-winio/go.sum new file mode 100644 index 000000000..babb4a70d --- /dev/null +++ b/extender/code/vendor/github.com/Microsoft/go-winio/go.sum @@ -0,0 +1,16 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/extender/code/vendor/github.com/Microsoft/go-winio/hvsock.go b/extender/code/vendor/github.com/Microsoft/go-winio/hvsock.go new file mode 100644 index 000000000..dbfe790ee --- /dev/null +++ b/extender/code/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -0,0 +1,305 @@ +package winio + +import ( + "fmt" + "io" + "net" + "os" + "syscall" + "time" + "unsafe" + + "github.com/Microsoft/go-winio/pkg/guid" +) + +//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind + +const ( + afHvSock = 34 // AF_HYPERV + + socketError = ^uintptr(0) +) + +// An HvsockAddr is an address for a AF_HYPERV socket. +type HvsockAddr struct { + VMID guid.GUID + ServiceID guid.GUID +} + +type rawHvsockAddr struct { + Family uint16 + _ uint16 + VMID guid.GUID + ServiceID guid.GUID +} + +// Network returns the address's network name, "hvsock". 
+func (addr *HvsockAddr) Network() string { + return "hvsock" +} + +func (addr *HvsockAddr) String() string { + return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID) +} + +// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port. +func VsockServiceID(port uint32) guid.GUID { + g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3") + g.Data1 = port + return g +} + +func (addr *HvsockAddr) raw() rawHvsockAddr { + return rawHvsockAddr{ + Family: afHvSock, + VMID: addr.VMID, + ServiceID: addr.ServiceID, + } +} + +func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) { + addr.VMID = raw.VMID + addr.ServiceID = raw.ServiceID +} + +// HvsockListener is a socket listener for the AF_HYPERV address family. +type HvsockListener struct { + sock *win32File + addr HvsockAddr +} + +// HvsockConn is a connected socket of the AF_HYPERV address family. +type HvsockConn struct { + sock *win32File + local, remote HvsockAddr +} + +func newHvSocket() (*win32File, error) { + fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1) + if err != nil { + return nil, os.NewSyscallError("socket", err) + } + f, err := makeWin32File(fd) + if err != nil { + syscall.Close(fd) + return nil, err + } + f.socket = true + return f, nil +} + +// ListenHvsock listens for connections on the specified hvsock address. +func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) { + l := &HvsockListener{addr: *addr} + sock, err := newHvSocket() + if err != nil { + return nil, l.opErr("listen", err) + } + sa := addr.raw() + err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa))) + if err != nil { + return nil, l.opErr("listen", os.NewSyscallError("socket", err)) + } + err = syscall.Listen(sock.handle, 16) + if err != nil { + return nil, l.opErr("listen", os.NewSyscallError("listen", err)) + } + return &HvsockListener{sock: sock, addr: *addr}, nil +} + +func (l *HvsockListener) opErr(op string, err error) error { + return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err} +} + +// Addr returns the listener's network address. +func (l *HvsockListener) Addr() net.Addr { + return &l.addr +} + +// Accept waits for the next connection and returns it. +func (l *HvsockListener) Accept() (_ net.Conn, err error) { + sock, err := newHvSocket() + if err != nil { + return nil, l.opErr("accept", err) + } + defer func() { + if sock != nil { + sock.Close() + } + }() + c, err := l.sock.prepareIo() + if err != nil { + return nil, l.opErr("accept", err) + } + defer l.sock.wg.Done() + + // AcceptEx, per documentation, requires an extra 16 bytes per address. + const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{})) + var addrbuf [addrlen * 2]byte + + var bytes uint32 + err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o) + _, err = l.sock.asyncIo(c, nil, bytes, err) + if err != nil { + return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) + } + conn := &HvsockConn{ + sock: sock, + } + conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0]))) + conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen]))) + sock = nil + return conn, nil +} + +// Close closes the listener, causing any pending Accept calls to fail. 
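+//
+// A sketch of the accept loop that Close unblocks (handleConn is a
+// hypothetical handler):
+//
+//	for {
+//		conn, err := l.Accept()
+//		if err != nil {
+//			break // pending and future Accepts fail after Close
+//		}
+//		go handleConn(conn)
+//	}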
+func (l *HvsockListener) Close() error {
+	return l.sock.Close()
+}
+
+/* Need to finish ConnectEx handling
+func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) {
+	sock, err := newHvSocket()
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		if sock != nil {
+			sock.Close()
+		}
+	}()
+	c, err := sock.prepareIo()
+	if err != nil {
+		return nil, err
+	}
+	defer sock.wg.Done()
+	var bytes uint32
+	err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o)
+	_, err = sock.asyncIo(ctx, c, nil, bytes, err)
+	if err != nil {
+		return nil, err
+	}
+	conn := &HvsockConn{
+		sock:   sock,
+		remote: *addr,
+	}
+	sock = nil
+	return conn, nil
+}
+*/
+
+func (conn *HvsockConn) opErr(op string, err error) error {
+	return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err}
+}
+
+func (conn *HvsockConn) Read(b []byte) (int, error) {
+	c, err := conn.sock.prepareIo()
+	if err != nil {
+		return 0, conn.opErr("read", err)
+	}
+	defer conn.sock.wg.Done()
+	buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
+	var flags, bytes uint32
+	err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil)
+	n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err)
+	if err != nil {
+		if _, ok := err.(syscall.Errno); ok {
+			err = os.NewSyscallError("wsarecv", err)
+		}
+		return 0, conn.opErr("read", err)
+	} else if n == 0 {
+		err = io.EOF
+	}
+	return n, err
+}
+
+func (conn *HvsockConn) Write(b []byte) (int, error) {
+	t := 0
+	for len(b) != 0 {
+		n, err := conn.write(b)
+		if err != nil {
+			return t + n, err
+		}
+		t += n
+		b = b[n:]
+	}
+	return t, nil
+}
+
+func (conn *HvsockConn) write(b []byte) (int, error) {
+	c, err := conn.sock.prepareIo()
+	if err != nil {
+		return 0, conn.opErr("write", err)
+	}
+	defer conn.sock.wg.Done()
+	buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
+	var bytes uint32
+	err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil)
+	n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err)
+	if err != nil {
+		if _, ok := err.(syscall.Errno); ok {
+			err = os.NewSyscallError("wsasend", err)
+		}
+		return 0, conn.opErr("write", err)
+	}
+	return n, err
+}
+
+// Close closes the socket connection, failing any pending read or write calls.
+func (conn *HvsockConn) Close() error {
+	return conn.sock.Close()
+}
+
+func (conn *HvsockConn) shutdown(how int) error {
+	err := syscall.Shutdown(conn.sock.handle, how)
+	if err != nil {
+		return os.NewSyscallError("shutdown", err)
+	}
+	return nil
+}
+
+// CloseRead shuts down the read end of the socket.
+func (conn *HvsockConn) CloseRead() error {
+	err := conn.shutdown(syscall.SHUT_RD)
+	if err != nil {
+		return conn.opErr("close", err)
+	}
+	return nil
+}
+
+// CloseWrite shuts down the write end of the socket, notifying the other endpoint that
+// no more data will be written.
+func (conn *HvsockConn) CloseWrite() error {
+	err := conn.shutdown(syscall.SHUT_WR)
+	if err != nil {
+		return conn.opErr("close", err)
+	}
+	return nil
+}
+
+// LocalAddr returns the local address of the connection.
+func (conn *HvsockConn) LocalAddr() net.Addr {
+	return &conn.local
+}
+
+// RemoteAddr returns the remote address of the connection.
+func (conn *HvsockConn) RemoteAddr() net.Addr {
+	return &conn.remote
+}
+
+// SetDeadline implements the net.Conn SetDeadline method.
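+//
+// For example, to bound a single read to roughly one second (a sketch;
+// conn is an established *HvsockConn and buf a caller-provided buffer):
+//
+//	_ = conn.SetReadDeadline(time.Now().Add(time.Second))
+//	n, err := conn.Read(buf) // fails with a timeout error if no data arrives in time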
+func (conn *HvsockConn) SetDeadline(t time.Time) error { + conn.SetReadDeadline(t) + conn.SetWriteDeadline(t) + return nil +} + +// SetReadDeadline implements the net.Conn SetReadDeadline method. +func (conn *HvsockConn) SetReadDeadline(t time.Time) error { + return conn.sock.SetReadDeadline(t) +} + +// SetWriteDeadline implements the net.Conn SetWriteDeadline method. +func (conn *HvsockConn) SetWriteDeadline(t time.Time) error { + return conn.sock.SetWriteDeadline(t) +} diff --git a/extender/code/vendor/github.com/Microsoft/go-winio/pipe.go b/extender/code/vendor/github.com/Microsoft/go-winio/pipe.go new file mode 100644 index 000000000..d6a46f6a2 --- /dev/null +++ b/extender/code/vendor/github.com/Microsoft/go-winio/pipe.go @@ -0,0 +1,510 @@ +// +build windows + +package winio + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "os" + "runtime" + "syscall" + "time" + "unsafe" +) + +//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe +//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW +//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW +//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo +//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW +//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc +//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile +//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb +//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U +//sys rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) = ntdll.RtlDefaultNpAcl + +type ioStatusBlock struct { + Status, Information uintptr +} + +type objectAttributes struct { + Length uintptr + RootDirectory uintptr + ObjectName *unicodeString + Attributes uintptr + SecurityDescriptor *securityDescriptor + SecurityQoS uintptr +} + +type unicodeString struct { + Length uint16 + MaximumLength uint16 + Buffer uintptr +} + +type securityDescriptor struct { + Revision byte + Sbz1 byte + Control uint16 + Owner uintptr + Group uintptr + Sacl uintptr + Dacl uintptr +} + +type ntstatus int32 + +func (status ntstatus) Err() error { + if status >= 0 { + return nil + } + return rtlNtStatusToDosError(status) +} + +const ( + cERROR_PIPE_BUSY = syscall.Errno(231) + cERROR_NO_DATA = syscall.Errno(232) + cERROR_PIPE_CONNECTED = syscall.Errno(535) + cERROR_SEM_TIMEOUT = syscall.Errno(121) + + cSECURITY_SQOS_PRESENT = 0x100000 + cSECURITY_ANONYMOUS = 0 + + cPIPE_TYPE_MESSAGE = 4 + + cPIPE_READMODE_MESSAGE = 
2 + + cFILE_OPEN = 1 + cFILE_CREATE = 2 + + cFILE_PIPE_MESSAGE_TYPE = 1 + cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2 + + cSE_DACL_PRESENT = 4 +) + +var ( + // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed. + // This error should match net.errClosing since docker takes a dependency on its text. + ErrPipeListenerClosed = errors.New("use of closed network connection") + + errPipeWriteClosed = errors.New("pipe has been closed for write") +) + +type win32Pipe struct { + *win32File + path string +} + +type win32MessageBytePipe struct { + win32Pipe + writeClosed bool + readEOF bool +} + +type pipeAddress string + +func (f *win32Pipe) LocalAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) RemoteAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) SetDeadline(t time.Time) error { + f.SetReadDeadline(t) + f.SetWriteDeadline(t) + return nil +} + +// CloseWrite closes the write side of a message pipe in byte mode. +func (f *win32MessageBytePipe) CloseWrite() error { + if f.writeClosed { + return errPipeWriteClosed + } + err := f.win32File.Flush() + if err != nil { + return err + } + _, err = f.win32File.Write(nil) + if err != nil { + return err + } + f.writeClosed = true + return nil +} + +// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since +// they are used to implement CloseWrite(). +func (f *win32MessageBytePipe) Write(b []byte) (int, error) { + if f.writeClosed { + return 0, errPipeWriteClosed + } + if len(b) == 0 { + return 0, nil + } + return f.win32File.Write(b) +} + +// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message +// mode pipe will return io.EOF, as will all subsequent reads. +func (f *win32MessageBytePipe) Read(b []byte) (int, error) { + if f.readEOF { + return 0, io.EOF + } + n, err := f.win32File.Read(b) + if err == io.EOF { + // If this was the result of a zero-byte read, then + // it is possible that the read was due to a zero-size + // message. Since we are simulating CloseWrite with a + // zero-byte message, ensure that all future Read() calls + // also return EOF. + f.readEOF = true + } else if err == syscall.ERROR_MORE_DATA { + // ERROR_MORE_DATA indicates that the pipe's read mode is message mode + // and the message still has more bytes. Treat this as a success, since + // this package presents all named pipes as byte streams. + err = nil + } + return n, err +} + +func (s pipeAddress) Network() string { + return "pipe" +} + +func (s pipeAddress) String() string { + return string(s) +} + +// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. +func tryDialPipe(ctx context.Context, path *string) (syscall.Handle, error) { + for { + select { + case <-ctx.Done(): + return syscall.Handle(0), ctx.Err() + default: + h, err := createFile(*path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) + if err == nil { + return h, nil + } + if err != cERROR_PIPE_BUSY { + return h, &os.PathError{Err: err, Op: "open", Path: *path} + } + // Wait 10 msec and try again. This is a rather simplistic + // view, as we always try each 10 milliseconds. + time.Sleep(time.Millisecond * 10) + } + } +} + +// DialPipe connects to a named pipe by path, timing out if the connection +// takes longer than the specified duration. If timeout is nil, then we use +// a default timeout of 2 seconds. 
(We do not use WaitNamedPipe.) +func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { + var absTimeout time.Time + if timeout != nil { + absTimeout = time.Now().Add(*timeout) + } else { + absTimeout = time.Now().Add(time.Second * 2) + } + ctx, _ := context.WithDeadline(context.Background(), absTimeout) + conn, err := DialPipeContext(ctx, path) + if err == context.DeadlineExceeded { + return nil, ErrTimeout + } + return conn, err +} + +// DialPipeContext attempts to connect to a named pipe by `path` until `ctx` +// cancellation or timeout. +func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { + var err error + var h syscall.Handle + h, err = tryDialPipe(ctx, &path) + if err != nil { + return nil, err + } + + var flags uint32 + err = getNamedPipeInfo(h, &flags, nil, nil, nil) + if err != nil { + return nil, err + } + + f, err := makeWin32File(h) + if err != nil { + syscall.Close(h) + return nil, err + } + + // If the pipe is in message mode, return a message byte pipe, which + // supports CloseWrite(). + if flags&cPIPE_TYPE_MESSAGE != 0 { + return &win32MessageBytePipe{ + win32Pipe: win32Pipe{win32File: f, path: path}, + }, nil + } + return &win32Pipe{win32File: f, path: path}, nil +} + +type acceptResponse struct { + f *win32File + err error +} + +type win32PipeListener struct { + firstHandle syscall.Handle + path string + config PipeConfig + acceptCh chan (chan acceptResponse) + closeCh chan int + doneCh chan int +} + +func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) { + path16, err := syscall.UTF16FromString(path) + if err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + + var oa objectAttributes + oa.Length = unsafe.Sizeof(oa) + + var ntPath unicodeString + if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + defer localFree(ntPath.Buffer) + oa.ObjectName = &ntPath + + // The security descriptor is only needed for the first pipe. + if first { + if sd != nil { + len := uint32(len(sd)) + sdb := localAlloc(0, len) + defer localFree(sdb) + copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd) + oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb)) + } else { + // Construct the default named pipe security descriptor. + var dacl uintptr + if err := rtlDefaultNpAcl(&dacl).Err(); err != nil { + return 0, fmt.Errorf("getting default named pipe ACL: %s", err) + } + defer localFree(dacl) + + sdb := &securityDescriptor{ + Revision: 1, + Control: cSE_DACL_PRESENT, + Dacl: dacl, + } + oa.SecurityDescriptor = sdb + } + } + + typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS) + if c.MessageMode { + typ |= cFILE_PIPE_MESSAGE_TYPE + } + + disposition := uint32(cFILE_OPEN) + access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE) + if first { + disposition = cFILE_CREATE + // By not asking for read or write access, the named pipe file system + // will put this pipe into an initially disconnected state, blocking + // client connections until the next call with first == false. 
+		access = syscall.SYNCHRONIZE
+	}
+
+	timeout := int64(-50 * 10000) // 50ms
+
+	var (
+		h    syscall.Handle
+		iosb ioStatusBlock
+	)
+	err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err()
+	if err != nil {
+		return 0, &os.PathError{Op: "open", Path: path, Err: err}
+	}
+
+	runtime.KeepAlive(ntPath)
+	return h, nil
+}
+
+func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
+	h, err := makeServerPipeHandle(l.path, nil, &l.config, false)
+	if err != nil {
+		return nil, err
+	}
+	f, err := makeWin32File(h)
+	if err != nil {
+		syscall.Close(h)
+		return nil, err
+	}
+	return f, nil
+}
+
+func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) {
+	p, err := l.makeServerPipe()
+	if err != nil {
+		return nil, err
+	}
+
+	// Wait for the client to connect.
+	ch := make(chan error)
+	go func(p *win32File) {
+		ch <- connectPipe(p)
+	}(p)
+
+	select {
+	case err = <-ch:
+		if err != nil {
+			p.Close()
+			p = nil
+		}
+	case <-l.closeCh:
+		// Abort the connect request by closing the handle.
+		p.Close()
+		p = nil
+		err = <-ch
+		if err == nil || err == ErrFileClosed {
+			err = ErrPipeListenerClosed
+		}
+	}
+	return p, err
+}
+
+func (l *win32PipeListener) listenerRoutine() {
+	closed := false
+	for !closed {
+		select {
+		case <-l.closeCh:
+			closed = true
+		case responseCh := <-l.acceptCh:
+			var (
+				p   *win32File
+				err error
+			)
+			for {
+				p, err = l.makeConnectedServerPipe()
+				// If the connection was immediately closed by the client, try
+				// again.
+				if err != cERROR_NO_DATA {
+					break
+				}
+			}
+			responseCh <- acceptResponse{p, err}
+			closed = err == ErrPipeListenerClosed
+		}
+	}
+	syscall.Close(l.firstHandle)
+	l.firstHandle = 0
+	// Notify Close() and Accept() callers that the handle has been closed.
+	close(l.doneCh)
+}
+
+// PipeConfig contains configuration for the pipe listener.
+type PipeConfig struct {
+	// SecurityDescriptor contains a Windows security descriptor in SDDL format.
+	SecurityDescriptor string
+
+	// MessageMode determines whether the pipe is in byte or message mode. In either
+	// case the pipe is read in byte mode by default. The only practical difference in
+	// this implementation is that CloseWrite() is only supported for message mode pipes;
+	// CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only
+	// transferred to the reader (and returned as io.EOF in this implementation)
+	// when the pipe is in message mode.
+	MessageMode bool
+
+	// InputBufferSize specifies the size of the input buffer, in bytes.
+	InputBufferSize int32
+
+	// OutputBufferSize specifies the size of the output buffer, in bytes.
+	OutputBufferSize int32
+}
+
+// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe.
+// The pipe must not already exist.
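+//
+// A minimal listen/dial sketch (the pipe name \\.\pipe\demo and the echo
+// handler are illustrative only):
+//
+//	l, err := winio.ListenPipe(`\\.\pipe\demo`, nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer l.Close()
+//	go func() {
+//		c, err := l.Accept()
+//		if err == nil {
+//			io.Copy(c, c) // echo until the client closes
+//		}
+//	}()
+//	timeout := 2 * time.Second
+//	c, err := winio.DialPipe(`\\.\pipe\demo`, &timeout)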
+func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { + var ( + sd []byte + err error + ) + if c == nil { + c = &PipeConfig{} + } + if c.SecurityDescriptor != "" { + sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor) + if err != nil { + return nil, err + } + } + h, err := makeServerPipeHandle(path, sd, c, true) + if err != nil { + return nil, err + } + l := &win32PipeListener{ + firstHandle: h, + path: path, + config: *c, + acceptCh: make(chan (chan acceptResponse)), + closeCh: make(chan int), + doneCh: make(chan int), + } + go l.listenerRoutine() + return l, nil +} + +func connectPipe(p *win32File) error { + c, err := p.prepareIo() + if err != nil { + return err + } + defer p.wg.Done() + + err = connectNamedPipe(p.handle, &c.o) + _, err = p.asyncIo(c, nil, 0, err) + if err != nil && err != cERROR_PIPE_CONNECTED { + return err + } + return nil +} + +func (l *win32PipeListener) Accept() (net.Conn, error) { + ch := make(chan acceptResponse) + select { + case l.acceptCh <- ch: + response := <-ch + err := response.err + if err != nil { + return nil, err + } + if l.config.MessageMode { + return &win32MessageBytePipe{ + win32Pipe: win32Pipe{win32File: response.f, path: l.path}, + }, nil + } + return &win32Pipe{win32File: response.f, path: l.path}, nil + case <-l.doneCh: + return nil, ErrPipeListenerClosed + } +} + +func (l *win32PipeListener) Close() error { + select { + case l.closeCh <- 1: + <-l.doneCh + case <-l.doneCh: + } + return nil +} + +func (l *win32PipeListener) Addr() net.Addr { + return pipeAddress(l.path) +} diff --git a/extender/code/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/extender/code/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go new file mode 100644 index 000000000..586406577 --- /dev/null +++ b/extender/code/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go @@ -0,0 +1,235 @@ +// Package guid provides a GUID type. The backing structure for a GUID is +// identical to that used by the golang.org/x/sys/windows GUID type. +// There are two main binary encodings used for a GUID, the big-endian encoding, +// and the Windows (mixed-endian) encoding. See here for details: +// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding +package guid + +import ( + "crypto/rand" + "crypto/sha1" + "encoding" + "encoding/binary" + "fmt" + "strconv" + + "golang.org/x/sys/windows" +) + +// Variant specifies which GUID variant (or "type") of the GUID. It determines +// how the entirety of the rest of the GUID is interpreted. +type Variant uint8 + +// The variants specified by RFC 4122. +const ( + // VariantUnknown specifies a GUID variant which does not conform to one of + // the variant encodings specified in RFC 4122. + VariantUnknown Variant = iota + VariantNCS + VariantRFC4122 + VariantMicrosoft + VariantFuture +) + +// Version specifies how the bits in the GUID were generated. For instance, a +// version 4 GUID is randomly generated, and a version 5 is generated from the +// hash of an input string. +type Version uint8 + +var _ = (encoding.TextMarshaler)(GUID{}) +var _ = (encoding.TextUnmarshaler)(&GUID{}) + +// GUID represents a GUID/UUID. It has the same structure as +// golang.org/x/sys/windows.GUID so that it can be used with functions expecting +// that type. It is defined as its own type so that stringification and +// marshaling can be supported. The representation matches that used by native +// Windows code. +type GUID windows.GUID + +// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122. 
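+//
+// For example (a sketch; the printed value is random each run):
+//
+//	g, err := guid.NewV4()
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Println(g) // version 4, variant RFC 4122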
+func NewV4() (GUID, error) { + var b [16]byte + if _, err := rand.Read(b[:]); err != nil { + return GUID{}, err + } + + g := FromArray(b) + g.setVersion(4) // Version 4 means randomly generated. + g.setVariant(VariantRFC4122) + + return g, nil +} + +// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing) +// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name, +// and the sample code treats it as a series of bytes, so we do the same here. +// +// Some implementations, such as those found on Windows, treat the name as a +// big-endian UTF16 stream of bytes. If that is desired, the string can be +// encoded as such before being passed to this function. +func NewV5(namespace GUID, name []byte) (GUID, error) { + b := sha1.New() + namespaceBytes := namespace.ToArray() + b.Write(namespaceBytes[:]) + b.Write(name) + + a := [16]byte{} + copy(a[:], b.Sum(nil)) + + g := FromArray(a) + g.setVersion(5) // Version 5 means generated from a string. + g.setVariant(VariantRFC4122) + + return g, nil +} + +func fromArray(b [16]byte, order binary.ByteOrder) GUID { + var g GUID + g.Data1 = order.Uint32(b[0:4]) + g.Data2 = order.Uint16(b[4:6]) + g.Data3 = order.Uint16(b[6:8]) + copy(g.Data4[:], b[8:16]) + return g +} + +func (g GUID) toArray(order binary.ByteOrder) [16]byte { + b := [16]byte{} + order.PutUint32(b[0:4], g.Data1) + order.PutUint16(b[4:6], g.Data2) + order.PutUint16(b[6:8], g.Data3) + copy(b[8:16], g.Data4[:]) + return b +} + +// FromArray constructs a GUID from a big-endian encoding array of 16 bytes. +func FromArray(b [16]byte) GUID { + return fromArray(b, binary.BigEndian) +} + +// ToArray returns an array of 16 bytes representing the GUID in big-endian +// encoding. +func (g GUID) ToArray() [16]byte { + return g.toArray(binary.BigEndian) +} + +// FromWindowsArray constructs a GUID from a Windows encoding array of bytes. +func FromWindowsArray(b [16]byte) GUID { + return fromArray(b, binary.LittleEndian) +} + +// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows +// encoding. +func (g GUID) ToWindowsArray() [16]byte { + return g.toArray(binary.LittleEndian) +} + +func (g GUID) String() string { + return fmt.Sprintf( + "%08x-%04x-%04x-%04x-%012x", + g.Data1, + g.Data2, + g.Data3, + g.Data4[:2], + g.Data4[2:]) +} + +// FromString parses a string containing a GUID and returns the GUID. The only +// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` +// format. 
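+//
+// Parsing round-trips with String(); the literal below is the vsock service
+// template GUID used in hvsock.go, chosen only as a valid example:
+//
+//	g, err := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3")
+//	if err == nil {
+//		fmt.Println(g.String() == "00000000-facb-11e6-bd58-64006a7986d3") // true
+//	}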
+func FromString(s string) (GUID, error) {
+	if len(s) != 36 {
+		return GUID{}, fmt.Errorf("invalid GUID %q", s)
+	}
+	if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+		return GUID{}, fmt.Errorf("invalid GUID %q", s)
+	}
+
+	var g GUID
+
+	data1, err := strconv.ParseUint(s[0:8], 16, 32)
+	if err != nil {
+		return GUID{}, fmt.Errorf("invalid GUID %q", s)
+	}
+	g.Data1 = uint32(data1)
+
+	data2, err := strconv.ParseUint(s[9:13], 16, 16)
+	if err != nil {
+		return GUID{}, fmt.Errorf("invalid GUID %q", s)
+	}
+	g.Data2 = uint16(data2)
+
+	data3, err := strconv.ParseUint(s[14:18], 16, 16)
+	if err != nil {
+		return GUID{}, fmt.Errorf("invalid GUID %q", s)
+	}
+	g.Data3 = uint16(data3)
+
+	for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} {
+		v, err := strconv.ParseUint(s[x:x+2], 16, 8)
+		if err != nil {
+			return GUID{}, fmt.Errorf("invalid GUID %q", s)
+		}
+		g.Data4[i] = uint8(v)
+	}
+
+	return g, nil
+}
+
+func (g *GUID) setVariant(v Variant) {
+	d := g.Data4[0]
+	switch v {
+	case VariantNCS:
+		d = (d & 0x7f)
+	case VariantRFC4122:
+		d = (d & 0x3f) | 0x80
+	case VariantMicrosoft:
+		d = (d & 0x1f) | 0xc0
+	case VariantFuture:
+		d = (d & 0x0f) | 0xe0
+	case VariantUnknown:
+		fallthrough
+	default:
+		panic(fmt.Sprintf("invalid variant: %d", v))
+	}
+	g.Data4[0] = d
+}
+
+// Variant returns the GUID variant, as defined in RFC 4122.
+func (g GUID) Variant() Variant {
+	b := g.Data4[0]
+	if b&0x80 == 0 {
+		return VariantNCS
+	} else if b&0xc0 == 0x80 {
+		return VariantRFC4122
+	} else if b&0xe0 == 0xc0 {
+		return VariantMicrosoft
+	} else if b&0xe0 == 0xe0 {
+		return VariantFuture
+	}
+	return VariantUnknown
+}
+
+func (g *GUID) setVersion(v Version) {
+	g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12)
+}
+
+// Version returns the GUID version, as defined in RFC 4122.
+func (g GUID) Version() Version {
+	return Version((g.Data3 & 0xF000) >> 12)
+}
+
+// MarshalText returns the textual representation of the GUID.
+func (g GUID) MarshalText() ([]byte, error) {
+	return []byte(g.String()), nil
+}
+
+// UnmarshalText takes the textual representation of a GUID, and unmarshals it
+// into this GUID.
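+//
+// Since GUID implements encoding.TextUnmarshaler, GUIDs can also be decoded
+// straight from JSON strings (a sketch; the field name is arbitrary):
+//
+//	var v struct{ ID guid.GUID }
+//	err := json.Unmarshal([]byte(`{"ID":"00000000-facb-11e6-bd58-64006a7986d3"}`), &v)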
+func (g *GUID) UnmarshalText(text []byte) error { + g2, err := FromString(string(text)) + if err != nil { + return err + } + *g = g2 + return nil +} diff --git a/extender/code/vendor/github.com/Microsoft/go-winio/privilege.go b/extender/code/vendor/github.com/Microsoft/go-winio/privilege.go new file mode 100644 index 000000000..9c83d36fe --- /dev/null +++ b/extender/code/vendor/github.com/Microsoft/go-winio/privilege.go @@ -0,0 +1,202 @@ +// +build windows + +package winio + +import ( + "bytes" + "encoding/binary" + "fmt" + "runtime" + "sync" + "syscall" + "unicode/utf16" + + "golang.org/x/sys/windows" +) + +//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges +//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf +//sys revertToSelf() (err error) = advapi32.RevertToSelf +//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken +//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread +//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW +//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW +//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW + +const ( + SE_PRIVILEGE_ENABLED = 2 + + ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300 + + SeBackupPrivilege = "SeBackupPrivilege" + SeRestorePrivilege = "SeRestorePrivilege" +) + +const ( + securityAnonymous = iota + securityIdentification + securityImpersonation + securityDelegation +) + +var ( + privNames = make(map[string]uint64) + privNameMutex sync.Mutex +) + +// PrivilegeError represents an error enabling privileges. +type PrivilegeError struct { + privileges []uint64 +} + +func (e *PrivilegeError) Error() string { + s := "" + if len(e.privileges) > 1 { + s = "Could not enable privileges " + } else { + s = "Could not enable privilege " + } + for i, p := range e.privileges { + if i != 0 { + s += ", " + } + s += `"` + s += getPrivilegeName(p) + s += `"` + } + return s +} + +// RunWithPrivilege enables a single privilege for a function call. +func RunWithPrivilege(name string, fn func() error) error { + return RunWithPrivileges([]string{name}, fn) +} + +// RunWithPrivileges enables privileges for a function call. +func RunWithPrivileges(names []string, fn func() error) error { + privileges, err := mapPrivileges(names) + if err != nil { + return err + } + runtime.LockOSThread() + defer runtime.UnlockOSThread() + token, err := newThreadToken() + if err != nil { + return err + } + defer releaseThreadToken(token) + err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED) + if err != nil { + return err + } + return fn() +} + +func mapPrivileges(names []string) ([]uint64, error) { + var privileges []uint64 + privNameMutex.Lock() + defer privNameMutex.Unlock() + for _, name := range names { + p, ok := privNames[name] + if !ok { + err := lookupPrivilegeValue("", name, &p) + if err != nil { + return nil, err + } + privNames[name] = p + } + privileges = append(privileges, p) + } + return privileges, nil +} + +// EnableProcessPrivileges enables privileges globally for the process. 
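+//
+// For one-off privileged operations, RunWithPrivilege (above) is usually
+// preferable, since it scopes the privilege to a single call on a locked
+// OS thread, e.g. (a sketch; doBackup is hypothetical):
+//
+//	err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
+//		return doBackup()
+//	})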
+func EnableProcessPrivileges(names []string) error {
+	return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED)
+}
+
+// DisableProcessPrivileges disables privileges globally for the process.
+func DisableProcessPrivileges(names []string) error {
+	return enableDisableProcessPrivilege(names, 0)
+}
+
+func enableDisableProcessPrivilege(names []string, action uint32) error {
+	privileges, err := mapPrivileges(names)
+	if err != nil {
+		return err
+	}
+
+	p, _ := windows.GetCurrentProcess()
+	var token windows.Token
+	err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
+	if err != nil {
+		return err
+	}
+
+	defer token.Close()
+	return adjustPrivileges(token, privileges, action)
+}
+
+func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error {
+	var b bytes.Buffer
+	binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
+	for _, p := range privileges {
+		binary.Write(&b, binary.LittleEndian, p)
+		binary.Write(&b, binary.LittleEndian, action)
+	}
+	prevState := make([]byte, b.Len())
+	reqSize := uint32(0)
+	success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize)
+	if !success {
+		return err
+	}
+	if err == ERROR_NOT_ALL_ASSIGNED {
+		return &PrivilegeError{privileges}
+	}
+	return nil
+}
+
+func getPrivilegeName(luid uint64) string {
+	var nameBuffer [256]uint16
+	bufSize := uint32(len(nameBuffer))
+	err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize)
+	if err != nil {
+		return fmt.Sprintf("<unknown privilege %d>", luid)
+	}
+
+	var displayNameBuffer [256]uint16
+	displayBufSize := uint32(len(displayNameBuffer))
+	var langID uint32
+	err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID)
+	if err != nil {
+		return fmt.Sprintf("<unknown privilege %s>", string(utf16.Decode(nameBuffer[:bufSize])))
+	}
+
+	return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
+}
+
+func newThreadToken() (windows.Token, error) {
+	err := impersonateSelf(securityImpersonation)
+	if err != nil {
+		return 0, err
+	}
+
+	var token windows.Token
+	err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token)
+	if err != nil {
+		rerr := revertToSelf()
+		if rerr != nil {
+			panic(rerr)
+		}
+		return 0, err
+	}
+	return token, nil
+}
+
+func releaseThreadToken(h windows.Token) {
+	err := revertToSelf()
+	if err != nil {
+		panic(err)
+	}
+	h.Close()
+}
diff --git a/extender/code/vendor/github.com/Microsoft/go-winio/reparse.go b/extender/code/vendor/github.com/Microsoft/go-winio/reparse.go
new file mode 100644
index 000000000..fc1ee4d3a
--- /dev/null
+++ b/extender/code/vendor/github.com/Microsoft/go-winio/reparse.go
@@ -0,0 +1,128 @@
+package winio
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"strings"
+	"unicode/utf16"
+	"unsafe"
+)
+
+const (
+	reparseTagMountPoint = 0xA0000003
+	reparseTagSymlink    = 0xA000000C
+)
+
+type reparseDataBuffer struct {
+	ReparseTag           uint32
+	ReparseDataLength    uint16
+	Reserved             uint16
+	SubstituteNameOffset uint16
+	SubstituteNameLength uint16
+	PrintNameOffset      uint16
+	PrintNameLength      uint16
+}
+
+// ReparsePoint describes a Win32 symlink or mount point.
+type ReparsePoint struct {
+	Target       string
+	IsMountPoint bool
+}
+
+// UnsupportedReparsePointError is returned when trying to decode a non-symlink or
+// mount point reparse point.
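+//
+// Callers can detect it with a type assertion (a sketch; raw is assumed to
+// hold a REPARSE_DATA_BUFFER read from a file handle):
+//
+//	rp, err := winio.DecodeReparsePoint(raw)
+//	if uerr, ok := err.(*winio.UnsupportedReparsePointError); ok {
+//		log.Printf("skipping reparse tag %#x", uerr.Tag)
+//	}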
+type UnsupportedReparsePointError struct { + Tag uint32 +} + +func (e *UnsupportedReparsePointError) Error() string { + return fmt.Sprintf("unsupported reparse point %x", e.Tag) +} + +// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink +// or a mount point. +func DecodeReparsePoint(b []byte) (*ReparsePoint, error) { + tag := binary.LittleEndian.Uint32(b[0:4]) + return DecodeReparsePointData(tag, b[8:]) +} + +func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) { + isMountPoint := false + switch tag { + case reparseTagMountPoint: + isMountPoint = true + case reparseTagSymlink: + default: + return nil, &UnsupportedReparsePointError{tag} + } + nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6]) + if !isMountPoint { + nameOffset += 4 + } + nameLength := binary.LittleEndian.Uint16(b[6:8]) + name := make([]uint16, nameLength/2) + err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name) + if err != nil { + return nil, err + } + return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil +} + +func isDriveLetter(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} + +// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or +// mount point. +func EncodeReparsePoint(rp *ReparsePoint) []byte { + // Generate an NT path and determine if this is a relative path. + var ntTarget string + relative := false + if strings.HasPrefix(rp.Target, `\\?\`) { + ntTarget = `\??\` + rp.Target[4:] + } else if strings.HasPrefix(rp.Target, `\\`) { + ntTarget = `\??\UNC\` + rp.Target[2:] + } else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' { + ntTarget = `\??\` + rp.Target + } else { + ntTarget = rp.Target + relative = true + } + + // The paths must be NUL-terminated even though they are counted strings. 
+ target16 := utf16.Encode([]rune(rp.Target + "\x00")) + ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00")) + + size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8 + size += len(ntTarget16)*2 + len(target16)*2 + + tag := uint32(reparseTagMountPoint) + if !rp.IsMountPoint { + tag = reparseTagSymlink + size += 4 // Add room for symlink flags + } + + data := reparseDataBuffer{ + ReparseTag: tag, + ReparseDataLength: uint16(size), + SubstituteNameOffset: 0, + SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2), + PrintNameOffset: uint16(len(ntTarget16) * 2), + PrintNameLength: uint16((len(target16) - 1) * 2), + } + + var b bytes.Buffer + binary.Write(&b, binary.LittleEndian, &data) + if !rp.IsMountPoint { + flags := uint32(0) + if relative { + flags |= 1 + } + binary.Write(&b, binary.LittleEndian, flags) + } + + binary.Write(&b, binary.LittleEndian, ntTarget16) + binary.Write(&b, binary.LittleEndian, target16) + return b.Bytes() +} diff --git a/extender/code/vendor/github.com/Microsoft/go-winio/sd.go b/extender/code/vendor/github.com/Microsoft/go-winio/sd.go new file mode 100644 index 000000000..db1b370a1 --- /dev/null +++ b/extender/code/vendor/github.com/Microsoft/go-winio/sd.go @@ -0,0 +1,98 @@ +// +build windows + +package winio + +import ( + "syscall" + "unsafe" +) + +//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW +//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW +//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW +//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW +//sys localFree(mem uintptr) = LocalFree +//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength + +const ( + cERROR_NONE_MAPPED = syscall.Errno(1332) +) + +type AccountLookupError struct { + Name string + Err error +} + +func (e *AccountLookupError) Error() string { + if e.Name == "" { + return "lookup account: empty account name specified" + } + var s string + switch e.Err { + case cERROR_NONE_MAPPED: + s = "not found" + default: + s = e.Err.Error() + } + return "lookup account " + e.Name + ": " + s +} + +type SddlConversionError struct { + Sddl string + Err error +} + +func (e *SddlConversionError) Error() string { + return "convert " + e.Sddl + ": " + e.Err.Error() +} + +// LookupSidByName looks up the SID of an account by name +func LookupSidByName(name string) (sid string, err error) { + if name == "" { + return "", &AccountLookupError{name, cERROR_NONE_MAPPED} + } + + var sidSize, sidNameUse, refDomainSize uint32 + err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse) + if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { + return "", &AccountLookupError{name, err} + } + sidBuffer := make([]byte, sidSize) + refDomainBuffer := make([]uint16, refDomainSize) + err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) + if err != nil { + return "", &AccountLookupError{name, err} + } + var strBuffer *uint16 + err = convertSidToStringSid(&sidBuffer[0], &strBuffer) + if err != nil { + return "", 
&AccountLookupError{name, err}
+	}
+	sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:])
+	localFree(uintptr(unsafe.Pointer(strBuffer)))
+	return sid, nil
+}
+
+func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
+	var sdBuffer uintptr
+	err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil)
+	if err != nil {
+		return nil, &SddlConversionError{sddl, err}
+	}
+	defer localFree(sdBuffer)
+	sd := make([]byte, getSecurityDescriptorLength(sdBuffer))
+	copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)])
+	return sd, nil
+}
+
+func SecurityDescriptorToSddl(sd []byte) (string, error) {
+	var sddl *uint16
+	// The returned string length seems to include an arbitrary number of terminating NULs.
+	// Don't use it.
+	err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil)
+	if err != nil {
+		return "", err
+	}
+	defer localFree(uintptr(unsafe.Pointer(sddl)))
+	return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil
+}
diff --git a/extender/code/vendor/github.com/Microsoft/go-winio/syscall.go b/extender/code/vendor/github.com/Microsoft/go-winio/syscall.go
new file mode 100644
index 000000000..5cb52bc74
--- /dev/null
+++ b/extender/code/vendor/github.com/Microsoft/go-winio/syscall.go
@@ -0,0 +1,3 @@
+package winio
+
+//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go
diff --git a/extender/code/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/extender/code/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
new file mode 100644
index 000000000..e26b01faf
--- /dev/null
+++ b/extender/code/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
@@ -0,0 +1,562 @@
+// Code generated by 'go generate'; DO NOT EDIT.
+
+package winio
+
+import (
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+	errnoERROR_IO_PENDING = 997
+)
+
+var (
+	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+	switch e {
+	case 0:
+		return nil
+	case errnoERROR_IO_PENDING:
+		return errERROR_IO_PENDING
+	}
+	// TODO: add more here, after collecting data on the common
+	// error values seen on Windows. (perhaps when running
+	// all.bat?)
+ return e +} + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") + modntdll = windows.NewLazySystemDLL("ntdll.dll") + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") + procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") + procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") + procCreateFileW = modkernel32.NewProc("CreateFileW") + procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") + procLocalAlloc = modkernel32.NewProc("LocalAlloc") + procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") + procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") + procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") + procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") + procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") + procLocalFree = modkernel32.NewProc("LocalFree") + procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") + procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") + procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") + procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") + procBackupRead = modkernel32.NewProc("BackupRead") + procBackupWrite = modkernel32.NewProc("BackupWrite") + procbind = modws2_32.NewProc("bind") +) + +func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) + newport = syscall.Handle(r0) + if newport == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o 
**ioOperation, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) +} + +func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile) +} + +func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { + r1, _, e1 := 
syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { + r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0) + ptr = uintptr(r0) + return +} + +func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) { + r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) + status = ntstatus(r0) + return +} + +func rtlNtStatusToDosError(status ntstatus) (winerr error) { + r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) + if r0 != 0 { + winerr = syscall.Errno(r0) + } + return +} + +func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) { + r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0) + status = ntstatus(r0) + return +} + +func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) { + r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0) + status = ntstatus(r0) + return +} + +func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(accountName) + if err != nil { + return + } + return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) +} + +func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { 
+ err = syscall.EINVAL + } + } + return +} + +func convertSidToStringSid(sid *byte, str **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(str) + if err != nil { + return + } + return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) +} + +func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func localFree(mem uintptr) { + syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) + return +} + +func getSecurityDescriptorLength(sd uintptr) (len uint32) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) + len = uint32(r0) + return +} + +func getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { + var _p0 uint32 + if releaseAll { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) + success = r0 != 0 + if true { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func impersonateSelf(level uint32) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func revertToSelf() (err error) { + r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 
0, 0, 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { + var _p0 uint32 + if openAsSelf { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getCurrentThread() (h syscall.Handle) { + r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) + h = syscall.Handle(r0) + return +} + +func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + var _p1 *uint16 + _p1, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _lookupPrivilegeValue(_p0, _p1, luid) +} + +func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { + r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeName(_p0, luid, buffer, size) +} + +func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) +} + +func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } else { + _p1 = 0 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } else { + _p2 = 0 + } + r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func 
backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } else { + _p1 = 0 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } else { + _p2 = 0 + } + r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socketError { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/extender/code/vendor/github.com/aws/aws-sdk-go/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/extender/code/vendor/github.com/aws/aws-sdk-go/NOTICE.txt new file mode 100644 index 000000000..899129ecc --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/NOTICE.txt @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go new file mode 100644 index 000000000..99849c0e1 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -0,0 +1,164 @@ +// Package awserr represents API error interface accessors for the SDK. +package awserr + +// An Error wraps lower level errors with code, message and an original error. 
+// The underlying concrete error type may also satisfy other interfaces which
+// can be used to obtain more specific information about the error.
+//
+// Calling Error() or String() will always include the full information about
+// an error based on its underlying type.
+//
+// Example:
+//
+//     output, err := s3manage.Upload(svc, input, opts)
+//     if err != nil {
+//         if awsErr, ok := err.(awserr.Error); ok {
+//             // Get error details
+//             log.Println("Error:", awsErr.Code(), awsErr.Message())
+//
+//             // Prints out full error message, including original error if there was one.
+//             log.Println("Error:", awsErr.Error())
+//
+//             // Get original error
+//             if origErr := awsErr.OrigErr(); origErr != nil {
+//                 // operate on original error.
+//             }
+//         } else {
+//             fmt.Println(err.Error())
+//         }
+//     }
+//
+type Error interface {
+	// Satisfy the generic error interface.
+	error
+
+	// Returns the short phrase depicting the classification of the error.
+	Code() string
+
+	// Returns the error details message.
+	Message() string
+
+	// Returns the original error if one was set. Nil is returned if not set.
+	OrigErr() error
+}
+
+// BatchError is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Deprecated: Replaced with BatchedErrors. Only defined for backwards
+// compatibility.
+type BatchError interface {
+	// Satisfy the generic error interface.
+	error
+
+	// Returns the short phrase depicting the classification of the error.
+	Code() string
+
+	// Returns the error details message.
+	Message() string
+
+	// Returns the original error if one was set. Nil is returned if not set.
+	OrigErrs() []error
+}
+
+// BatchedErrors is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Replaces BatchError
+type BatchedErrors interface {
+	// Satisfy the base Error interface.
+	Error
+
+	// Returns the original error if one was set. Nil is returned if not set.
+	OrigErrs() []error
+}
+
+// New returns an Error object described by the code, message, and origErr.
+//
+// If origErr satisfies the Error interface it will not be wrapped within a new
+// Error object and will instead be returned.
+func New(code, message string, origErr error) Error {
+	var errs []error
+	if origErr != nil {
+		errs = append(errs, origErr)
+	}
+	return newBaseError(code, message, errs)
+}
+
+// NewBatchError returns a BatchedErrors with a collection of errors as an
+// array of errors.
+func NewBatchError(code, message string, errs []error) BatchedErrors {
+	return newBaseError(code, message, errs)
+}
+
+// A RequestFailure is an interface to extract request failure information from
+// an Error such as the request ID of the failed request returned by a service.
+// RequestFailures may not always have a requestID value if the request failed
+// prior to reaching the service such as a connection error.
+// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if reqerr, ok := err.(RequestFailure); ok { +// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) +// } else { +// log.Println("Error:", err.Error()) +// } +// } +// +// Combined with awserr.Error: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Generic AWS Error with Code, Message, and original error (if any) +// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) +// +// if reqErr, ok := err.(awserr.RequestFailure); ok { +// // A service error occurred +// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type RequestFailure interface { + Error + + // The status code of the HTTP response. + StatusCode() int + + // The request ID returned by the service for a request failure. This will + // be empty if no request ID is available such as the request failed due + // to a connection error. + RequestID() string +} + +// NewRequestFailure returns a wrapped error with additional information for +// request status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. +func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { + return newRequestError(err, statusCode, reqID) +} + +// UnmarshalError provides the interface for the SDK failing to unmarshal data. +type UnmarshalError interface { + awsError + Bytes() []byte +} + +// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding +// the bytes that fail to unmarshal to the error. +func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError { + return &unmarshalError{ + awsError: New("UnmarshalError", msg, err), + bytes: bytes, + } +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go new file mode 100644 index 000000000..9cf7eaf40 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -0,0 +1,221 @@ +package awserr + +import ( + "encoding/hex" + "fmt" +) + +// SprintError returns a string of the formatted error code. +// +// Both extra and origErr are optional. If they are included their lines +// will be added, but if they are not included their lines will be ignored. +func SprintError(code, message, extra string, origErr error) string { + msg := fmt.Sprintf("%s: %s", code, message) + if extra != "" { + msg = fmt.Sprintf("%s\n\t%s", msg, extra) + } + if origErr != nil { + msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) + } + return msg +} + +// A baseError wraps the code and message which defines an error. It also +// can be used to wrap an original error object. +// +// Should be used as the root for errors satisfying the awserr.Error. Also +// for any error which does not fit into a specific error wrapper type. +type baseError struct { + // Classification of error + code string + + // Detailed information about error + message string + + // Optional original error this error is based off of. Allows building + // chained errors. + errs []error +} + +// newBaseError returns an error object for the code, message, and errors. 
+// +// code is a short no whitespace phrase depicting the classification of +// the error that is being created. +// +// message is the free flow string containing detailed information about the +// error. +// +// origErrs is the error objects which will be nested under the new errors to +// be returned. +func newBaseError(code, message string, origErrs []error) *baseError { + b := &baseError{ + code: code, + message: message, + errs: origErrs, + } + + return b +} + +// Error returns the string representation of the error. +// +// See ErrorWithExtra for formatting. +// +// Satisfies the error interface. +func (b baseError) Error() string { + size := len(b.errs) + if size > 0 { + return SprintError(b.code, b.message, "", errorList(b.errs)) + } + + return SprintError(b.code, b.message, "", nil) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (b baseError) String() string { + return b.Error() +} + +// Code returns the short phrase depicting the classification of the error. +func (b baseError) Code() string { + return b.code +} + +// Message returns the error details message. +func (b baseError) Message() string { + return b.message +} + +// OrigErr returns the original error if one was set. Nil is returned if no +// error was set. This only returns the first element in the list. If the full +// list is needed, use BatchedErrors. +func (b baseError) OrigErr() error { + switch len(b.errs) { + case 0: + return nil + case 1: + return b.errs[0] + default: + if err, ok := b.errs[0].(Error); ok { + return NewBatchError(err.Code(), err.Message(), b.errs[1:]) + } + return NewBatchError("BatchedErrors", + "multiple errors occurred", b.errs) + } +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (b baseError) OrigErrs() []error { + return b.errs +} + +// So that the Error interface type can be included as an anonymous field +// in the requestError struct and not conflict with the error.Error() method. +type awsError Error + +// A requestError wraps a request or service error. +// +// Composed of baseError for code, message, and original error. +type requestError struct { + awsError + statusCode int + requestID string + bytes []byte +} + +// newRequestError returns a wrapped error with additional information for +// request status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. +// +// Also wraps original errors via the baseError. +func newRequestError(err Error, statusCode int, requestID string) *requestError { + return &requestError{ + awsError: err, + statusCode: statusCode, + requestID: requestID, + } +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (r requestError) Error() string { + extra := fmt.Sprintf("status code: %d, request id: %s", + r.statusCode, r.requestID) + return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. 
+func (r requestError) String() string { + return r.Error() +} + +// StatusCode returns the wrapped status code for the error +func (r requestError) StatusCode() int { + return r.statusCode +} + +// RequestID returns the wrapped requestID +func (r requestError) RequestID() string { + return r.requestID +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (r requestError) OrigErrs() []error { + if b, ok := r.awsError.(BatchedErrors); ok { + return b.OrigErrs() + } + return []error{r.OrigErr()} +} + +type unmarshalError struct { + awsError + bytes []byte +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (e unmarshalError) Error() string { + extra := hex.Dump(e.bytes) + return SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (e unmarshalError) String() string { + return e.Error() +} + +// Bytes returns the bytes that failed to unmarshal. +func (e unmarshalError) Bytes() []byte { + return e.bytes +} + +// An error list that satisfies the golang interface +type errorList []error + +// Error returns the string representation of the error. +// +// Satisfies the error interface. +func (e errorList) Error() string { + msg := "" + // How do we want to handle the array size being zero + if size := len(e); size > 0 { + for i := 0; i < size; i++ { + msg += e[i].Error() + // We check the next index to see if it is within the slice. + // If it is, then we append a newline. We do this, because unit tests + // could be broken with the additional '\n' + if i+1 < size { + msg += "\n" + } + } + } + return msg +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go new file mode 100644 index 000000000..1a3d106d5 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go @@ -0,0 +1,108 @@ +package awsutil + +import ( + "io" + "reflect" + "time" +) + +// Copy deeply copies a src structure to dst. Useful for copying request and +// response structures. +// +// Can copy between structs of different type, but will only copy fields which +// are assignable, and exist in both structs. Fields which are not assignable, +// or do not exist in both structs are ignored. +func Copy(dst, src interface{}) { + dstval := reflect.ValueOf(dst) + if !dstval.IsValid() { + panic("Copy dst cannot be nil") + } + + rcopy(dstval, reflect.ValueOf(src), true) +} + +// CopyOf returns a copy of src while also allocating the memory for dst. +// src must be a pointer type or this operation will fail. +func CopyOf(src interface{}) (dst interface{}) { + dsti := reflect.New(reflect.TypeOf(src).Elem()) + dst = dsti.Interface() + rcopy(dsti, reflect.ValueOf(src), true) + return +} + +// rcopy performs a recursive copy of values from the source to destination. +// +// root is used to skip certain aspects of the copy which are not valid +// for the root node of a object. 
+func rcopy(dst, src reflect.Value, root bool) { + if !src.IsValid() { + return + } + + switch src.Kind() { + case reflect.Ptr: + if _, ok := src.Interface().(io.Reader); ok { + if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { + dst.Elem().Set(src) + } else if dst.CanSet() { + dst.Set(src) + } + } else { + e := src.Type().Elem() + if dst.CanSet() && !src.IsNil() { + if _, ok := src.Interface().(*time.Time); !ok { + dst.Set(reflect.New(e)) + } else { + tempValue := reflect.New(e) + tempValue.Elem().Set(src.Elem()) + // Sets time.Time's unexported values + dst.Set(tempValue) + } + } + if src.Elem().IsValid() { + // Keep the current root state since the depth hasn't changed + rcopy(dst.Elem(), src.Elem(), root) + } + } + case reflect.Struct: + t := dst.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + srcVal := src.FieldByName(name) + dstVal := dst.FieldByName(name) + if srcVal.IsValid() && dstVal.CanSet() { + rcopy(dstVal, srcVal, false) + } + } + case reflect.Slice: + if src.IsNil() { + break + } + + s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + dst.Set(s) + for i := 0; i < src.Len(); i++ { + rcopy(dst.Index(i), src.Index(i), false) + } + case reflect.Map: + if src.IsNil() { + break + } + + s := reflect.MakeMap(src.Type()) + dst.Set(s) + for _, k := range src.MapKeys() { + v := src.MapIndex(k) + v2 := reflect.New(v.Type()).Elem() + rcopy(v2, v, false) + dst.SetMapIndex(k, v2) + } + default: + // Assign the value if possible. If its not assignable, the value would + // need to be converted and the impact of that may be unexpected, or is + // not compatible with the dst type. + if src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + } +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go new file mode 100644 index 000000000..142a7a01c --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go @@ -0,0 +1,27 @@ +package awsutil + +import ( + "reflect" +) + +// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. +// In addition to this, this method will also dereference the input values if +// possible so the DeepEqual performed will not fail if one parameter is a +// pointer and the other is not. +// +// DeepEqual will not perform indirection of nested values of the input parameters. +func DeepEqual(a, b interface{}) bool { + ra := reflect.Indirect(reflect.ValueOf(a)) + rb := reflect.Indirect(reflect.ValueOf(b)) + + if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { + // If the elements are both nil, and of the same type they are equal + // If they are of different types they are not equal + return reflect.TypeOf(a) == reflect.TypeOf(b) + } else if raValid != rbValid { + // Both values must be valid to be equal + return false + } + + return reflect.DeepEqual(ra.Interface(), rb.Interface()) +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go new file mode 100644 index 000000000..a4eb6a7f4 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go @@ -0,0 +1,221 @@ +package awsutil + +import ( + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/jmespath/go-jmespath" +) + +var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) + +// rValuesAtPath returns a slice of values found in value v. 
The values +// in v are explored recursively so all nested values are collected. +func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { + pathparts := strings.Split(path, "||") + if len(pathparts) > 1 { + for _, pathpart := range pathparts { + vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) + if len(vals) > 0 { + return vals + } + } + return nil + } + + values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} + components := strings.Split(path, ".") + for len(values) > 0 && len(components) > 0 { + var index *int64 + var indexStar bool + c := strings.TrimSpace(components[0]) + if c == "" { // no actual component, illegal syntax + return nil + } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { + // TODO normalize case for user + return nil // don't support unexported fields + } + + // parse this component + if m := indexRe.FindStringSubmatch(c); m != nil { + c = m[1] + if m[2] == "" { + index = nil + indexStar = true + } else { + i, _ := strconv.ParseInt(m[2], 10, 32) + index = &i + indexStar = false + } + } + + nextvals := []reflect.Value{} + for _, value := range values { + // pull component name out of struct member + if value.Kind() != reflect.Struct { + continue + } + + if c == "*" { // pull all members + for i := 0; i < value.NumField(); i++ { + if f := reflect.Indirect(value.Field(i)); f.IsValid() { + nextvals = append(nextvals, f) + } + } + continue + } + + value = value.FieldByNameFunc(func(name string) bool { + if c == name { + return true + } else if !caseSensitive && strings.EqualFold(name, c) { + return true + } + return false + }) + + if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { + if !value.IsNil() { + value.Set(reflect.Zero(value.Type())) + } + return []reflect.Value{value} + } + + if createPath && value.Kind() == reflect.Ptr && value.IsNil() { + // TODO if the value is the terminus it should not be created + // if the value to be set to its position is nil. + value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, valItem := range values { + value := reflect.Indirect(valItem) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if createPath { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a structure. 
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { + result, err := jmespath.Search(path, i) + if err != nil { + return nil, err + } + + v := reflect.ValueOf(result) + if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, nil + } + if s, ok := result.([]interface{}); ok { + return s, err + } + if v.Kind() == reflect.Map && v.Len() == 0 { + return nil, nil + } + if v.Kind() == reflect.Slice { + out := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + out[i] = v.Index(i).Interface() + } + return out, nil + } + + return []interface{}{result}, nil +} + +// SetValueAtPath sets a value at the case insensitive lexical path inside +// of a structure. +func SetValueAtPath(i interface{}, path string, v interface{}) { + rvals := rValuesAtPath(i, path, true, false, v == nil) + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue + } + setValue(rval, v) + } +} + +func setValue(dstVal reflect.Value, src interface{}) { + if dstVal.Kind() == reflect.Ptr { + dstVal = reflect.Indirect(dstVal) + } + srcVal := reflect.ValueOf(src) + + if !srcVal.IsValid() { // src is literal nil + if dstVal.CanAddr() { + // Convert to pointer so that pointer's value can be nil'ed + // dstVal = dstVal.Addr() + } + dstVal.Set(reflect.Zero(dstVal.Type())) + + } else if srcVal.Kind() == reflect.Ptr { + if srcVal.IsNil() { + srcVal = reflect.Zero(dstVal.Type()) + } else { + srcVal = reflect.ValueOf(src).Elem() + } + dstVal.Set(srcVal) + } else { + dstVal.Set(srcVal) + } + +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go new file mode 100644 index 000000000..710eb432f --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go @@ -0,0 +1,113 @@ +package awsutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// Prettify returns the string representation of a value. +func Prettify(i interface{}) string { + var buf bytes.Buffer + prettify(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +// prettify will recursively walk value v to build a textual +// representation of the value. 
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
+	for v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Struct:
+		strtype := v.Type().String()
+		if strtype == "time.Time" {
+			fmt.Fprintf(buf, "%s", v.Interface())
+			break
+		} else if strings.HasPrefix(strtype, "io.") {
+			buf.WriteString("<buffer>")
+			break
+		}
+
+		buf.WriteString("{\n")
+
+		names := []string{}
+		for i := 0; i < v.Type().NumField(); i++ {
+			name := v.Type().Field(i).Name
+			f := v.Field(i)
+			if name[0:1] == strings.ToLower(name[0:1]) {
+				continue // ignore unexported fields
+			}
+			if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
+				continue // ignore unset fields
+			}
+			names = append(names, name)
+		}
+
+		for i, n := range names {
+			val := v.FieldByName(n)
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(n + ": ")
+			prettify(val, indent+2, buf)
+
+			if i < len(names)-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	case reflect.Slice:
+		strtype := v.Type().String()
+		if strtype == "[]uint8" {
+			fmt.Fprintf(buf, "<binary> len %d", v.Len())
+			break
+		}
+
+		nl, id, id2 := "", "", ""
+		if v.Len() > 3 {
+			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+		}
+		buf.WriteString("[" + nl)
+		for i := 0; i < v.Len(); i++ {
+			buf.WriteString(id2)
+			prettify(v.Index(i), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString("," + nl)
+			}
+		}
+
+		buf.WriteString(nl + id + "]")
+	case reflect.Map:
+		buf.WriteString("{\n")
+
+		for i, k := range v.MapKeys() {
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(k.String() + ": ")
+			prettify(v.MapIndex(k), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	default:
+		if !v.IsValid() {
+			fmt.Fprint(buf, "<invalid value>")
+			return
+		}
+		format := "%v"
+		switch v.Interface().(type) {
+		case string:
+			format = "%q"
+		case io.ReadSeeker, io.Reader:
+			format = "buffer(%p)"
+		}
+		fmt.Fprintf(buf, format, v.Interface())
+	}
+}
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
new file mode 100644
index 000000000..645df2450
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
@@ -0,0 +1,88 @@
+package awsutil
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// StringValue returns the string representation of a value.
+func StringValue(i interface{}) string {
+	var buf bytes.Buffer
+	stringValue(reflect.ValueOf(i), 0, &buf)
+	return buf.String()
+}
+
+func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
+	for v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Struct:
+		buf.WriteString("{\n")
+
+		for i := 0; i < v.Type().NumField(); i++ {
+			ft := v.Type().Field(i)
+			fv := v.Field(i)
+
+			if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) {
+				continue // ignore unexported fields
+			}
+			if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() {
+				continue // ignore unset fields
+			}
+
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(ft.Name + ": ")
+
+			if tag := ft.Tag.Get("sensitive"); tag == "true" {
+				buf.WriteString("<sensitive>")
+			} else {
+				stringValue(fv, indent+2, buf)
+			}
+
+			buf.WriteString(",\n")
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	case reflect.Slice:
+		nl, id, id2 := "", "", ""
+		if v.Len() > 3 {
+			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+		}
+		buf.WriteString("[" + nl)
+		for i := 0; i < v.Len(); i++ {
+			buf.WriteString(id2)
+			stringValue(v.Index(i), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString("," + nl)
+			}
+		}
+
+		buf.WriteString(nl + id + "]")
+	case reflect.Map:
+		buf.WriteString("{\n")
+
+		for i, k := range v.MapKeys() {
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(k.String() + ": ")
+			stringValue(v.MapIndex(k), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	default:
+		format := "%v"
+		switch v.Interface().(type) {
+		case string:
+			format = "%q"
+		}
+		fmt.Fprintf(buf, format, v.Interface())
+	}
+}
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
new file mode 100644
index 000000000..03334d692
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
@@ -0,0 +1,97 @@
+package client
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A Config provides configuration to a service client instance.
+type Config struct {
+	Config        *aws.Config
+	Handlers      request.Handlers
+	PartitionID   string
+	Endpoint      string
+	SigningRegion string
+	SigningName   string
+
+	// States that the signing name did not come from a modeled source but
+	// was derived based on other data. Used by service client constructors
+	// to determine if the signing name can be overridden based on metadata the
+	// service has.
+	SigningNameDerived bool
+}
+
+// ConfigProvider provides a generic way for a service client to receive
+// the ClientConfig without circular dependencies.
+type ConfigProvider interface {
+	ClientConfig(serviceName string, cfgs ...*aws.Config) Config
+}
+
+// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not
+// resolve the endpoint automatically. The service client's endpoint must be
+// provided via the aws.Config.Endpoint field.
+type ConfigNoResolveEndpointProvider interface {
+	ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config
+}
+
+// A Client implements the base client request and response handling
+// used by all service clients.
+type Client struct { + request.Retryer + metadata.ClientInfo + + Config aws.Config + Handlers request.Handlers +} + +// New will return a pointer to a new initialized service client. +func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { + svc := &Client{ + Config: cfg, + ClientInfo: info, + Handlers: handlers.Copy(), + } + + switch retryer, ok := cfg.Retryer.(request.Retryer); { + case ok: + svc.Retryer = retryer + case cfg.Retryer != nil && cfg.Logger != nil: + s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) + cfg.Logger.Log(s) + fallthrough + default: + maxRetries := aws.IntValue(cfg.MaxRetries) + if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { + maxRetries = DefaultRetryerMaxNumRetries + } + svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} + } + + svc.AddDebugHandlers() + + for _, option := range options { + option(svc) + } + + return svc +} + +// NewRequest returns a new Request pointer for the service API +// operation and parameters. +func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { + return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) +} + +// AddDebugHandlers injects debug logging handlers into the service to log request +// debug information. +func (c *Client) AddDebugHandlers() { + if !c.Config.LogLevel.AtLeast(aws.LogDebug) { + return + } + + c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler) + c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler) +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go new file mode 100644 index 000000000..9f6af19dd --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -0,0 +1,177 @@ +package client + +import ( + "math" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkrand" +) + +// DefaultRetryer implements basic retry logic using exponential backoff for +// most services. If you want to implement custom retry logic, you can implement the +// request.Retryer interface. +// +type DefaultRetryer struct { + // Num max Retries is the number of max retries that will be performed. + // By default, this is zero. + NumMaxRetries int + + // MinRetryDelay is the minimum retry delay after which retry will be performed. + // If not set, the value is 0ns. + MinRetryDelay time.Duration + + // MinThrottleRetryDelay is the minimum retry delay when throttled. + // If not set, the value is 0ns. + MinThrottleDelay time.Duration + + // MaxRetryDelay is the maximum retry delay before which retry must be performed. + // If not set, the value is 0ns. + MaxRetryDelay time.Duration + + // MaxThrottleDelay is the maximum retry delay when throttled. + // If not set, the value is 0ns. 
+	MaxThrottleDelay time.Duration
+}
+
+const (
+	// DefaultRetryerMaxNumRetries sets maximum number of retries
+	DefaultRetryerMaxNumRetries = 3
+
+	// DefaultRetryerMinRetryDelay sets minimum retry delay
+	DefaultRetryerMinRetryDelay = 30 * time.Millisecond
+
+	// DefaultRetryerMinThrottleDelay sets minimum delay when throttled
+	DefaultRetryerMinThrottleDelay = 500 * time.Millisecond
+
+	// DefaultRetryerMaxRetryDelay sets maximum retry delay
+	DefaultRetryerMaxRetryDelay = 300 * time.Second
+
+	// DefaultRetryerMaxThrottleDelay sets maximum delay when throttled
+	DefaultRetryerMaxThrottleDelay = 300 * time.Second
+)
+
+// MaxRetries returns the maximum number of retries the service will use to make
+// an individual API request.
+func (d DefaultRetryer) MaxRetries() int {
+	return d.NumMaxRetries
+}
+
+// setRetryerDefaults sets the default values of the retryer if not set
+func (d *DefaultRetryer) setRetryerDefaults() {
+	if d.MinRetryDelay == 0 {
+		d.MinRetryDelay = DefaultRetryerMinRetryDelay
+	}
+	if d.MaxRetryDelay == 0 {
+		d.MaxRetryDelay = DefaultRetryerMaxRetryDelay
+	}
+	if d.MinThrottleDelay == 0 {
+		d.MinThrottleDelay = DefaultRetryerMinThrottleDelay
+	}
+	if d.MaxThrottleDelay == 0 {
+		d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay
+	}
+}
+
+// RetryRules returns the delay duration before retrying this request again
+func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
+
+	// if number of max retries is zero, no retries will be performed.
+	if d.NumMaxRetries == 0 {
+		return 0
+	}
+
+	// Sets default value for retryer members
+	d.setRetryerDefaults()
+
+	// minDelay is the minimum retryer delay
+	minDelay := d.MinRetryDelay
+
+	var initialDelay time.Duration
+
+	isThrottle := r.IsErrorThrottle()
+	if isThrottle {
+		if delay, ok := getRetryAfterDelay(r); ok {
+			initialDelay = delay
+		}
+		minDelay = d.MinThrottleDelay
+	}
+
+	retryCount := r.RetryCount
+
+	// maxDelay is the maximum retryer delay
+	maxDelay := d.MaxRetryDelay
+
+	if isThrottle {
+		maxDelay = d.MaxThrottleDelay
+	}
+
+	var delay time.Duration
+
+	// Logic to cap the retry count based on the minDelay provided
+	actualRetryCount := int(math.Log2(float64(minDelay))) + 1
+	if actualRetryCount < 63-retryCount {
+		delay = time.Duration(1<<uint64(retryCount)) * getJitterDelay(minDelay)
+		if delay > maxDelay {
+			delay = getJitterDelay(maxDelay / 2)
+		}
+	} else {
+		delay = getJitterDelay(maxDelay / 2)
+	}
+	return delay + initialDelay
+}
+
+// getJitterDelay returns a jittered delay for retry
+func getJitterDelay(duration time.Duration) time.Duration {
+	return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration))
+}
+
+// ShouldRetry returns true if the request should be retried.
+func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
+
+	// ShouldRetry returns false if number of max retries is 0.
+ if d.NumMaxRetries == 0 { + return false + } + + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable != nil { + return *r.Retryable + } + return r.IsErrorRetryable() || r.IsErrorThrottle() +} + +// This will look in the Retry-After header, RFC 7231, for how long +// it will wait before attempting another request +func getRetryAfterDelay(r *request.Request) (time.Duration, bool) { + if !canUseRetryAfterHeader(r) { + return 0, false + } + + delayStr := r.HTTPResponse.Header.Get("Retry-After") + if len(delayStr) == 0 { + return 0, false + } + + delay, err := strconv.Atoi(delayStr) + if err != nil { + return 0, false + } + + return time.Duration(delay) * time.Second, true +} + +// Will look at the status code to see if the retry header pertains to +// the status code. +func canUseRetryAfterHeader(r *request.Request) bool { + switch r.HTTPResponse.StatusCode { + case 429: + case 503: + default: + return false + } + + return true +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go new file mode 100644 index 000000000..8958c32d4 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go @@ -0,0 +1,194 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http/httputil" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +const logReqErrMsg = `DEBUG ERROR: Request %s/%s: +---[ REQUEST DUMP ERROR ]----------------------------- +%s +------------------------------------------------------` + +type logWriter struct { + // Logger is what we will use to log the payload of a response. + Logger aws.Logger + // buf stores the contents of what has been read + buf *bytes.Buffer +} + +func (logger *logWriter) Write(b []byte) (int, error) { + return logger.buf.Write(b) +} + +type teeReaderCloser struct { + // io.Reader will be a tee reader that is used during logging. + // This structure will read from a body and write the contents to a logger. + io.Reader + // Source is used just to close when we are done reading. + Source io.ReadCloser +} + +func (reader *teeReaderCloser) Close() error { + return reader.Source.Close() +} + +// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent +// to a service. Will include the HTTP request body if the LogLevel of the +// request matches LogDebugWithHTTPBody. +var LogHTTPRequestHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequest", + Fn: logRequest, +} + +func logRequest(r *request.Request) { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + bodySeekable := aws.IsReaderSeekable(r.Body) + + b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + if logBody { + if !bodySeekable { + r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) + } + // Reset the request body because dumpRequest will re-wrap the + // r.HTTPRequest's Body as a NoOpCloser and will not be reset after + // read by the HTTP client reader. 
+ if err := r.Error; err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent +// to a service. Will only log the HTTP request's headers. The request payload +// will not be read. +var LogHTTPRequestHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequestHeader", + Fn: logRequestHeader, +} + +func logRequestHeader(r *request.Request) { + b, err := httputil.DumpRequestOut(r.HTTPRequest, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +const logRespMsg = `DEBUG: Response %s/%s Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` + +const logRespErrMsg = `DEBUG ERROR: Response %s/%s: +---[ RESPONSE DUMP ERROR ]----------------------------- +%s +-----------------------------------------------------` + +// LogHTTPResponseHandler is a SDK request handler to log the HTTP response +// received from a service. Will include the HTTP response body if the LogLevel +// of the request matches LogDebugWithHTTPBody. +var LogHTTPResponseHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponse", + Fn: logResponse, +} + +func logResponse(r *request.Request) { + lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} + + if r.HTTPResponse == nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil")) + return + } + + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + if logBody { + r.HTTPResponse.Body = &teeReaderCloser{ + Reader: io.TeeReader(r.HTTPResponse.Body, lw), + Source: r.HTTPResponse.Body, + } + } + + handlerFn := func(req *request.Request) { + b, err := httputil.DumpResponse(req.HTTPResponse, false) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + lw.Logger.Log(fmt.Sprintf(logRespMsg, + req.ClientInfo.ServiceName, req.Operation.Name, string(b))) + + if logBody { + b, err := ioutil.ReadAll(lw.buf) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + lw.Logger.Log(string(b)) + } + } + + const handlerName = "awsdk.client.LogResponse.ResponseBody" + + r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) + r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) +} + +// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP +// response received from a service. Will only log the HTTP response's headers. +// The response payload will not be read. 
+var LogHTTPResponseHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponseHeader", + Fn: logResponseHeader, +} + +func logResponseHeader(r *request.Request) { + if r.Config.Logger == nil { + return + } + + b, err := httputil.DumpResponse(r.HTTPResponse, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logRespMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go new file mode 100644 index 000000000..0c48f72e0 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -0,0 +1,14 @@ +package metadata + +// ClientInfo wraps immutable data from the client.Client structure. +type ClientInfo struct { + ServiceName string + ServiceID string + APIVersion string + PartitionID string + Endpoint string + SigningName string + SigningRegion string + JSONVersion string + TargetPrefix string +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go new file mode 100644 index 000000000..881d575f0 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go @@ -0,0 +1,28 @@ +package client + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// NoOpRetryer provides a retryer that performs no retries. +// It should be used when we do not want retries to be performed. +type NoOpRetryer struct{} + +// MaxRetries returns the number of maximum returns the service will use to make +// an individual API; For NoOpRetryer the MaxRetries will always be zero. +func (d NoOpRetryer) MaxRetries() int { + return 0 +} + +// ShouldRetry will always return false for NoOpRetryer, as it should never retry. +func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool { + return false +} + +// RetryRules returns the delay duration before retrying this request again; +// since NoOpRetryer does not retry, RetryRules always returns 0. +func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration { + return 0 +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/config.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/config.go new file mode 100644 index 000000000..2def23fa1 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -0,0 +1,586 @@ +package aws + +import ( + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" +) + +// UseServiceDefaultRetries instructs the config to use the service's own +// default number of retries. This will be the default action if +// Config.MaxRetries is nil also. +const UseServiceDefaultRetries = -1 + +// RequestRetryer is an alias for a type that implements the request.Retryer +// interface. +type RequestRetryer interface{} + +// A Config provides service configuration for service clients. By default, +// all clients will use the defaults.DefaultConfig structure. +// +// // Create Session with MaxRetries configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(&aws.Config{ +// MaxRetries: aws.Int(3), +// })) +// +// // Create S3 service client with a specific Region. 
+//	svc := s3.New(sess, &aws.Config{
+//		Region: aws.String("us-west-2"),
+//	})
+type Config struct {
+	// Enables verbose error printing of all credential chain errors.
+	// Should be used when wanting to see all errors while attempting to
+	// retrieve credentials.
+	CredentialsChainVerboseErrors *bool
+
+	// The credentials object to use when signing requests. Defaults to a
+	// chain of credential providers to search for credentials in environment
+	// variables, shared credential file, and EC2 Instance Roles.
+	Credentials *credentials.Credentials
+
+	// An optional endpoint URL (hostname only or fully qualified URI)
+	// that overrides the default generated endpoint for a client. Set this
+	// to `""` to use the default generated endpoint.
+	//
+	// Note: You must still provide a `Region` value when specifying an
+	// endpoint for a client.
+	Endpoint *string
+
+	// The resolver to use for looking up endpoints for AWS service clients
+	// to use based on region.
+	EndpointResolver endpoints.Resolver
+
+	// EnforceShouldRetryCheck is used in the AfterRetryHandler to always call
+	// ShouldRetry regardless of whether request.Retryable is set. This will
+	// utilize the ShouldRetry method of custom retryers. If
+	// EnforceShouldRetryCheck is not set, then ShouldRetry will only be
+	// called if request.Retryable is nil. Proper handling of the
+	// request.Retryable field is important when setting this field.
+	EnforceShouldRetryCheck *bool
+
+	// The region to send requests to. This parameter is required and must
+	// be configured globally or on a per-client basis unless otherwise
+	// noted. A full list of regions is found in the "Regions and Endpoints"
+	// document.
+	//
+	// See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS
+	// Regions and Endpoints.
+	Region *string
+
+	// Set this to `true` to disable SSL when sending requests. Defaults
+	// to `false`.
+	DisableSSL *bool
+
+	// The HTTP client to use when sending requests. Defaults to
+	// `http.DefaultClient`.
+	HTTPClient *http.Client
+
+	// An integer value representing the logging level. The default log level
+	// is zero (LogOff), which represents no logging. To enable logging set
+	// to a LogLevel Value.
+	LogLevel *LogLevelType
+
+	// The logger writer interface to write logging messages to. Defaults to
+	// standard out.
+	Logger Logger
+
+	// The maximum number of times that a request will be retried for failures.
+	// Defaults to -1, which defers the max retry setting to the service
+	// specific configuration.
+	MaxRetries *int
+
+	// Retryer guides how HTTP requests should be retried in case of
+	// recoverable failures.
+	//
+	// When nil or the value does not implement the request.Retryer interface,
+	// the client.DefaultRetryer will be used.
+	//
+	// When both Retryer and MaxRetries are non-nil, the former is used and
+	// the latter ignored.
+	//
+	// To set the Retryer field in a type-safe manner and with chaining, use
+	// the request.WithRetryer helper function:
+	//
+	//	cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
+	//
+	Retryer RequestRetryer
+
+	// Disables semantic parameter validation, which validates input for
+	// missing required fields and/or other semantic request input errors.
+	DisableParamValidation *bool
+
+	// Disables the computation of request and response checksums, e.g.,
+	// CRC32 checksums in Amazon DynamoDB.
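+	//
+	// For example (illustrative sketch, using the builder helper defined
+	// later in this file):
+	//
+	//	cfg := aws.NewConfig().WithDisableComputeChecksums(true)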
+	DisableComputeChecksums *bool
+
+	// Set this to `true` to force the request to use path-style addressing,
+	// i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
+	// will use virtual hosted bucket addressing when possible
+	// (`http://BUCKET.s3.amazonaws.com/KEY`).
+	//
+	// Note: This configuration option is specific to the Amazon S3 service.
+	//
+	// See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
+	// for Amazon S3: Virtual Hosting of Buckets
+	S3ForcePathStyle *bool
+
+	// Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
+	// header to PUT requests over 2MB of content. 100-Continue instructs the
+	// HTTP client not to send the body until the service responds with a
+	// `continue` status. This is useful to prevent sending the request body
+	// until after the request is authenticated, and validated.
+	//
+	// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
+	//
+	// 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
+	// `ExpectContinueTimeout` for information on adjusting the continue wait
+	// timeout. https://golang.org/pkg/net/http/#Transport
+	//
+	// You should use this flag to disable 100-Continue if you experience issues
+	// with proxies or third party S3 compatible services.
+	S3Disable100Continue *bool
+
+	// Set this to `true` to enable the S3 Accelerate feature. All operations
+	// compatible with S3 Accelerate will use the accelerate endpoint for
+	// requests. Requests not compatible will fall back to normal S3 requests.
+	//
+	// The bucket must be enabled for accelerate to be used with an S3 client
+	// with accelerate enabled. If the bucket is not enabled for accelerate an
+	// error will be returned. The bucket name must be DNS compatible to also
+	// work with accelerate.
+	S3UseAccelerate *bool
+
+	// S3DisableContentMD5Validation config option is temporarily disabled,
+	// For S3 GetObject API calls, #1837.
+	//
+	// Set this to `true` to disable the S3 service client from automatically
+	// adding the ContentMD5 to S3 Object Put and Upload API calls. This option
+	// will also disable the SDK from performing object ContentMD5 validation
+	// on GetObject API calls.
+	S3DisableContentMD5Validation *bool
+
+	// Set this to `true` to have the S3 service client use the region specified
+	// in the ARN, when an ARN is provided as an argument to a bucket parameter.
+	S3UseARNRegion *bool
+
+	// Set this to `true` to enable the SDK to unmarshal API response header maps to
+	// normalized lower case map keys.
+	//
+	// For example S3's X-Amz-Meta prefixed header will be unmarshaled to lower case
+	// Metadata member's map keys. The value of the header in the map is unaffected.
+	LowerCaseHeaderMaps *bool
+
+	// Set this to `true` to disable the EC2Metadata client from overriding the
+	// default http.Client's Timeout. This is helpful if you do not want the
+	// EC2Metadata client to create a new http.Client. This option is only
+	// meaningful if you're not already using a custom HTTP client with the
+	// SDK. Enabled by default.
+	//
+	// Must be set and provided to the session.NewSession() in order to disable
+	// the EC2Metadata overriding the timeout for default credentials chain.
+	//
+	// Example:
+	//	sess := session.Must(session.NewSession(aws.NewConfig()
+	//		.WithEC2MetadataDisableTimeoutOverride(true)))
+	//
+	//	svc := s3.New(sess)
+	//
+	EC2MetadataDisableTimeoutOverride *bool
+
+	// Instructs the endpoint to be generated for a service client to
+	// be the dual stack endpoint. The dual stack endpoint will support
+	// both IPv4 and IPv6 addressing.
+	//
+	// Setting this for a service which does not support dual stack will fail
+	// to make requests. It is not recommended to set this value on the session
+	// as it will apply to all service clients created with the session, even
+	// services which don't support dual stack endpoints.
+	//
+	// If the Endpoint config value is also provided the UseDualStack flag
+	// will be ignored.
+	//
+	// Example:
+	//
+	//	sess := session.Must(session.NewSession())
+	//
+	//	svc := s3.New(sess, &aws.Config{
+	//		UseDualStack: aws.Bool(true),
+	//	})
+	UseDualStack *bool
+
+	// SleepDelay is an override for the func the SDK will call when sleeping
+	// during the lifecycle of a request. Specifically this will be used for
+	// request delays. This value should only be used for testing. To adjust
+	// the delay of a request see the aws/client.DefaultRetryer and
+	// aws/request.Retryer.
+	//
+	// SleepDelay will prevent any Context from being used for canceling retry
+	// delay of an API operation. It is recommended to not use SleepDelay at all
+	// and specify a Retryer instead.
+	SleepDelay func(time.Duration)
+
+	// DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests.
+	// Will default to false. This would only be used for empty directory names in s3 requests.
+	//
+	// Example:
+	//	sess := session.Must(session.NewSession(&aws.Config{
+	//		DisableRestProtocolURICleaning: aws.Bool(true),
+	//	}))
+	//
+	//	svc := s3.New(sess)
+	//	out, err := svc.GetObject(&s3.GetObjectInput {
+	//		Bucket: aws.String("bucketname"),
+	//		Key:    aws.String("//foo//bar//moo"),
+	//	})
+	DisableRestProtocolURICleaning *bool
+
+	// EnableEndpointDiscovery will allow for endpoint discovery on operations that
+	// have the definition in its model. By default, endpoint discovery is off.
+	//
+	// Example:
+	//	sess := session.Must(session.NewSession(&aws.Config{
+	//		EnableEndpointDiscovery: aws.Bool(true),
+	//	}))
+	//
+	//	svc := s3.New(sess)
+	//	out, err := svc.GetObject(&s3.GetObjectInput {
+	//		Bucket: aws.String("bucketname"),
+	//		Key:    aws.String("/foo/bar/moo"),
+	//	})
+	EnableEndpointDiscovery *bool
+
+	// DisableEndpointHostPrefix will disable the SDK's behavior of prefixing
+	// request endpoint hosts with modeled information.
+	//
+	// Disabling this feature is useful when you want to use local endpoints
+	// for testing that do not support the modeled host prefix pattern.
+	DisableEndpointHostPrefix *bool
+
+	// STSRegionalEndpoint will enable regional or legacy endpoint resolving
+	STSRegionalEndpoint endpoints.STSRegionalEndpoint
+
+	// S3UsEast1RegionalEndpoint will enable regional or legacy endpoint resolving
+	S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint
+}
+
+// NewConfig returns a new Config pointer that can be chained with builder
+// methods to set multiple configuration values inline without using pointers.
+//
+//	// Create Session with MaxRetries configuration to be shared by multiple
+//	// service clients.
+//	sess := session.Must(session.NewSession(aws.NewConfig().
+//		WithMaxRetries(3),
+//	))
+//
+//	// Create S3 service client with a specific Region.
+//	svc := s3.New(sess, aws.NewConfig().
+//		WithRegion("us-west-2"),
+//	)
+func NewConfig() *Config {
+	return &Config{}
+}
+
+// WithCredentialsChainVerboseErrors sets a config verbose errors boolean,
+// returning a Config pointer for chaining.
+func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
+	c.CredentialsChainVerboseErrors = &verboseErrs
+	return c
+}
+
+// WithCredentials sets a config Credentials value returning a Config pointer
+// for chaining.
+func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
+	c.Credentials = creds
+	return c
+}
+
+// WithEndpoint sets a config Endpoint value returning a Config pointer for
+// chaining.
+func (c *Config) WithEndpoint(endpoint string) *Config {
+	c.Endpoint = &endpoint
+	return c
+}
+
+// WithEndpointResolver sets a config EndpointResolver value returning a
+// Config pointer for chaining.
+func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config {
+	c.EndpointResolver = resolver
+	return c
+}
+
+// WithRegion sets a config Region value returning a Config pointer for
+// chaining.
+func (c *Config) WithRegion(region string) *Config {
+	c.Region = &region
+	return c
+}
+
+// WithDisableSSL sets a config DisableSSL value returning a Config pointer
+// for chaining.
+func (c *Config) WithDisableSSL(disable bool) *Config {
+	c.DisableSSL = &disable
+	return c
+}
+
+// WithHTTPClient sets a config HTTPClient value returning a Config pointer
+// for chaining.
+func (c *Config) WithHTTPClient(client *http.Client) *Config {
+	c.HTTPClient = client
+	return c
+}
+
+// WithMaxRetries sets a config MaxRetries value returning a Config pointer
+// for chaining.
+func (c *Config) WithMaxRetries(max int) *Config {
+	c.MaxRetries = &max
+	return c
+}
+
+// WithDisableParamValidation sets a config DisableParamValidation value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableParamValidation(disable bool) *Config {
+	c.DisableParamValidation = &disable
+	return c
+}
+
+// WithDisableComputeChecksums sets a config DisableComputeChecksums value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
+	c.DisableComputeChecksums = &disable
+	return c
+}
+
+// WithLogLevel sets a config LogLevel value returning a Config pointer for
+// chaining.
+func (c *Config) WithLogLevel(level LogLevelType) *Config {
+	c.LogLevel = &level
+	return c
+}
+
+// WithLogger sets a config Logger value returning a Config pointer for
+// chaining.
+func (c *Config) WithLogger(logger Logger) *Config {
+	c.Logger = logger
+	return c
+}
+
+// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
+// pointer for chaining.
+func (c *Config) WithS3ForcePathStyle(force bool) *Config {
+	c.S3ForcePathStyle = &force
+	return c
+}
+
+// WithS3Disable100Continue sets a config S3Disable100Continue value returning
+// a Config pointer for chaining.
+func (c *Config) WithS3Disable100Continue(disable bool) *Config {
+	c.S3Disable100Continue = &disable
+	return c
+}
+
+// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config
+// pointer for chaining.
+func (c *Config) WithS3UseAccelerate(enable bool) *Config {
+	c.S3UseAccelerate = &enable
+	return c
+}
+
+// WithS3DisableContentMD5Validation sets a config
+// S3DisableContentMD5Validation value returning a Config pointer for chaining.
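+//
+// For example (illustrative sketch):
+//
+//	cfg := aws.NewConfig().WithS3DisableContentMD5Validation(true)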
+func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config {
+	c.S3DisableContentMD5Validation = &enable
+	return c
+}
+
+// WithS3UseARNRegion sets a config S3UseARNRegion value, returning a Config
+// pointer for chaining.
+func (c *Config) WithS3UseARNRegion(enable bool) *Config {
+	c.S3UseARNRegion = &enable
+	return c
+}
+
+// WithUseDualStack sets a config UseDualStack value returning a Config
+// pointer for chaining.
+func (c *Config) WithUseDualStack(enable bool) *Config {
+	c.UseDualStack = &enable
+	return c
+}
+
+// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
+// returning a Config pointer for chaining.
+func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
+	c.EC2MetadataDisableTimeoutOverride = &enable
+	return c
+}
+
+// WithSleepDelay overrides the function used to sleep while waiting for the
+// next retry. Defaults to time.Sleep.
+func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
+	c.SleepDelay = fn
+	return c
+}
+
+// WithEndpointDiscovery will set whether or not to use endpoint discovery.
+func (c *Config) WithEndpointDiscovery(t bool) *Config {
+	c.EnableEndpointDiscovery = &t
+	return c
+}
+
+// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix
+// when making requests.
+func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config {
+	c.DisableEndpointHostPrefix = &t
+	return c
+}
+
+// MergeIn merges the passed in configs into the existing config object.
+func (c *Config) MergeIn(cfgs ...*Config) {
+	for _, other := range cfgs {
+		mergeInConfig(c, other)
+	}
+}
+
+// WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag
+// when resolving the endpoint for a service.
+func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config {
+	c.STSRegionalEndpoint = sre
+	return c
+}
+
+// WithS3UsEast1RegionalEndpoint will set whether or not to use regional endpoint flag
+// when resolving the endpoint for a service.
+func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEndpoint) *Config {
+	c.S3UsEast1RegionalEndpoint = sre
+	return c
+}
+
+func mergeInConfig(dst *Config, other *Config) {
+	if other == nil {
+		return
+	}
+
+	if other.CredentialsChainVerboseErrors != nil {
+		dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors
+	}
+
+	if other.Credentials != nil {
+		dst.Credentials = other.Credentials
+	}
+
+	if other.Endpoint != nil {
+		dst.Endpoint = other.Endpoint
+	}
+
+	if other.EndpointResolver != nil {
+		dst.EndpointResolver = other.EndpointResolver
+	}
+
+	if other.Region != nil {
+		dst.Region = other.Region
+	}
+
+	if other.DisableSSL != nil {
+		dst.DisableSSL = other.DisableSSL
+	}
+
+	if other.HTTPClient != nil {
+		dst.HTTPClient = other.HTTPClient
+	}
+
+	if other.LogLevel != nil {
+		dst.LogLevel = other.LogLevel
+	}
+
+	if other.Logger != nil {
+		dst.Logger = other.Logger
+	}
+
+	if other.MaxRetries != nil {
+		dst.MaxRetries = other.MaxRetries
+	}
+
+	if other.Retryer != nil {
+		dst.Retryer = other.Retryer
+	}
+
+	if other.DisableParamValidation != nil {
+		dst.DisableParamValidation = other.DisableParamValidation
+	}
+
+	if other.DisableComputeChecksums != nil {
+		dst.DisableComputeChecksums = other.DisableComputeChecksums
+	}
+
+	if other.S3ForcePathStyle != nil {
+		dst.S3ForcePathStyle = other.S3ForcePathStyle
+	}
+
+	if other.S3Disable100Continue != nil {
+		dst.S3Disable100Continue = other.S3Disable100Continue
+	}
+
+	if other.S3UseAccelerate != nil {
+		dst.S3UseAccelerate = other.S3UseAccelerate
+	}
+
+	if other.S3DisableContentMD5Validation != nil {
+		dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation
+	}
+
+	if other.S3UseARNRegion != nil {
+		dst.S3UseARNRegion = other.S3UseARNRegion
+	}
+
+	if other.UseDualStack != nil {
+		dst.UseDualStack = other.UseDualStack
+	}
+
+	if other.EC2MetadataDisableTimeoutOverride != nil {
+		dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
+	}
+
+	if other.SleepDelay != nil {
+		dst.SleepDelay = other.SleepDelay
+	}
+
+	if other.DisableRestProtocolURICleaning != nil {
+		dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning
+	}
+
+	if other.EnforceShouldRetryCheck != nil {
+		dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck
+	}
+
+	if other.EnableEndpointDiscovery != nil {
+		dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery
+	}
+
+	if other.DisableEndpointHostPrefix != nil {
+		dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix
+	}
+
+	if other.STSRegionalEndpoint != endpoints.UnsetSTSEndpoint {
+		dst.STSRegionalEndpoint = other.STSRegionalEndpoint
+	}
+
+	if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint {
+		dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint
+	}
+}
+
+// Copy will return a shallow copy of the Config object. If any additional
+// configurations are provided they will be merged into the new config returned.
+func (c *Config) Copy(cfgs ...*Config) *Config {
+	dst := &Config{}
+	dst.MergeIn(c)
+
+	for _, cfg := range cfgs {
+		dst.MergeIn(cfg)
+	}
+
+	return dst
+}
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go
new file mode 100644
index 000000000..2866f9a7f
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go
@@ -0,0 +1,37 @@
+// +build !go1.9
+
+package aws
+
+import "time"
+
+// Context is a copy of the Go v1.7 stdlib's context.Context interface.
+// It is represented as an SDK interface to enable you to use the "WithContext"
+// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context interface {
+	// Deadline returns the time when work done on behalf of this context
+	// should be canceled. Deadline returns ok==false when no deadline is
+	// set. Successive calls to Deadline return the same results.
+	Deadline() (deadline time.Time, ok bool)
+
+	// Done returns a channel that's closed when work done on behalf of this
+	// context should be canceled. Done may return nil if this context can
+	// never be canceled. Successive calls to Done return the same value.
+	Done() <-chan struct{}
+
+	// Err returns a non-nil error value after Done is closed. Err returns
+	// Canceled if the context was canceled or DeadlineExceeded if the
+	// context's deadline passed. No other values for Err are defined.
+	// After Done is closed, successive calls to Err return the same value.
+	Err() error
+
+	// Value returns the value associated with this context for key, or nil
+	// if no value is associated with key. Successive calls to Value with
+	// the same key returns the same result.
+	//
+	// Use context values only for request-scoped data that transits
+	// processes and API boundaries, not for passing optional parameters to
+	// functions.
+	Value(key interface{}) interface{}
+}
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go
new file mode 100644
index 000000000..3718b26e1
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go
@@ -0,0 +1,11 @@
+// +build go1.9
+
+package aws
+
+import "context"
+
+// Context is an alias of the Go stdlib's context.Context interface.
+// It can be used within the SDK's API operation "WithContext" methods.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context = context.Context
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go
new file mode 100644
index 000000000..2f9446333
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go
@@ -0,0 +1,22 @@
+// +build !go1.7
+
+package aws
+
+import (
+	"github.com/aws/aws-sdk-go/internal/context"
+)
+
+// BackgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func BackgroundContext() Context {
+	return context.BackgroundCtx
+}
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go
new file mode 100644
index 000000000..9c29f29af
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go
@@ -0,0 +1,20 @@
+// +build go1.7
+
+package aws
+
+import "context"
+
+// BackgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func BackgroundContext() Context {
+	return context.Background()
+}
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go
new file mode 100644
index 000000000..304fd1561
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go
@@ -0,0 +1,24 @@
+package aws
+
+import (
+	"time"
+)
+
+// SleepWithContext will wait for the timer duration to expire, or for the
+// context to be canceled, whichever happens first. If the context is
+// canceled, the Context's error will be returned.
+//
+// Expects Context to always return a non-nil error if the Done channel is closed.
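+//
+// Illustrative usage sketch (the caller's context plumbing is assumed):
+//
+//	ctx, cancel := context.WithTimeout(aws.BackgroundContext(), time.Second)
+//	defer cancel()
+//	if err := aws.SleepWithContext(ctx, 5*time.Second); err != nil {
+//		// canceled (or timed out) before the full delay elapsed
+//	}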
+func SleepWithContext(ctx Context, dur time.Duration) error {
+	t := time.NewTimer(dur)
+	defer t.Stop()
+
+	select {
+	case <-t.C:
+		break
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+
+	return nil
+}
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
new file mode 100644
index 000000000..4e076c183
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
@@ -0,0 +1,918 @@
+package aws
+
+import "time"
+
+// String returns a pointer to the string value passed in.
+func String(v string) *string {
+	return &v
+}
+
+// StringValue returns the value of the string pointer passed in or
+// "" if the pointer is nil.
+func StringValue(v *string) string {
+	if v != nil {
+		return *v
+	}
+	return ""
+}
+
+// StringSlice converts a slice of string values into a slice of
+// string pointers
+func StringSlice(src []string) []*string {
+	dst := make([]*string, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// StringValueSlice converts a slice of string pointers into a slice of
+// string values
+func StringValueSlice(src []*string) []string {
+	dst := make([]string, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// StringMap converts a string map of string values into a string
+// map of string pointers
+func StringMap(src map[string]string) map[string]*string {
+	dst := make(map[string]*string)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// StringValueMap converts a string map of string pointers into a string
+// map of string values
+func StringValueMap(src map[string]*string) map[string]string {
+	dst := make(map[string]string)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Bool returns a pointer to the bool value passed in.
+func Bool(v bool) *bool {
+	return &v
+}
+
+// BoolValue returns the value of the bool pointer passed in or
+// false if the pointer is nil.
+func BoolValue(v *bool) bool {
+	if v != nil {
+		return *v
+	}
+	return false
+}
+
+// BoolSlice converts a slice of bool values into a slice of
+// bool pointers
+func BoolSlice(src []bool) []*bool {
+	dst := make([]*bool, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// BoolValueSlice converts a slice of bool pointers into a slice of
+// bool values
+func BoolValueSlice(src []*bool) []bool {
+	dst := make([]bool, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// BoolMap converts a string map of bool values into a string
+// map of bool pointers
+func BoolMap(src map[string]bool) map[string]*bool {
+	dst := make(map[string]*bool)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// BoolValueMap converts a string map of bool pointers into a string
+// map of bool values
+func BoolValueMap(src map[string]*bool) map[string]bool {
+	dst := make(map[string]bool)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Int returns a pointer to the int value passed in.
+func Int(v int) *int {
+	return &v
+}
+
+// IntValue returns the value of the int pointer passed in or
+// 0 if the pointer is nil.
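+//
+// Illustrative sketch of the pointer/value helper pattern repeated
+// throughout this file:
+//
+//	p := aws.Int(3)        // *int pointing at 3
+//	n := aws.IntValue(p)   // 3
+//	z := aws.IntValue(nil) // 0; the Value helpers are nil-safe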
+func IntValue(v *int) int {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// IntSlice converts a slice of int values into a slice of
+// int pointers
+func IntSlice(src []int) []*int {
+	dst := make([]*int, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// IntValueSlice converts a slice of int pointers into a slice of
+// int values
+func IntValueSlice(src []*int) []int {
+	dst := make([]int, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// IntMap converts a string map of int values into a string
+// map of int pointers
+func IntMap(src map[string]int) map[string]*int {
+	dst := make(map[string]*int)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// IntValueMap converts a string map of int pointers into a string
+// map of int values
+func IntValueMap(src map[string]*int) map[string]int {
+	dst := make(map[string]int)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Uint returns a pointer to the uint value passed in.
+func Uint(v uint) *uint {
+	return &v
+}
+
+// UintValue returns the value of the uint pointer passed in or
+// 0 if the pointer is nil.
+func UintValue(v *uint) uint {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// UintSlice converts a slice of uint values into a slice of
+// uint pointers
+func UintSlice(src []uint) []*uint {
+	dst := make([]*uint, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// UintValueSlice converts a slice of uint pointers into a slice of
+// uint values
+func UintValueSlice(src []*uint) []uint {
+	dst := make([]uint, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// UintMap converts a string map of uint values into a string
+// map of uint pointers
+func UintMap(src map[string]uint) map[string]*uint {
+	dst := make(map[string]*uint)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// UintValueMap converts a string map of uint pointers into a string
+// map of uint values
+func UintValueMap(src map[string]*uint) map[string]uint {
+	dst := make(map[string]uint)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Int8 returns a pointer to the int8 value passed in.
+func Int8(v int8) *int8 {
+	return &v
+}
+
+// Int8Value returns the value of the int8 pointer passed in or
+// 0 if the pointer is nil.
+func Int8Value(v *int8) int8 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Int8Slice converts a slice of int8 values into a slice of
+// int8 pointers
+func Int8Slice(src []int8) []*int8 {
+	dst := make([]*int8, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Int8ValueSlice converts a slice of int8 pointers into a slice of
+// int8 values
+func Int8ValueSlice(src []*int8) []int8 {
+	dst := make([]int8, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Int8Map converts a string map of int8 values into a string
+// map of int8 pointers
+func Int8Map(src map[string]int8) map[string]*int8 {
+	dst := make(map[string]*int8)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Int8ValueMap converts a string map of int8 pointers into a string
+// map of int8 values
+func Int8ValueMap(src map[string]*int8) map[string]int8 {
+	dst := make(map[string]int8)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Int16 returns a pointer to the int16 value passed in.
+func Int16(v int16) *int16 {
+	return &v
+}
+
+// Int16Value returns the value of the int16 pointer passed in or
+// 0 if the pointer is nil.
+func Int16Value(v *int16) int16 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Int16Slice converts a slice of int16 values into a slice of
+// int16 pointers
+func Int16Slice(src []int16) []*int16 {
+	dst := make([]*int16, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Int16ValueSlice converts a slice of int16 pointers into a slice of
+// int16 values
+func Int16ValueSlice(src []*int16) []int16 {
+	dst := make([]int16, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Int16Map converts a string map of int16 values into a string
+// map of int16 pointers
+func Int16Map(src map[string]int16) map[string]*int16 {
+	dst := make(map[string]*int16)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Int16ValueMap converts a string map of int16 pointers into a string
+// map of int16 values
+func Int16ValueMap(src map[string]*int16) map[string]int16 {
+	dst := make(map[string]int16)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Int32 returns a pointer to the int32 value passed in.
+func Int32(v int32) *int32 {
+	return &v
+}
+
+// Int32Value returns the value of the int32 pointer passed in or
+// 0 if the pointer is nil.
+func Int32Value(v *int32) int32 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Int32Slice converts a slice of int32 values into a slice of
+// int32 pointers
+func Int32Slice(src []int32) []*int32 {
+	dst := make([]*int32, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Int32ValueSlice converts a slice of int32 pointers into a slice of
+// int32 values
+func Int32ValueSlice(src []*int32) []int32 {
+	dst := make([]int32, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Int32Map converts a string map of int32 values into a string
+// map of int32 pointers
+func Int32Map(src map[string]int32) map[string]*int32 {
+	dst := make(map[string]*int32)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Int32ValueMap converts a string map of int32 pointers into a string
+// map of int32 values
+func Int32ValueMap(src map[string]*int32) map[string]int32 {
+	dst := make(map[string]int32)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Int64 returns a pointer to the int64 value passed in.
+func Int64(v int64) *int64 {
+	return &v
+}
+
+// Int64Value returns the value of the int64 pointer passed in or
+// 0 if the pointer is nil.
+func Int64Value(v *int64) int64 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Int64Slice converts a slice of int64 values into a slice of
+// int64 pointers
+func Int64Slice(src []int64) []*int64 {
+	dst := make([]*int64, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Int64ValueSlice converts a slice of int64 pointers into a slice of
+// int64 values
+func Int64ValueSlice(src []*int64) []int64 {
+	dst := make([]int64, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Int64Map converts a string map of int64 values into a string
+// map of int64 pointers
+func Int64Map(src map[string]int64) map[string]*int64 {
+	dst := make(map[string]*int64)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Int64ValueMap converts a string map of int64 pointers into a string
+// map of int64 values
+func Int64ValueMap(src map[string]*int64) map[string]int64 {
+	dst := make(map[string]int64)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Uint8 returns a pointer to the uint8 value passed in.
+func Uint8(v uint8) *uint8 {
+	return &v
+}
+
+// Uint8Value returns the value of the uint8 pointer passed in or
+// 0 if the pointer is nil.
+func Uint8Value(v *uint8) uint8 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Uint8Slice converts a slice of uint8 values into a slice of
+// uint8 pointers
+func Uint8Slice(src []uint8) []*uint8 {
+	dst := make([]*uint8, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Uint8ValueSlice converts a slice of uint8 pointers into a slice of
+// uint8 values
+func Uint8ValueSlice(src []*uint8) []uint8 {
+	dst := make([]uint8, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Uint8Map converts a string map of uint8 values into a string
+// map of uint8 pointers
+func Uint8Map(src map[string]uint8) map[string]*uint8 {
+	dst := make(map[string]*uint8)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Uint8ValueMap converts a string map of uint8 pointers into a string
+// map of uint8 values
+func Uint8ValueMap(src map[string]*uint8) map[string]uint8 {
+	dst := make(map[string]uint8)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Uint16 returns a pointer to the uint16 value passed in.
+func Uint16(v uint16) *uint16 {
+	return &v
+}
+
+// Uint16Value returns the value of the uint16 pointer passed in or
+// 0 if the pointer is nil.
+func Uint16Value(v *uint16) uint16 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Uint16Slice converts a slice of uint16 values into a slice of
+// uint16 pointers
+func Uint16Slice(src []uint16) []*uint16 {
+	dst := make([]*uint16, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Uint16ValueSlice converts a slice of uint16 pointers into a slice of
+// uint16 values
+func Uint16ValueSlice(src []*uint16) []uint16 {
+	dst := make([]uint16, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Uint16Map converts a string map of uint16 values into a string
+// map of uint16 pointers
+func Uint16Map(src map[string]uint16) map[string]*uint16 {
+	dst := make(map[string]*uint16)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Uint16ValueMap converts a string map of uint16 pointers into a string
+// map of uint16 values
+func Uint16ValueMap(src map[string]*uint16) map[string]uint16 {
+	dst := make(map[string]uint16)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Uint32 returns a pointer to the uint32 value passed in.
+func Uint32(v uint32) *uint32 {
+	return &v
+}
+
+// Uint32Value returns the value of the uint32 pointer passed in or
+// 0 if the pointer is nil.
+func Uint32Value(v *uint32) uint32 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Uint32Slice converts a slice of uint32 values into a slice of
+// uint32 pointers
+func Uint32Slice(src []uint32) []*uint32 {
+	dst := make([]*uint32, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Uint32ValueSlice converts a slice of uint32 pointers into a slice of
+// uint32 values
+func Uint32ValueSlice(src []*uint32) []uint32 {
+	dst := make([]uint32, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Uint32Map converts a string map of uint32 values into a string
+// map of uint32 pointers
+func Uint32Map(src map[string]uint32) map[string]*uint32 {
+	dst := make(map[string]*uint32)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Uint32ValueMap converts a string map of uint32 pointers into a string
+// map of uint32 values
+func Uint32ValueMap(src map[string]*uint32) map[string]uint32 {
+	dst := make(map[string]uint32)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Uint64 returns a pointer to the uint64 value passed in.
+func Uint64(v uint64) *uint64 {
+	return &v
+}
+
+// Uint64Value returns the value of the uint64 pointer passed in or
+// 0 if the pointer is nil.
+func Uint64Value(v *uint64) uint64 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Uint64Slice converts a slice of uint64 values into a slice of
+// uint64 pointers
+func Uint64Slice(src []uint64) []*uint64 {
+	dst := make([]*uint64, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Uint64ValueSlice converts a slice of uint64 pointers into a slice of
+// uint64 values
+func Uint64ValueSlice(src []*uint64) []uint64 {
+	dst := make([]uint64, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Uint64Map converts a string map of uint64 values into a string
+// map of uint64 pointers
+func Uint64Map(src map[string]uint64) map[string]*uint64 {
+	dst := make(map[string]*uint64)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Uint64ValueMap converts a string map of uint64 pointers into a string
+// map of uint64 values
+func Uint64ValueMap(src map[string]*uint64) map[string]uint64 {
+	dst := make(map[string]uint64)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Float32 returns a pointer to the float32 value passed in.
+func Float32(v float32) *float32 {
+	return &v
+}
+
+// Float32Value returns the value of the float32 pointer passed in or
+// 0 if the pointer is nil.
+func Float32Value(v *float32) float32 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Float32Slice converts a slice of float32 values into a slice of
+// float32 pointers
+func Float32Slice(src []float32) []*float32 {
+	dst := make([]*float32, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Float32ValueSlice converts a slice of float32 pointers into a slice of
+// float32 values
+func Float32ValueSlice(src []*float32) []float32 {
+	dst := make([]float32, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Float32Map converts a string map of float32 values into a string
+// map of float32 pointers
+func Float32Map(src map[string]float32) map[string]*float32 {
+	dst := make(map[string]*float32)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Float32ValueMap converts a string map of float32 pointers into a string
+// map of float32 values
+func Float32ValueMap(src map[string]*float32) map[string]float32 {
+	dst := make(map[string]float32)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Float64 returns a pointer to the float64 value passed in.
+func Float64(v float64) *float64 {
+	return &v
+}
+
+// Float64Value returns the value of the float64 pointer passed in or
+// 0 if the pointer is nil.
+func Float64Value(v *float64) float64 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Float64Slice converts a slice of float64 values into a slice of
+// float64 pointers
+func Float64Slice(src []float64) []*float64 {
+	dst := make([]*float64, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Float64ValueSlice converts a slice of float64 pointers into a slice of
+// float64 values
+func Float64ValueSlice(src []*float64) []float64 {
+	dst := make([]float64, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Float64Map converts a string map of float64 values into a string
+// map of float64 pointers
+func Float64Map(src map[string]float64) map[string]*float64 {
+	dst := make(map[string]*float64)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Float64ValueMap converts a string map of float64 pointers into a string
+// map of float64 values
+func Float64ValueMap(src map[string]*float64) map[string]float64 {
+	dst := make(map[string]float64)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Time returns a pointer to the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+	return &v
+}
+
+// TimeValue returns the value of the time.Time pointer passed in or
+// time.Time{} if the pointer is nil.
+func TimeValue(v *time.Time) time.Time {
+	if v != nil {
+		return *v
+	}
+	return time.Time{}
+}
+
+// SecondsTimeValue converts an int64 pointer to a time.Time value
+// representing seconds since Epoch or time.Time{} if the pointer is nil.
+func SecondsTimeValue(v *int64) time.Time {
+	if v != nil {
+		return time.Unix((*v / 1000), 0)
+	}
+	return time.Time{}
+}
+
+// MillisecondsTimeValue converts an int64 pointer to a time.Time value
+// representing milliseconds since Epoch or time.Time{} if the pointer is nil.
+func MillisecondsTimeValue(v *int64) time.Time {
+	if v != nil {
+		return time.Unix(0, (*v * 1000000))
+	}
+	return time.Time{}
+}
+
+// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
+// The result is undefined if the Unix time cannot be represented by an int64;
+// this includes calling TimeUnixMilli on a zero Time.
+//
+// This utility is useful for service APIs such as CloudWatch Logs which require
+// their Unix time values to be in milliseconds.
+//
+// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
+func TimeUnixMilli(t time.Time) int64 {
+	return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
+}
+
+// TimeSlice converts a slice of time.Time values into a slice of
+// time.Time pointers
+func TimeSlice(src []time.Time) []*time.Time {
+	dst := make([]*time.Time, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// TimeValueSlice converts a slice of time.Time pointers into a slice of
+// time.Time values
+func TimeValueSlice(src []*time.Time) []time.Time {
+	dst := make([]time.Time, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// TimeMap converts a string map of time.Time values into a string
+// map of time.Time pointers
+func TimeMap(src map[string]time.Time) map[string]*time.Time {
+	dst := make(map[string]*time.Time)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// TimeValueMap converts a string map of time.Time pointers into a string
+// map of time.Time values
+func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
+	dst := make(map[string]time.Time)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
new file mode 100644
index 000000000..aa902d708
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
@@ -0,0 +1,230 @@
+package corehandlers
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// Interface for matching types which also have a Len method.
+type lener interface {
+	Len() int
+}
+
+// BuildContentLengthHandler builds the content length of a request based on the body,
+// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
+// to determine the request body length and no "Content-Length" was specified, it will panic.
+//
+// The Content-Length will only be added to the request if the length of the body
+// is greater than 0. If the body is empty or the current `Content-Length`
+// header is <= 0, the header will also be stripped.
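+//
+// For example (illustrative sketch; req is a *request.Request built
+// elsewhere), a buffered body produces an explicit header:
+//
+//	req.SetBufferBody([]byte("hello")) // request is sent with Content-Length: 5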
+var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
+	var length int64
+
+	if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
+		length, _ = strconv.ParseInt(slength, 10, 64)
+	} else {
+		if r.Body != nil {
+			var err error
+			length, err = aws.SeekerLen(r.Body)
+			if err != nil {
+				r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err)
+				return
+			}
+		}
+	}
+
+	if length > 0 {
+		r.HTTPRequest.ContentLength = length
+		r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
+	} else {
+		r.HTTPRequest.ContentLength = 0
+		r.HTTPRequest.Header.Del("Content-Length")
+	}
+}}
+
+var reStatusCode = regexp.MustCompile(`^(\d{3})`)
+
+// ValidateReqSigHandler is a request handler to ensure that the request's
+// signature doesn't expire before it is sent. This can happen when a request
+// is built and signed significantly before it is sent, or when significant
+// delays occur while retrying requests, which would cause the signature to expire.
+var ValidateReqSigHandler = request.NamedHandler{
+	Name: "core.ValidateReqSigHandler",
+	Fn: func(r *request.Request) {
+		// Unsigned requests are not signed
+		if r.Config.Credentials == credentials.AnonymousCredentials {
+			return
+		}
+
+		signedTime := r.Time
+		if !r.LastSignedAt.IsZero() {
+			signedTime = r.LastSignedAt
+		}
+
+		// 5 minutes to allow for some clock skew/delays in transmission.
+		// Would be improved with aws/aws-sdk-go#423
+		if signedTime.Add(5 * time.Minute).After(time.Now()) {
+			return
+		}
+
+		fmt.Println("request expired, resigning")
+		r.Sign()
+	},
+}
+
+// SendHandler is a request handler to send service request using HTTP client.
+var SendHandler = request.NamedHandler{
+	Name: "core.SendHandler",
+	Fn: func(r *request.Request) {
+		sender := sendFollowRedirects
+		if r.DisableFollowRedirects {
+			sender = sendWithoutFollowRedirects
+		}
+
+		if request.NoBody == r.HTTPRequest.Body {
+			// Strip off the request body if the NoBody reader was used as a
+			// placeholder for a request body. This prevents the SDK from
+			// making requests with a request body when it would be invalid
+			// to do so.
+			//
+			// Use a shallow copy of the http.Request to ensure the race condition
+			// of transport on Body will not trigger
+			reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest
+			reqCopy.Body = nil
+			r.HTTPRequest = &reqCopy
+			defer func() {
+				r.HTTPRequest = reqOrig
+			}()
+		}
+
+		var err error
+		r.HTTPResponse, err = sender(r)
+		if err != nil {
+			handleSendError(r, err)
+		}
+	},
+}
+
+func sendFollowRedirects(r *request.Request) (*http.Response, error) {
+	return r.Config.HTTPClient.Do(r.HTTPRequest)
+}
+
+func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) {
+	transport := r.Config.HTTPClient.Transport
+	if transport == nil {
+		transport = http.DefaultTransport
+	}
+
+	return transport.RoundTrip(r.HTTPRequest)
+}
+
+func handleSendError(r *request.Request, err error) {
+	// Prevent leaking if an HTTPResponse was returned. Clean up
+	// the body.
+	if r.HTTPResponse != nil {
+		r.HTTPResponse.Body.Close()
+	}
+	// Capture the case where url.Error is returned for error processing
+	// response. e.g. 301 without location header comes back as string
+	// error and r.HTTPResponse is nil. Other URL redirect errors will
+	// come back in a similar manner.
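+	// For example (illustrative), Go's HTTP client reports a redirect
+	// without a Location header as an *url.Error whose message starts with
+	// the status code ("301 response missing Location header"); the regexp
+	// below recovers that code so a synthetic response can be built.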
+	if e, ok := err.(*url.Error); ok && e.Err != nil {
+		if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
+			code, _ := strconv.ParseInt(s[1], 10, 64)
+			r.HTTPResponse = &http.Response{
+				StatusCode: int(code),
+				Status:     http.StatusText(int(code)),
+				Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
+			}
+			return
+		}
+	}
+	if r.HTTPResponse == nil {
+		// Add a dummy request response object to ensure the HTTPResponse
+		// value is consistent.
+		r.HTTPResponse = &http.Response{
+			StatusCode: int(0),
+			Status:     http.StatusText(int(0)),
+			Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
+		}
+	}
+	// Catch all request errors, and let the default retrier determine
+	// if the error is retryable.
+	r.Error = awserr.New(request.ErrCodeRequestError, "send request failed", err)
+
+	// Override the error with a context canceled error, if the context
+	// was canceled.
+	ctx := r.Context()
+	select {
+	case <-ctx.Done():
+		r.Error = awserr.New(request.CanceledErrorCode,
+			"request context canceled", ctx.Err())
+		r.Retryable = aws.Bool(false)
+	default:
+	}
+}
+
+// ValidateResponseHandler is a request handler to validate service response.
+var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
+	if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
+		// this may be replaced by an UnmarshalError handler
+		r.Error = awserr.New("UnknownError", "unknown error", nil)
+	}
+}}
+
+// AfterRetryHandler performs final checks to determine if the request should
+// be retried and how long to delay.
+var AfterRetryHandler = request.NamedHandler{
+	Name: "core.AfterRetryHandler",
+	Fn: func(r *request.Request) {
+		// If one of the other handlers already set the retry state
+		// we don't want to override it based on the service's state
+		if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) {
+			r.Retryable = aws.Bool(r.ShouldRetry(r))
+		}
+
+		if r.WillRetry() {
+			r.RetryDelay = r.RetryRules(r)
+
+			if sleepFn := r.Config.SleepDelay; sleepFn != nil {
+				// Support SleepDelay for backwards compatibility and testing
+				sleepFn(r.RetryDelay)
+			} else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil {
+				r.Error = awserr.New(request.CanceledErrorCode,
+					"request context canceled", err)
+				r.Retryable = aws.Bool(false)
+				return
+			}
+
+			// when the expired token exception occurs the credentials
+			// need to be expired locally so that the next request to
+			// get credentials will trigger a credentials refresh.
+			if r.IsErrorExpired() {
+				r.Config.Credentials.Expire()
+			}
+
+			r.RetryCount++
+			r.Error = nil
+		}
+	}}
+
+// ValidateEndpointHandler is a request handler to validate a request had the
+// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
+// region is not valid.
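+//
+// For example (illustrative sketch, assuming no region is configured in the
+// environment or shared config):
+//
+//	sess := session.Must(session.NewSession(&aws.Config{}))
+//	svc := s3.New(sess)
+//	// operations on svc fail fast with aws.ErrMissingRegion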
+var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) {
+	if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
+		r.Error = aws.ErrMissingRegion
+	} else if r.ClientInfo.Endpoint == "" {
+		r.Error = aws.ErrMissingEndpoint
+	}
+}}
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
new file mode 100644
index 000000000..7d50b1557
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
@@ -0,0 +1,17 @@
+package corehandlers
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
+// ValidateParametersHandler is a request handler to validate the input parameters.
+// Validating parameters only has meaning if done prior to the request being sent.
+var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
+	if !r.ParamsFilled() {
+		return
+	}
+
+	if v, ok := r.Params.(request.Validator); ok {
+		if err := v.Validate(); err != nil {
+			r.Error = err
+		}
+	}
+}}
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go
new file mode 100644
index 000000000..ab69c7a6f
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go
@@ -0,0 +1,37 @@
+package corehandlers
+
+import (
+	"os"
+	"runtime"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// SDKVersionUserAgentHandler is a request handler for adding the SDK Version
+// to the user agent.
+var SDKVersionUserAgentHandler = request.NamedHandler{
+	Name: "core.SDKVersionUserAgentHandler",
+	Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
+		runtime.Version(), runtime.GOOS, runtime.GOARCH),
+}
+
+const execEnvVar = `AWS_EXECUTION_ENV`
+const execEnvUAKey = `exec-env`
+
+// AddHostExecEnvUserAgentHander is a request handler appending the SDK's
+// execution environment to the user agent.
+//
+// If the environment variable AWS_EXECUTION_ENV is set, its value will be
+// appended to the user agent string.
+var AddHostExecEnvUserAgentHander = request.NamedHandler{
+	Name: "core.AddHostExecEnvUserAgentHander",
+	Fn: func(r *request.Request) {
+		v := os.Getenv(execEnvVar)
+		if len(v) == 0 {
+			return
+		}
+
+		request.AddToUserAgent(r, execEnvUAKey+"/"+v)
+	},
+}
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
new file mode 100644
index 000000000..3ad1e798d
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
@@ -0,0 +1,100 @@
+package credentials
+
+import (
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+var (
+	// ErrNoValidProvidersFoundInChain is returned when there are no valid
+	// providers in the ChainProvider.
+	//
+	// This has been deprecated. For verbose error messaging set
+	// aws.Config.CredentialsChainVerboseErrors to true.
+	ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
+		`no valid providers in chain. Deprecated.
+	For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
+		nil)
+)
+
+// A ChainProvider will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The ChainProvider provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the Providers
+// in the list.
+//
+// If none of the Providers retrieve valid credentials Value, ChainProvider's
+// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
+//
+// If a Provider is found which returns valid credentials Value, ChainProvider
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again.
+//
+// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
+// In this example EnvProvider will first check if any credentials are available
+// via the environment variables. If there are none ChainProvider will check
+// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
+// does not return any credentials ChainProvider will return the error
+// ErrNoValidProvidersFoundInChain
+//
+//	creds := credentials.NewChainCredentials(
+//		[]credentials.Provider{
+//			&credentials.EnvProvider{},
+//			&ec2rolecreds.EC2RoleProvider{
+//				Client: ec2metadata.New(sess),
+//			},
+//		})
+//
+//	// Usage of ChainCredentials with aws.Config
+//	svc := ec2.New(session.Must(session.NewSession(&aws.Config{
+//		Credentials: creds,
+//	})))
+//
+type ChainProvider struct {
+	Providers     []Provider
+	curr          Provider
+	VerboseErrors bool
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+	return NewCredentials(&ChainProvider{
+		Providers: append([]Provider{}, providers...),
+	})
+}
+
+// Retrieve returns the credentials value, or an error if no provider
+// returned credentials without error.
+//
+// If a provider is found it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *ChainProvider) Retrieve() (Value, error) {
+	var errs []error
+	for _, p := range c.Providers {
+		creds, err := p.Retrieve()
+		if err == nil {
+			c.curr = p
+			return creds, nil
+		}
+		errs = append(errs, err)
+	}
+	c.curr = nil
+
+	var err error
+	err = ErrNoValidProvidersFoundInChain
+	if c.VerboseErrors {
+		err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
+	}
+	return Value{}, err
+}
+
+// IsExpired will return the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
+func (c *ChainProvider) IsExpired() bool {
+	if c.curr != nil {
+		return c.curr.IsExpired()
+	}
+
+	return true
+}
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go
new file mode 100644
index 000000000..5852b2648
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go
@@ -0,0 +1,22 @@
+// +build !go1.7
+
+package credentials
+
+import (
+	"github.com/aws/aws-sdk-go/internal/context"
+)
+
+// backgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func backgroundContext() Context { + return context.BackgroundCtx +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go new file mode 100644 index 000000000..388b21541 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go @@ -0,0 +1,20 @@ +// +build go1.7 + +package credentials + +import "context" + +// backgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func backgroundContext() Context { + return context.Background() +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go new file mode 100644 index 000000000..8152a864a --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go @@ -0,0 +1,39 @@ +// +build !go1.9 + +package credentials + +import "time" + +// Context is an copy of the Go v1.7 stdlib's context.Context interface. +// It is represented as a SDK interface to enable you to use the "WithContext" +// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. +// +// This type, aws.Context, and context.Context are equivalent. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. 
+ Value(key interface{}) interface{} +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go new file mode 100644 index 000000000..4356edb3d --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go @@ -0,0 +1,13 @@ +// +build go1.9 + +package credentials + +import "context" + +// Context is an alias of the Go stdlib's context.Context interface. +// It can be used within the SDK's API operation "WithContext" methods. +// +// This type, aws.Context, and context.Context are equivalent. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context = context.Context diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go new file mode 100644 index 000000000..9f8fd92a5 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -0,0 +1,339 @@ +// Package credentials provides credential retrieval and management +// +// The Credentials is the primary method of getting access to and managing +// credentials Values. Using dependency injection retrieval of the credential +// values is handled by a object which satisfies the Provider interface. +// +// By default the Credentials.Get() will cache the successful result of a +// Provider's Retrieve() until Provider.IsExpired() returns true. At which +// point Credentials will call Provider's Retrieve() to get new credential Value. +// +// The Provider is responsible for determining when credentials Value have expired. +// It is also important to note that Credentials will always call Retrieve the +// first time Credentials.Get() is called. +// +// Example of using the environment variable credentials. +// +// creds := credentials.NewEnvCredentials() +// +// // Retrieve the credentials value +// credValue, err := creds.Get() +// if err != nil { +// // handle error +// } +// +// Example of forcing credentials to expire and be refreshed on the next Get(). +// This may be helpful to proactively expire credentials and refresh them sooner +// than they would naturally expire on their own. +// +// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{}) +// creds.Expire() +// credsValue, err := creds.Get() +// // New credentials will be retrieved instead of from cache. +// +// +// Custom Provider +// +// Each Provider built into this package also provides a helper method to generate +// a Credentials pointer setup with the provider. To use a custom Provider just +// create a type which satisfies the Provider interface and pass it to the +// NewCredentials method. +// +// type MyProvider struct{} +// func (m *MyProvider) Retrieve() (Value, error) {...} +// func (m *MyProvider) IsExpired() bool {...} +// +// creds := credentials.NewCredentials(&MyProvider{}) +// credValue, err := creds.Get() +// +package credentials + +import ( + "fmt" + "sync/atomic" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/sync/singleflight" +) + +// AnonymousCredentials is an empty Credential object that can be used as +// dummy placeholder credentials for requests that do not need signed. +// +// This Credentials can be used to configure a service to not sign requests +// when making service API calls. For example, when accessing public +// s3 buckets. 
+// +// svc := s3.New(session.Must(session.NewSession(&aws.Config{ +// Credentials: credentials.AnonymousCredentials, +// }))) +// // Access public S3 buckets. +var AnonymousCredentials = NewStaticCredentials("", "", "") + +// A Value is the AWS credentials value for individual credential fields. +type Value struct { + // AWS Access key ID + AccessKeyID string + + // AWS Secret Access Key + SecretAccessKey string + + // AWS Session Token + SessionToken string + + // Provider used to get credentials + ProviderName string +} + +// HasKeys returns if the credentials Value has both AccessKeyID and +// SecretAccessKey value set. +func (v Value) HasKeys() bool { + return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0 +} + +// A Provider is the interface for any component which will provide credentials +// Value. A provider is required to manage its own Expired state, and what to +// be expired means. +// +// The Provider should not need to implement its own mutexes, because +// that will be managed by Credentials. +type Provider interface { + // Retrieve returns nil if it successfully retrieved the value. + // Error is returned if the value were not obtainable, or empty. + Retrieve() (Value, error) + + // IsExpired returns if the credentials are no longer valid, and need + // to be retrieved. + IsExpired() bool +} + +// ProviderWithContext is a Provider that can retrieve credentials with a Context +type ProviderWithContext interface { + Provider + + RetrieveWithContext(Context) (Value, error) +} + +// An Expirer is an interface that Providers can implement to expose the expiration +// time, if known. If the Provider cannot accurately provide this info, +// it should not implement this interface. +type Expirer interface { + // The time at which the credentials are no longer valid + ExpiresAt() time.Time +} + +// An ErrorProvider is a stub credentials provider that always returns an error +// this is used by the SDK when construction a known provider is not possible +// due to an error. +type ErrorProvider struct { + // The error to be returned from Retrieve + Err error + + // The provider name to set on the Retrieved returned Value + ProviderName string +} + +// Retrieve will always return the error that the ErrorProvider was created with. +func (p ErrorProvider) Retrieve() (Value, error) { + return Value{ProviderName: p.ProviderName}, p.Err +} + +// IsExpired will always return not expired. +func (p ErrorProvider) IsExpired() bool { + return false +} + +// A Expiry provides shared expiration logic to be used by credentials +// providers to implement expiry functionality. +// +// The best method to use this struct is as an anonymous field within the +// provider's struct. +// +// Example: +// type EC2RoleProvider struct { +// Expiry +// ... +// } +type Expiry struct { + // The date/time when to expire on + expiration time.Time + + // If set will be used by IsExpired to determine the current time. + // Defaults to time.Now if CurrentTime is not set. Available for testing + // to be able to mock out the current time. + CurrentTime func() time.Time +} + +// SetExpiration sets the expiration IsExpired will check when called. +// +// If window is greater than 0 the expiration time will be reduced by the +// window value. +// +// Using a window is helpful to trigger credentials to expire sooner than +// the expiration time given to ensure no requests are made with expired +// tokens. 
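Editor's note: the Expiry helper above is easiest to see in use. Below is a minimal sketch, in the style of this package's own doc examples, of a hypothetical custom provider that embeds Expiry to inherit IsExpired; the type name, credential values, and 15-minute lifetime are illustrative assumptions, not part of the vendored file.

	type myProvider struct {
		credentials.Expiry
	}

	func (p *myProvider) Retrieve() (credentials.Value, error) {
		// Fetch keys from wherever they live, then arrange for IsExpired to
		// flip one minute before the hard deadline (the window argument).
		p.SetExpiration(time.Now().Add(15*time.Minute), time.Minute)
		return credentials.Value{
			AccessKeyID:     "AKID",
			SecretAccessKey: "SECRET",
			ProviderName:    "MyProvider",
		}, nil
	}

	creds := credentials.NewCredentials(&myProvider{})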
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { + e.expiration = expiration + if window > 0 { + e.expiration = e.expiration.Add(-window) + } +} + +// IsExpired returns if the credentials are expired. +func (e *Expiry) IsExpired() bool { + curTime := e.CurrentTime + if curTime == nil { + curTime = time.Now + } + return e.expiration.Before(curTime()) +} + +// ExpiresAt returns the expiration time of the credential +func (e *Expiry) ExpiresAt() time.Time { + return e.expiration +} + +// A Credentials provides concurrency safe retrieval of AWS credentials Value. +// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. +// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. +type Credentials struct { + creds atomic.Value + sf singleflight.Group + + provider Provider +} + +// NewCredentials returns a pointer to a new Credentials with the provider set. +func NewCredentials(provider Provider) *Credentials { + c := &Credentials{ + provider: provider, + } + c.creds.Store(Value{}) + return c +} + +// GetWithContext returns the credentials value, or error if the credentials +// Value failed to be retrieved. Will return early if the passed in context is +// canceled. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +// +// Passed in Context is equivalent to aws.Context, and context.Context. +func (c *Credentials) GetWithContext(ctx Context) (Value, error) { + if curCreds := c.creds.Load(); !c.isExpired(curCreds) { + return curCreds.(Value), nil + } + + // Cannot pass context down to the actual retrieve, because the first + // context would cancel the whole group when there is not direct + // association of items in the group. + resCh := c.sf.DoChan("", func() (interface{}, error) { + return c.singleRetrieve(&suppressedContext{ctx}) + }) + select { + case res := <-resCh: + return res.Val.(Value), res.Err + case <-ctx.Done(): + return Value{}, awserr.New("RequestCanceled", + "request context canceled", ctx.Err()) + } +} + +func (c *Credentials) singleRetrieve(ctx Context) (creds interface{}, err error) { + if curCreds := c.creds.Load(); !c.isExpired(curCreds) { + return curCreds.(Value), nil + } + + if p, ok := c.provider.(ProviderWithContext); ok { + creds, err = p.RetrieveWithContext(ctx) + } else { + creds, err = c.provider.Retrieve() + } + if err == nil { + c.creds.Store(creds) + } + + return creds, err +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. 
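Editor's note: a short sketch of the context-aware retrieval path described above, assuming an existing *credentials.Credentials value (creds) and Go 1.9+, where credentials.Context aliases context.Context:

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	v, err := creds.GetWithContext(ctx)
	if err != nil {
		// A canceled or timed-out context surfaces as a "RequestCanceled" awserr.
		return err
	}
	fmt.Println("retrieved via provider:", v.ProviderName)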
+func (c *Credentials) Get() (Value, error) { + return c.GetWithContext(backgroundContext()) +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). +// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). +func (c *Credentials) Expire() { + c.creds.Store(Value{}) +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be retrieved. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + return c.isExpired(c.creds.Load()) +} + +// isExpired helper method wrapping the definition of expired credentials. +func (c *Credentials) isExpired(creds interface{}) bool { + return creds == nil || creds.(Value) == Value{} || c.provider.IsExpired() +} + +// ExpiresAt provides access to the functionality of the Expirer interface of +// the underlying Provider, if it supports that interface. Otherwise, it returns +// an error. +func (c *Credentials) ExpiresAt() (time.Time, error) { + expirer, ok := c.provider.(Expirer) + if !ok { + return time.Time{}, awserr.New("ProviderNotExpirer", + fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.Load().(Value).ProviderName), + nil) + } + if c.creds.Load().(Value) == (Value{}) { + // set expiration time to the distant past + return time.Time{}, nil + } + return expirer.ExpiresAt(), nil +} + +type suppressedContext struct { + Context +} + +func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) { + return time.Time{}, false +} + +func (s *suppressedContext) Done() <-chan struct{} { + return nil +} + +func (s *suppressedContext) Err() error { + return nil +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go new file mode 100644 index 000000000..92af5b725 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -0,0 +1,188 @@ +package ec2rolecreds + +import ( + "bufio" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkuri" +) + +// ProviderName provides a name of EC2Role provider +const ProviderName = "EC2RoleProvider" + +// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if +// those credentials are expired. +// +// Example how to configure the EC2RoleProvider with custom http Client, Endpoint +// or ExpiryWindow +// +// p := &ec2rolecreds.EC2RoleProvider{ +// // Pass in a custom timeout to be used when requesting +// // IAM EC2 Role credentials. +// Client: ec2metadata.New(sess, aws.Config{ +// HTTPClient: &http.Client{Timeout: 10 * time.Second}, +// }), +// +// // Do not use early expiry of credentials. If a non zero value is +// // specified the credentials will be expired early +// ExpiryWindow: 0, +// } +type EC2RoleProvider struct { + credentials.Expiry + + // Required EC2Metadata client to use when connecting to EC2 metadata service. + Client *ec2metadata.EC2Metadata + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. 
This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. +// The ConfigProvider is satisfied by the session.Session type. +func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: ec2metadata.New(c), + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2 +// metadata service. +func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: client, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve retrieves credentials from the EC2 service. +// Error will be returned if the request fails, or unable to extract +// the desired credentials. +func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { + return m.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext retrieves credentials from the EC2 service. +// Error will be returned if the request fails, or unable to extract +// the desired credentials. +func (m *EC2RoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + credsList, err := requestCredList(ctx, m.Client) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + if len(credsList) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) + } + credsName := credsList[0] + + roleCreds, err := requestCred(ctx, m.Client, credsName) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: roleCreds.AccessKeyID, + SecretAccessKey: roleCreds.SecretAccessKey, + SessionToken: roleCreds.Token, + ProviderName: ProviderName, + }, nil +} + +// A ec2RoleCredRespBody provides the shape for unmarshaling credential +// request responses. +type ec2RoleCredRespBody struct { + // Success State + Expiration time.Time + AccessKeyID string + SecretAccessKey string + Token string + + // Error state + Code string + Message string +} + +const iamSecurityCredsPath = "iam/security-credentials/" + +// requestCredList requests a list of credentials from the EC2 service. 
+// If there are no credentials, or there is an error making or receiving the request +func requestCredList(ctx aws.Context, client *ec2metadata.EC2Metadata) ([]string, error) { + resp, err := client.GetMetadataWithContext(ctx, iamSecurityCredsPath) + if err != nil { + return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err) + } + + credsList := []string{} + s := bufio.NewScanner(strings.NewReader(resp)) + for s.Scan() { + credsList = append(credsList, s.Text()) + } + + if err := s.Err(); err != nil { + return nil, awserr.New(request.ErrCodeSerialization, + "failed to read EC2 instance role from metadata service", err) + } + + return credsList, nil +} + +// requestCred requests the credentials for a specific credentials from the EC2 service. +// +// If the credentials cannot be found, or there is an error reading the response +// and error will be returned. +func requestCred(ctx aws.Context, client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { + resp, err := client.GetMetadataWithContext(ctx, sdkuri.PathJoin(iamSecurityCredsPath, credsName)) + if err != nil { + return ec2RoleCredRespBody{}, + awserr.New("EC2RoleRequestError", + fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName), + err) + } + + respCreds := ec2RoleCredRespBody{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, + awserr.New(request.ErrCodeSerialization, + fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), + err) + } + + if respCreds.Code != "Success" { + // If an error code was returned something failed requesting the role. + return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) + } + + return respCreds, nil +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go new file mode 100644 index 000000000..785f30d8e --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -0,0 +1,210 @@ +// Package endpointcreds provides support for retrieving credentials from an +// arbitrary HTTP endpoint. +// +// The credentials endpoint Provider can receive both static and refreshable +// credentials that will expire. Credentials are static when an "Expiration" +// value is not provided in the endpoint's response. +// +// Static credentials will never expire once they have been retrieved. The format +// of the static credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// } +// +// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration +// value in the response. The format of the refreshable credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// "Token" : "AQoDY....=", +// "Expiration" : "2016-02-25T06:03:31Z" +// } +// +// Errors should be returned in the following format and only returned with 400 +// or 500 HTTP status codes. +// { +// "code": "ErrorCode", +// "message": "Helpful error message." 
+// } +package endpointcreds + +import ( + "encoding/json" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" +) + +// ProviderName is the name of the credentials provider. +const ProviderName = `CredentialsEndpointProvider` + +// Provider satisfies the credentials.Provider interface, and is a client to +// retrieve credentials from an arbitrary endpoint. +type Provider struct { + staticCreds bool + credentials.Expiry + + // Requires a AWS Client to make HTTP requests to the endpoint with. + // the Endpoint the request will be made to is provided by the aws.Config's + // Endpoint value. + Client *client.Client + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration + + // Optional authorization token value if set will be used as the value of + // the Authorization header of the endpoint credential request. + AuthorizationToken string +} + +// NewProviderClient returns a credentials Provider for retrieving AWS credentials +// from arbitrary endpoint. +func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider { + p := &Provider{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "CredentialsEndpoint", + Endpoint: endpoint, + }, + handlers, + ), + } + + p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler) + p.Client.Handlers.UnmarshalError.PushBack(unmarshalError) + p.Client.Handlers.Validate.Clear() + p.Client.Handlers.Validate.PushBack(validateEndpointHandler) + + for _, option := range options { + option(p) + } + + return p +} + +// NewCredentialsClient returns a pointer to a new Credentials object +// wrapping the endpoint credentials Provider. +func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials { + return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...)) +} + +// IsExpired returns true if the credentials retrieved are expired, or not yet +// retrieved. +func (p *Provider) IsExpired() bool { + if p.staticCreds { + return false + } + return p.Expiry.IsExpired() +} + +// Retrieve will attempt to request the credentials from the endpoint the Provider +// was configured for. And error will be returned if the retrieval fails. +func (p *Provider) Retrieve() (credentials.Value, error) { + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext will attempt to request the credentials from the endpoint the Provider +// was configured for. And error will be returned if the retrieval fails. 
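Editor's note: a minimal sketch of wiring the endpoint provider into a service client, assuming the SDK's defaults package supplies the config and handlers; the URL is a placeholder, not a documented endpoint:

	creds := endpointcreds.NewCredentialsClient(
		*defaults.Config(),
		defaults.Handlers(),
		"http://localhost:8080/creds", // assumed endpoint serving the JSON shown above
		func(p *endpointcreds.Provider) {
			p.ExpiryWindow = 30 * time.Second
		},
	)
	svc := s3.New(session.Must(session.NewSession()), &aws.Config{Credentials: creds})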
+func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + resp, err := p.getCredentials(ctx) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("CredentialsEndpointError", "failed to load credentials", err) + } + + if resp.Expiration != nil { + p.SetExpiration(*resp.Expiration, p.ExpiryWindow) + } else { + p.staticCreds = true + } + + return credentials.Value{ + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.Token, + ProviderName: ProviderName, + }, nil +} + +type getCredentialsOutput struct { + Expiration *time.Time + AccessKeyID string + SecretAccessKey string + Token string +} + +type errorOutput struct { + Code string `json:"code"` + Message string `json:"message"` +} + +func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error) { + op := &request.Operation{ + Name: "GetCredentials", + HTTPMethod: "GET", + } + + out := &getCredentialsOutput{} + req := p.Client.NewRequest(op, nil, out) + req.SetContext(ctx) + req.HTTPRequest.Header.Set("Accept", "application/json") + if authToken := p.AuthorizationToken; len(authToken) != 0 { + req.HTTPRequest.Header.Set("Authorization", authToken) + } + + return out, req.Send() +} + +func validateEndpointHandler(r *request.Request) { + if len(r.ClientInfo.Endpoint) == 0 { + r.Error = aws.ErrMissingEndpoint + } +} + +func unmarshalHandler(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + out := r.Data.(*getCredentialsOutput) + if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, + "failed to decode endpoint credentials", + err, + ) + } +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + var errOut errorOutput + err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to decode error message", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + // Response body format is not consistent between metadata endpoints. + // Grab the error message as a string and include that as the source error + r.Error = awserr.New(errOut.Code, errOut.Message, nil) +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go new file mode 100644 index 000000000..54c5cf733 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go @@ -0,0 +1,74 @@ +package credentials + +import ( + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// EnvProviderName provides a name of Env provider +const EnvProviderName = "EnvProvider" + +var ( + // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be + // found in the process's environment. + ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) + + // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key + // can't be found in the process's environment. + ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) +) + +// A EnvProvider retrieves credentials from the environment variables of the +// running process. Environment credentials never expire. 
+// +// Environment variables used: +// +// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY +// +// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY +type EnvProvider struct { + retrieved bool +} + +// NewEnvCredentials returns a pointer to a new Credentials object +// wrapping the environment variable provider. +func NewEnvCredentials() *Credentials { + return NewCredentials(&EnvProvider{}) +} + +// Retrieve retrieves the keys from the environment. +func (e *EnvProvider) Retrieve() (Value, error) { + e.retrieved = false + + id := os.Getenv("AWS_ACCESS_KEY_ID") + if id == "" { + id = os.Getenv("AWS_ACCESS_KEY") + } + + secret := os.Getenv("AWS_SECRET_ACCESS_KEY") + if secret == "" { + secret = os.Getenv("AWS_SECRET_KEY") + } + + if id == "" { + return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound + } + + if secret == "" { + return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound + } + + e.retrieved = true + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: os.Getenv("AWS_SESSION_TOKEN"), + ProviderName: EnvProviderName, + }, nil +} + +// IsExpired returns if the credentials have been retrieved. +func (e *EnvProvider) IsExpired() bool { + return !e.retrieved +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini new file mode 100644 index 000000000..7fc91d9d2 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini @@ -0,0 +1,12 @@ +[default] +aws_access_key_id = accessKey +aws_secret_access_key = secret +aws_session_token = token + +[no_token] +aws_access_key_id = accessKey +aws_secret_access_key = secret + +[with_colon] +aws_access_key_id: accessKey +aws_secret_access_key: secret diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go new file mode 100644 index 000000000..e62483600 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go @@ -0,0 +1,426 @@ +/* +Package processcreds is a credential Provider to retrieve `credential_process` +credentials. + +WARNING: The following describes a method of sourcing credentials from an external +process. This can potentially be dangerous, so proceed with caution. Other +credential providers should be preferred if at all possible. If using this +option, you should make sure that the config file is as locked down as possible +using security best practices for your operating system. + +You can use credentials from a `credential_process` in a variety of ways. + +One way is to setup your shared config file, located in the default +location, with the `credential_process` key and the command you want to be +called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable +(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file. + + [default] + credential_process = /command/to/call + +Creating a new session will use the credential process to retrieve credentials. +NOTE: If there are credentials in the profile you are using, the credential +process will not be used. + + // Initialize a session to load credentials. + sess, _ := session.NewSession(&aws.Config{ + Region: aws.String("us-east-1")}, + ) + + // Create S3 service client to use the credentials. 
+ svc := s3.New(sess) + +Another way to use the `credential_process` method is by using +`credentials.NewCredentials()` and providing a command to be executed to +retrieve credentials: + + // Create credentials using the ProcessProvider. + creds := processcreds.NewCredentials("/path/to/command") + + // Create service client value configured for credentials. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +You can set a non-default timeout for the `credential_process` with another +constructor, `credentials.NewCredentialsTimeout()`, providing the timeout. To +set a one minute timeout: + + // Create credentials using the ProcessProvider. + creds := processcreds.NewCredentialsTimeout( + "/path/to/command", + time.Duration(500) * time.Millisecond) + +If you need more control, you can set any configurable options in the +credentials using one or more option functions. For example, you can set a two +minute timeout, a credential duration of 60 minutes, and a maximum stdout +buffer size of 2k. + + creds := processcreds.NewCredentials( + "/path/to/command", + func(opt *ProcessProvider) { + opt.Timeout = time.Duration(2) * time.Minute + opt.Duration = time.Duration(60) * time.Minute + opt.MaxBufSize = 2048 + }) + +You can also use your own `exec.Cmd`: + + // Create an exec.Cmd + myCommand := exec.Command("/path/to/command") + + // Create credentials using your exec.Cmd and custom timeout + creds := processcreds.NewCredentialsCommand( + myCommand, + func(opt *processcreds.ProcessProvider) { + opt.Timeout = time.Duration(1) * time.Second + }) +*/ +package processcreds + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "runtime" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +const ( + // ProviderName is the name this credentials provider will label any + // returned credentials Value with. 
+ ProviderName = `ProcessProvider` + + // ErrCodeProcessProviderParse error parsing process output + ErrCodeProcessProviderParse = "ProcessProviderParseError" + + // ErrCodeProcessProviderVersion version error in output + ErrCodeProcessProviderVersion = "ProcessProviderVersionError" + + // ErrCodeProcessProviderRequired required attribute missing in output + ErrCodeProcessProviderRequired = "ProcessProviderRequiredError" + + // ErrCodeProcessProviderExecution execution of command failed + ErrCodeProcessProviderExecution = "ProcessProviderExecutionError" + + // errMsgProcessProviderTimeout process took longer than allowed + errMsgProcessProviderTimeout = "credential process timed out" + + // errMsgProcessProviderProcess process error + errMsgProcessProviderProcess = "error in credential_process" + + // errMsgProcessProviderParse problem parsing output + errMsgProcessProviderParse = "parse failed of credential_process output" + + // errMsgProcessProviderVersion version error in output + errMsgProcessProviderVersion = "wrong version in process output (not 1)" + + // errMsgProcessProviderMissKey missing access key id in output + errMsgProcessProviderMissKey = "missing AccessKeyId in process output" + + // errMsgProcessProviderMissSecret missing secret acess key in output + errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output" + + // errMsgProcessProviderPrepareCmd prepare of command failed + errMsgProcessProviderPrepareCmd = "failed to prepare command" + + // errMsgProcessProviderEmptyCmd command must not be empty + errMsgProcessProviderEmptyCmd = "command must not be empty" + + // errMsgProcessProviderPipe failed to initialize pipe + errMsgProcessProviderPipe = "failed to initialize pipe" + + // DefaultDuration is the default amount of time in minutes that the + // credentials will be valid for. + DefaultDuration = time.Duration(15) * time.Minute + + // DefaultBufSize limits buffer size from growing to an enormous + // amount due to a faulty process. + DefaultBufSize = int(8 * sdkio.KibiByte) + + // DefaultTimeout default limit on time a process can run. + DefaultTimeout = time.Duration(1) * time.Minute +) + +// ProcessProvider satisfies the credentials.Provider interface, and is a +// client to retrieve credentials from a process. +type ProcessProvider struct { + staticCreds bool + credentials.Expiry + originalCommand []string + + // Expiry duration of the credentials. Defaults to 15 minutes if not set. + Duration time.Duration + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration + + // A string representing an os command that should return a JSON with + // credential information. + command *exec.Cmd + + // MaxBufSize limits memory usage from growing to an enormous + // amount due to a faulty process. + MaxBufSize int + + // Timeout limits the time a process can run. + Timeout time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping the +// ProcessProvider. The credentials will expire every 15 minutes by default. 
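Editor's note: a sketch tying the options above together, in the style of the package doc; the helper path is a placeholder:

	creds := processcreds.NewCredentials(
		"/usr/local/bin/fetch-creds", // assumed helper emitting the JSON contract
		func(opt *processcreds.ProcessProvider) {
			opt.Timeout = 30 * time.Second // fail faster than the 1m default
		},
	)
	sess := session.Must(session.NewSession(&aws.Config{Credentials: creds}))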
+func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials { + p := &ProcessProvider{ + command: exec.Command(command), + Duration: DefaultDuration, + Timeout: DefaultTimeout, + MaxBufSize: DefaultBufSize, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsTimeout returns a pointer to a new Credentials object with +// the specified command and timeout, and default duration and max buffer size. +func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials { + p := NewCredentials(command, func(opt *ProcessProvider) { + opt.Timeout = timeout + }) + + return p +} + +// NewCredentialsCommand returns a pointer to a new Credentials object with +// the specified command, and default timeout, duration and max buffer size. +func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials { + p := &ProcessProvider{ + command: command, + Duration: DefaultDuration, + Timeout: DefaultTimeout, + MaxBufSize: DefaultBufSize, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +type credentialProcessResponse struct { + Version int + AccessKeyID string `json:"AccessKeyId"` + SecretAccessKey string + SessionToken string + Expiration *time.Time +} + +// Retrieve executes the 'credential_process' and returns the credentials. +func (p *ProcessProvider) Retrieve() (credentials.Value, error) { + out, err := p.executeCredentialProcess() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // Serialize and validate response + resp := &credentialProcessResponse{} + if err = json.Unmarshal(out, resp); err != nil { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderParse, + fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)), + err) + } + + if resp.Version != 1 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderVersion, + errMsgProcessProviderVersion, + nil) + } + + if len(resp.AccessKeyID) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderRequired, + errMsgProcessProviderMissKey, + nil) + } + + if len(resp.SecretAccessKey) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderRequired, + errMsgProcessProviderMissSecret, + nil) + } + + // Handle expiration + p.staticCreds = resp.Expiration == nil + if resp.Expiration != nil { + p.SetExpiration(*resp.Expiration, p.ExpiryWindow) + } + + return credentials.Value{ + ProviderName: ProviderName, + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.SessionToken, + }, nil +} + +// IsExpired returns true if the credentials retrieved are expired, or not yet +// retrieved. +func (p *ProcessProvider) IsExpired() bool { + if p.staticCreds { + return false + } + return p.Expiry.IsExpired() +} + +// prepareCommand prepares the command to be executed. 
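Editor's note: Retrieve above is strict about the process output, so a conforming emitter is worth sketching; this is a hypothetical helper, not part of the SDK. Version must be 1, AccessKeyId and SecretAccessKey are required, and omitting Expiration makes the credentials static:

	package main

	import (
		"encoding/json"
		"os"
		"time"
	)

	func main() {
		exp := time.Now().Add(30 * time.Minute).UTC()
		json.NewEncoder(os.Stdout).Encode(map[string]interface{}{
			"Version":         1,
			"AccessKeyId":     "AKID",
			"SecretAccessKey": "SECRET",
			"SessionToken":    "TOKEN",
			"Expiration":      exp, // marshals to RFC 3339, which the *time.Time field decodes
		})
	}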
+func (p *ProcessProvider) prepareCommand() error { + + var cmdArgs []string + if runtime.GOOS == "windows" { + cmdArgs = []string{"cmd.exe", "/C"} + } else { + cmdArgs = []string{"sh", "-c"} + } + + if len(p.originalCommand) == 0 { + p.originalCommand = make([]string, len(p.command.Args)) + copy(p.originalCommand, p.command.Args) + + // check for empty command because it succeeds + if len(strings.TrimSpace(p.originalCommand[0])) < 1 { + return awserr.New( + ErrCodeProcessProviderExecution, + fmt.Sprintf( + "%s: %s", + errMsgProcessProviderPrepareCmd, + errMsgProcessProviderEmptyCmd), + nil) + } + } + + cmdArgs = append(cmdArgs, p.originalCommand...) + p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...) + p.command.Env = os.Environ() + + return nil +} + +// executeCredentialProcess starts the credential process on the OS and +// returns the results or an error. +func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) { + + if err := p.prepareCommand(); err != nil { + return nil, err + } + + // Setup the pipes + outReadPipe, outWritePipe, err := os.Pipe() + if err != nil { + return nil, awserr.New( + ErrCodeProcessProviderExecution, + errMsgProcessProviderPipe, + err) + } + + p.command.Stderr = os.Stderr // display stderr on console for MFA + p.command.Stdout = outWritePipe // get creds json on process's stdout + p.command.Stdin = os.Stdin // enable stdin for MFA + + output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize)) + + stdoutCh := make(chan error, 1) + go readInput( + io.LimitReader(outReadPipe, int64(p.MaxBufSize)), + output, + stdoutCh) + + execCh := make(chan error, 1) + go executeCommand(*p.command, execCh) + + finished := false + var errors []error + for !finished { + select { + case readError := <-stdoutCh: + errors = appendError(errors, readError) + finished = true + case execError := <-execCh: + err := outWritePipe.Close() + errors = appendError(errors, err) + errors = appendError(errors, execError) + if errors != nil { + return output.Bytes(), awserr.NewBatchError( + ErrCodeProcessProviderExecution, + errMsgProcessProviderProcess, + errors) + } + case <-time.After(p.Timeout): + finished = true + return output.Bytes(), awserr.NewBatchError( + ErrCodeProcessProviderExecution, + errMsgProcessProviderTimeout, + errors) // errors can be nil + } + } + + out := output.Bytes() + + if runtime.GOOS == "windows" { + // windows adds slashes to quotes + out = []byte(strings.Replace(string(out), `\"`, `"`, -1)) + } + + return out, nil +} + +// appendError conveniently checks for nil before appending slice +func appendError(errors []error, err error) []error { + if err != nil { + return append(errors, err) + } + return errors +} + +func executeCommand(cmd exec.Cmd, exec chan error) { + // Start the command + err := cmd.Start() + if err == nil { + err = cmd.Wait() + } + + exec <- err +} + +func readInput(r io.Reader, w io.Writer, read chan error) { + tee := io.TeeReader(r, w) + + _, err := ioutil.ReadAll(tee) + + if err == io.EOF { + err = nil + } + + read <- err // will only arrive here when write end of pipe is closed +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go new file mode 100644 index 000000000..e15514958 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -0,0 +1,150 @@ +package credentials + +import ( + "fmt" + "os" + + 
"github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/ini" + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// SharedCredsProviderName provides a name of SharedCreds provider +const SharedCredsProviderName = "SharedCredentialsProvider" + +var ( + // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. + ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) +) + +// A SharedCredentialsProvider retrieves credentials from the current user's home +// directory, and keeps track if those credentials are expired. +// +// Profile ini file example: $HOME/.aws/credentials +type SharedCredentialsProvider struct { + // Path to the shared credentials file. + // + // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.aws/credentials" + // Windows: "%USERPROFILE%\.aws\credentials" + Filename string + + // AWS Profile to extract credentials from the shared credentials file. If empty + // will default to environment variable "AWS_PROFILE" or "default" if + // environment variable is also not set. + Profile string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewSharedCredentials returns a pointer to a new Credentials object +// wrapping the Profile file provider. +func NewSharedCredentials(filename, profile string) *Credentials { + return NewCredentials(&SharedCredentialsProvider{ + Filename: filename, + Profile: profile, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. +func (p *SharedCredentialsProvider) Retrieve() (Value, error) { + p.retrieved = false + + filename, err := p.filename() + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + creds, err := loadProfile(filename, p.profile()) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + p.retrieved = true + return creds, nil +} + +// IsExpired returns if the shared credentials have expired. +func (p *SharedCredentialsProvider) IsExpired() bool { + return !p.retrieved +} + +// loadProfiles loads from the file pointed to by shared credentials filename for profile. +// The credentials retrieved from the profile will be returned or error. Error will be +// returned if it fails to read from the file, or the data is invalid. 
+func loadProfile(filename, profile string) (Value, error) { + config, err := ini.OpenFile(filename) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) + } + + iniProfile, ok := config.GetSection(profile) + if !ok { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil) + } + + id := iniProfile.String("aws_access_key_id") + if len(id) == 0 { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", + fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), + nil) + } + + secret := iniProfile.String("aws_secret_access_key") + if len(secret) == 0 { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", + fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), + nil) + } + + // Default to empty string if not found + token := iniProfile.String("aws_session_token") + + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: token, + ProviderName: SharedCredsProviderName, + }, nil +} + +// filename returns the filename to use to read AWS shared credentials. +// +// Will return an error if the user's home directory path cannot be found. +func (p *SharedCredentialsProvider) filename() (string, error) { + if len(p.Filename) != 0 { + return p.Filename, nil + } + + if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 { + return p.Filename, nil + } + + if home := shareddefaults.UserHomeDir(); len(home) == 0 { + // Backwards compatibility of home directly not found error being returned. + // This error is too verbose, failure when opening the file would of been + // a better error to return. + return "", ErrSharedCredentialsHomeNotFound + } + + p.Filename = shareddefaults.SharedCredentialsFilename() + + return p.Filename, nil +} + +// profile returns the AWS shared credentials profile. If empty will read +// environment variable "AWS_PROFILE". If that is not set profile will +// return "default". +func (p *SharedCredentialsProvider) profile() string { + if p.Profile == "" { + p.Profile = os.Getenv("AWS_PROFILE") + } + if p.Profile == "" { + p.Profile = "default" + } + + return p.Profile +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go new file mode 100644 index 000000000..cbba1e3d5 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go @@ -0,0 +1,57 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// StaticProviderName provides a name of Static provider +const StaticProviderName = "StaticProvider" + +var ( + // ErrStaticCredentialsEmpty is emitted when static credentials are empty. + ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) +) + +// A StaticProvider is a set of credentials which are set programmatically, +// and will never expire. +type StaticProvider struct { + Value +} + +// NewStaticCredentials returns a pointer to a new Credentials object +// wrapping a static credentials value provider. Token is only required +// for temporary security credentials retrieved via STS, otherwise an empty +// string can be passed for this parameter. 
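Editor's note: a sketch of the static provider in use; the key values are placeholders. An empty token is fine for anything other than STS-issued credentials:

	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
	sess := session.Must(session.NewSession(&aws.Config{
		Credentials: creds,
		Region:      aws.String("us-east-1"),
	}))
	svc := s3.New(sess)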
+func NewStaticCredentials(id, secret, token string) *Credentials {
+	return NewCredentials(&StaticProvider{Value: Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    token,
+	}})
+}
+
+// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object
+// wrapping the static credentials value provided. Same as NewStaticCredentials
+// but takes the creds Value instead of individual fields.
+func NewStaticCredentialsFromCreds(creds Value) *Credentials {
+	return NewCredentials(&StaticProvider{Value: creds})
+}
+
+// Retrieve returns the credentials or error if the credentials are invalid.
+func (s *StaticProvider) Retrieve() (Value, error) {
+	if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+		return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
+	}
+
+	if len(s.Value.ProviderName) == 0 {
+		s.Value.ProviderName = StaticProviderName
+	}
+	return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For StaticProvider, the credentials never expire.
+func (s *StaticProvider) IsExpired() bool {
+	return false
+}
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
new file mode 100644
index 000000000..6846ef6f8
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -0,0 +1,363 @@
+/*
+Package stscreds are credential Providers to retrieve STS AWS credentials.
+
+STS provides multiple ways to retrieve credentials which can be used when making
+future AWS service API operation calls.
+
+The SDK will ensure that per instance of credentials.Credentials all requests
+to refresh the credentials will be synchronized. But, the SDK is unable to
+ensure synchronous usage of the AssumeRoleProvider if the value is shared
+between multiple Credentials, Sessions or service clients.
+
+Assume Role
+
+To assume an IAM role using STS with the SDK you can create a new Credentials
+with the SDK's stscreds package.
+
+    // Initial credentials loaded from SDK's default credential chain. Such as
+    // the environment, shared credentials (~/.aws/credentials), or EC2 Instance
+    // Role. These credentials will be used to make the STS Assume Role API.
+    sess := session.Must(session.NewSession())
+
+    // Create the credentials from AssumeRoleProvider to assume the role
+    // referenced by the "myRoleARN" ARN.
+    creds := stscreds.NewCredentials(sess, "myRoleArn")
+
+    // Create service client value configured for credentials
+    // from assumed role.
+    svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with static MFA Token
+
+To assume an IAM role with a MFA token you can either specify a MFA token code
+directly or provide a function to prompt the user each time the credentials
+need to refresh the role's credentials. Specifying the TokenCode should be used
+for short lived operations that will not need to be refreshed, and when you do
+not want to have direct control over when the user provides their MFA token.
+
+With TokenCode the AssumeRoleProvider will not be able to refresh the role's
+credentials.
+
+    // Create the credentials from AssumeRoleProvider to assume the role
+    // referenced by the "myRoleARN" ARN using the MFA token code provided.
+    creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+        p.SerialNumber = aws.String("myTokenSerialNumber")
+        p.TokenCode = aws.String("00000000")
+    })
+
+    // Create service client value configured for credentials
+    // from assumed role.
+    svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with MFA Token Provider
+
+To assume an IAM role with MFA for longer running tasks where the credentials
+may need to be refreshed, setting the TokenProvider field of AssumeRoleProvider
+will allow the credential provider to prompt for a new MFA token code when the
+role's credentials need to be refreshed.
+
+The StdinTokenProvider function is available to prompt on stdin to retrieve
+the MFA token code from the user. You can also implement custom prompts by
+satisfying the TokenProvider function signature.
+
+Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+have undesirable results as the StdinTokenProvider will not be synchronized. A
+single Credentials with an AssumeRoleProvider can be shared safely.
+
+    // Create the credentials from AssumeRoleProvider to assume the role
+    // referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin.
+    creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+        p.SerialNumber = aws.String("myTokenSerialNumber")
+        p.TokenProvider = stscreds.StdinTokenProvider
+    })
+
+    // Create service client value configured for credentials
+    // from assumed role.
+    svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+*/
+package stscreds
+
+import (
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/sdkrand"
+	"github.com/aws/aws-sdk-go/service/sts"
+)
+
+// StdinTokenProvider will prompt on stderr and read from stdin for a string value.
+// An error is returned if reading from stdin fails.
+//
+// Use this function to read MFA tokens from stdin. The function makes no attempt
+// to make atomic prompts from stdin across multiple goroutines.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely.
+//
+// Will wait forever until something is provided on the stdin.
+func StdinTokenProvider() (string, error) {
+	var v string
+	fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ")
+	_, err := fmt.Scanln(&v)
+
+	return v, err
+}
+
+// ProviderName provides a name of AssumeRole provider
+const ProviderName = "AssumeRoleProvider"
+
+// AssumeRoler represents the minimal subset of the STS client API used by this provider.
+type AssumeRoler interface {
+	AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+}
+
+type assumeRolerWithContext interface {
+	AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error)
+}
+
+// DefaultDuration is the default amount of time in minutes that the credentials
+// will be valid for.
+var DefaultDuration = time.Duration(15) * time.Minute
+
+// AssumeRoleProvider retrieves temporary credentials from the STS service, and
+// keeps track of their expiration time.
+//
+// This credential provider will be used by the SDK's default credential chain
+// when shared configuration is enabled, and the shared config or shared credentials
+// file configures assume role. See Session docs for how to do this.
+//
+// AssumeRoleProvider does not provide any synchronization and it is not safe
+// to share this value across multiple Credentials, Sessions, or service clients
+// without also sharing the same Credentials instance.
+type AssumeRoleProvider struct {
+	credentials.Expiry
+
+	// STS client to make assume role request with.
+	Client AssumeRoler
+
+	// Role to be assumed.
+	RoleARN string
+
+	// Session name, if you wish to reuse the credentials elsewhere.
+	RoleSessionName string
+
+	// Optional, you can pass tag key-value pairs to your session. These tags are called session tags.
+	Tags []*sts.Tag
+
+	// A list of keys for session tags that you want to set as transitive.
+	// If you set a tag key as transitive, the corresponding key and value pass to subsequent sessions in a role chain.
+	TransitiveTagKeys []*string
+
+	// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
+	Duration time.Duration
+
+	// Optional ExternalID to pass along, defaults to nil if not set.
+	ExternalID *string
+
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
+	// conversion compresses it into a packed binary format with a separate limit.
+	// The PackedPolicySize response element indicates by percentage how close to
+	// the upper size limit the policy is, with 100% equaling the maximum allowed
+	// size.
+	Policy *string
+
+	// The ARNs of IAM managed policies you want to use as managed session policies.
+	// The policies must exist in the same account as the role.
+	//
+	// This parameter is optional. You can provide up to 10 managed policy ARNs.
+	// However, the plain text that you use for both inline and managed session
+	// policies can't exceed 2,048 characters.
+	//
+	// An AWS conversion compresses the passed session policies and session tags
+	// into a packed binary format that has a separate limit. Your request can fail
+	// for this limit even if your plain text meets the other requirements. The
+	// PackedPolicySize response element indicates by percentage how close the policies
+	// and tags for your request are to the upper size limit.
+	//
+	// Passing policies to this operation returns new temporary credentials. The
+	// resulting session's permissions are the intersection of the role's identity-based
+	// policy and the session policies. You can use the role's temporary credentials
+	// in subsequent AWS API calls to access resources in the account that owns
+	// the role. You cannot use session policies to grant more permissions than
+	// those allowed by the identity-based policy of the role that is being assumed.
+	// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+	// in the IAM User Guide.
+	PolicyArns []*sts.PolicyDescriptorType
+
+	// The identification number of the MFA device that is associated with the user
+	// who is making the AssumeRole call. Specify this value if the trust policy
+	// of the role being assumed includes a condition that requires MFA authentication.
+	// The value is either the serial number for a hardware device (such as GAHT12345678)
+	// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+	SerialNumber *string
+
+	// The value provided by the MFA device, if the trust policy of the role being
+	// assumed requires MFA (that is, if the policy includes a condition that tests
+	// for MFA). If the role being assumed requires MFA and if the TokenCode value
+	// is missing or expired, the AssumeRole call returns an "access denied" error.
+	//
+	// If SerialNumber is set and neither TokenCode nor TokenProvider are also
+	// set, an error will be returned.
+	TokenCode *string
+
+	// Async method of providing MFA token code for assuming an IAM role with MFA.
+	// The value returned by the function will be used as the TokenCode in the Retrieve
+	// call. See StdinTokenProvider for a provider that prompts and reads from stdin.
+	//
+	// This token provider will be called whenever the assumed role's
+	// credentials need to be refreshed when SerialNumber is also set and
+	// TokenCode is not set.
+	//
+	// If both TokenCode and TokenProvider are set, TokenProvider will be used and
+	// TokenCode is ignored.
+	TokenProvider func() (string, error)
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+
+	// MaxJitterFrac reduces the effective Duration of each credential requested
+	// by a random percentage between 0 and MaxJitterFrac. MaxJitterFrac must
+	// have a value between 0 and 1. Any other value may lead to unexpected behavior.
+	// With the default MaxJitterFrac value of 0, no jitter will be used.
+	//
+	// For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the
+	// AssumeRole call will be made with an arbitrary Duration between 27m and
+	// 30m.
+	//
+	// MaxJitterFrac should not be negative.
+	MaxJitterFrac float64
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role will be named after a nanosecond timestamp of this operation.
+//
+// Takes a Config provider to create the STS client. The ConfigProvider is
+// satisfied by the session.Session type.
+//
+// It is safe to share the returned Credentials with multiple Sessions and
+// service clients. All access to the credentials and refreshing them
+// will be synchronized.
+func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+	p := &AssumeRoleProvider{
+		Client:   sts.New(c),
+		RoleARN:  roleARN,
+		Duration: DefaultDuration,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role will be named after a nanosecond timestamp of this operation.
+//
+// Takes an AssumeRoler which can be satisfied by the STS client.
+//
+// It is safe to share the returned Credentials with multiple Sessions and
+// service clients. All access to the credentials and refreshing them
+// will be synchronized.
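+//
+// A minimal usage sketch (illustrative only; "sess" is assumed to be an
+// existing session.Session and the role ARN is a placeholder):
+//
+//     svc := sts.New(sess)
+//     creds := stscreds.NewCredentialsWithClient(svc, "myRoleArn")
+//     s3svc := s3.New(sess, &aws.Config{Credentials: creds})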
+func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: svc, + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + // Apply defaults where parameters are not set. + if p.RoleSessionName == "" { + // Try to work out a role name that will hopefully end up unique. + p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano()) + } + if p.Duration == 0 { + // Expire as often as AWS permits. + p.Duration = DefaultDuration + } + jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration)) + input := &sts.AssumeRoleInput{ + DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)), + RoleArn: aws.String(p.RoleARN), + RoleSessionName: aws.String(p.RoleSessionName), + ExternalId: p.ExternalID, + Tags: p.Tags, + PolicyArns: p.PolicyArns, + TransitiveTagKeys: p.TransitiveTagKeys, + } + if p.Policy != nil { + input.Policy = p.Policy + } + if p.SerialNumber != nil { + if p.TokenCode != nil { + input.SerialNumber = p.SerialNumber + input.TokenCode = p.TokenCode + } else if p.TokenProvider != nil { + input.SerialNumber = p.SerialNumber + code, err := p.TokenProvider() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + input.TokenCode = aws.String(code) + } else { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("AssumeRoleTokenNotAvailable", + "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil) + } + } + + var roleOutput *sts.AssumeRoleOutput + var err error + + if c, ok := p.Client.(assumeRolerWithContext); ok { + roleOutput, err = c.AssumeRoleWithContext(ctx, input) + } else { + roleOutput, err = p.Client.AssumeRole(input) + } + + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // We will proactively generate new credentials before they expire. 
+	p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
+
+	return credentials.Value{
+		AccessKeyID:     *roleOutput.Credentials.AccessKeyId,
+		SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
+		SessionToken:    *roleOutput.Credentials.SessionToken,
+		ProviderName:    ProviderName,
+	}, nil
+}
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
new file mode 100644
index 000000000..6feb262b2
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
@@ -0,0 +1,135 @@
+package stscreds
+
+import (
+	"fmt"
+	"io/ioutil"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/service/sts"
+	"github.com/aws/aws-sdk-go/service/sts/stsiface"
+)
+
+const (
+	// ErrCodeWebIdentity will be used as an error code when constructing
+	// a new error to be returned during session creation or retrieval.
+	ErrCodeWebIdentity = "WebIdentityErr"
+
+	// WebIdentityProviderName is the web identity provider name
+	WebIdentityProviderName = "WebIdentityCredentials"
+)
+
+// now is used to return a time.Time object representing
+// the current time. This can be used to easily test and
+// compare test values.
+var now = time.Now
+
+// TokenFetcher should return WebIdentity token bytes or an error
+type TokenFetcher interface {
+	FetchToken(credentials.Context) ([]byte, error)
+}
+
+// FetchTokenPath is a path to a WebIdentity token file
+type FetchTokenPath string
+
+// FetchToken returns a token by reading from the filesystem
+func (f FetchTokenPath) FetchToken(ctx credentials.Context) ([]byte, error) {
+	data, err := ioutil.ReadFile(string(f))
+	if err != nil {
+		errMsg := fmt.Sprintf("unable to read file at %s", f)
+		return nil, awserr.New(ErrCodeWebIdentity, errMsg, err)
+	}
+	return data, nil
+}
+
+// WebIdentityRoleProvider is used to retrieve credentials using
+// an OIDC token.
+type WebIdentityRoleProvider struct {
+	credentials.Expiry
+	PolicyArns []*sts.PolicyDescriptorType
+
+	client       stsiface.STSAPI
+	ExpiryWindow time.Duration
+
+	tokenFetcher    TokenFetcher
+	roleARN         string
+	roleSessionName string
+}
+
+// NewWebIdentityCredentials will return a new set of credentials with a given
+// configuration, role arn, and token file path.
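+//
+// A brief usage sketch (hedged: the role ARN, session name, and token path
+// below are illustrative placeholders; "sess" is an assumed session.Session):
+//
+//     creds := stscreds.NewWebIdentityCredentials(sess, "myRoleArn", "mySession", "/path/to/token")
+//     svc := s3.New(sess, &aws.Config{Credentials: creds})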
+func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials {
+	svc := sts.New(c)
+	p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path)
+	return credentials.NewCredentials(p)
+}
+
+// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the
+// provided stsiface.STSAPI
+func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider {
+	return NewWebIdentityRoleProviderWithToken(svc, roleARN, roleSessionName, FetchTokenPath(path))
+}
+
+// NewWebIdentityRoleProviderWithToken will return a new WebIdentityRoleProvider with the
+// provided stsiface.STSAPI and a TokenFetcher
+func NewWebIdentityRoleProviderWithToken(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher) *WebIdentityRoleProvider {
+	return &WebIdentityRoleProvider{
+		client:          svc,
+		tokenFetcher:    tokenFetcher,
+		roleARN:         roleARN,
+		roleSessionName: roleSessionName,
+	}
+}
+
+// Retrieve will attempt to assume a role from a token which is located at
+// the 'WebIdentityTokenFilePath' specified destination; if that is empty an
+// error will be returned.
+func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) {
+	return p.RetrieveWithContext(aws.BackgroundContext())
+}
+
+// RetrieveWithContext will attempt to assume a role from a token which is located at
+// the 'WebIdentityTokenFilePath' specified destination; if that is empty an
+// error will be returned.
+func (p *WebIdentityRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
+	b, err := p.tokenFetcher.FetchToken(ctx)
+	if err != nil {
+		return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed fetching WebIdentity token: ", err)
+	}
+
+	sessionName := p.roleSessionName
+	if len(sessionName) == 0 {
+		// session name is used to uniquely identify a session. This simply
+		// uses unix time in nanoseconds to uniquely identify sessions.
+		sessionName = strconv.FormatInt(now().UnixNano(), 10)
+	}
+	req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{
+		PolicyArns:       p.PolicyArns,
+		RoleArn:          &p.roleARN,
+		RoleSessionName:  &sessionName,
+		WebIdentityToken: aws.String(string(b)),
+	})
+
+	req.SetContext(ctx)
+
+	// InvalidIdentityToken error is a temporary error that can occur
+	// when assuming a role with a JWT web identity token.
+	req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException)
+	if err := req.Send(); err != nil {
+		return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err)
+	}
+
+	p.SetExpiration(aws.TimeValue(resp.Credentials.Expiration), p.ExpiryWindow)
+
+	value := credentials.Value{
+		AccessKeyID:     aws.StringValue(resp.Credentials.AccessKeyId),
+		SecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey),
+		SessionToken:    aws.StringValue(resp.Credentials.SessionToken),
+		ProviderName:    WebIdentityProviderName,
+	}
+	return value, nil
+}
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
new file mode 100644
index 000000000..25a66d1dd
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
@@ -0,0 +1,69 @@
+// Package csm provides the Client Side Monitoring (CSM) client which enables
+// sending metrics via UDP connection to the CSM agent. This package provides
+// control options, and configuration for the CSM client. The client can be
+// controlled manually, or automatically via the SDK's Session configuration.
+//
+// Enabling CSM client via SDK's Session configuration
+//
+// The CSM client can be enabled automatically via SDK's Session configuration.
+// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT
+// environment variable is set to a non-empty value.
+//
+// The configuration options for the CSM client via the SDK's session
+// configuration are:
+//
+//     * AWS_CSM_PORT=<port number>
+//         The port number the CSM agent will receive metrics on.
+//
+//     * AWS_CSM_HOST=<hostname or ip>
+//         The hostname, or IP address the CSM agent will receive metrics on.
+//         Without port number.
+//
+// Manually enabling the CSM client
+//
+// The CSM client can be started, paused, and resumed manually. The Start
+// function will enable the CSM client to publish metrics to the CSM agent. It
+// is safe to call Start concurrently, but if Start is called additional times
+// with a different ClientID or address it will panic.
+//
+//     r, err := csm.Start("clientID", ":31000")
+//     if err != nil {
+//         panic(fmt.Errorf("failed starting CSM: %v", err))
+//     }
+//
+// When controlling the CSM client manually, you must also inject its request
+// handlers into the SDK's Session configuration for the SDK's API clients to
+// publish metrics.
+//
+//     sess, err := session.NewSession(&aws.Config{})
+//     if err != nil {
+//         panic(fmt.Errorf("failed loading session: %v", err))
+//     }
+//
+//     // Add CSM client's metric publishing request handlers to the SDK's
+//     // Session Configuration.
+//     r.InjectHandlers(&sess.Handlers)
+//
+// Controlling CSM client
+//
+// Once the CSM client has been enabled, the Get function will return a Reporter
+// value that you can use to pause and resume the metrics published to the CSM
+// agent. If the Get function is called before the reporter is enabled with the
+// Start function or via the SDK's Session configuration, nil will be returned.
+//
+// The Pause method can be called to stop the CSM client publishing metrics to
+// the CSM agent. The Continue method will resume metric publishing.
+//
+//     // Get the CSM client Reporter.
+//     r := csm.Get()
+//
+//     // Will pause monitoring
+//     r.Pause()
+//     resp, err = client.GetObject(&s3.GetObjectInput{
+//         Bucket: aws.String("bucket"),
+//         Key: aws.String("key"),
+//     })
+//
+//     // Resume monitoring
+//     r.Continue()
+package csm
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
new file mode 100644
index 000000000..4b19e2800
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
@@ -0,0 +1,89 @@
+package csm
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+)
+
+var (
+	lock sync.Mutex
+)
+
+const (
+	// DefaultPort is used when no port is specified.
+	DefaultPort = "31000"
+
+	// DefaultHost is the host that will be used when none is specified.
+	DefaultHost = "127.0.0.1"
+)
+
+// AddressWithDefaults returns a CSM address built from the host and port
+// values. If the host or port is not set, default values will be used
+// instead. If host is "localhost" it will be replaced with "127.0.0.1".
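+//
+// For example (illustrative inputs and the addresses they produce):
+//
+//     AddressWithDefaults("", "")        // "127.0.0.1:31000"
+//     AddressWithDefaults("::1", "5000") // "[::1]:5000"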
+func AddressWithDefaults(host, port string) string {
+	if len(host) == 0 || strings.EqualFold(host, "localhost") {
+		host = DefaultHost
+	}
+
+	if len(port) == 0 {
+		port = DefaultPort
+	}
+
+	// Only an IPv6 host can contain a colon
+	if strings.Contains(host, ":") {
+		return "[" + host + "]:" + port
+	}
+
+	return host + ":" + port
+}
+
+// Start will start a long running goroutine to capture
+// client side metrics. Calling Start multiple times will only
+// start the metric listener once and will panic if a different
+// client ID or port is passed in.
+//
+//     r, err := csm.Start("clientID", "127.0.0.1:31000")
+//     if err != nil {
+//         panic(fmt.Errorf("expected no error, but received %v", err))
+//     }
+//     sess := session.NewSession()
+//     r.InjectHandlers(sess.Handlers)
+//
+//     svc := s3.New(sess)
+//     out, err := svc.GetObject(&s3.GetObjectInput{
+//         Bucket: aws.String("bucket"),
+//         Key: aws.String("key"),
+//     })
+func Start(clientID string, url string) (*Reporter, error) {
+	lock.Lock()
+	defer lock.Unlock()
+
+	if sender == nil {
+		sender = newReporter(clientID, url)
+	} else {
+		if sender.clientID != clientID {
+			panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID))
+		}
+
+		if sender.url != url {
+			panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url))
+		}
+	}
+
+	if err := connect(url); err != nil {
+		sender = nil
+		return nil, err
+	}
+
+	return sender, nil
+}
+
+// Get will return a reporter if one exists; if one does not exist, nil will
+// be returned.
+func Get() *Reporter {
+	lock.Lock()
+	defer lock.Unlock()
+
+	return sender
+}
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go
new file mode 100644
index 000000000..5bacc791a
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go
@@ -0,0 +1,109 @@
+package csm
+
+import (
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+)
+
+type metricTime time.Time
+
+func (t metricTime) MarshalJSON() ([]byte, error) {
+	ns := time.Duration(time.Time(t).UnixNano())
+	return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil
+}
+
+type metric struct {
+	ClientID  *string     `json:"ClientId,omitempty"`
+	API       *string     `json:"Api,omitempty"`
+	Service   *string     `json:"Service,omitempty"`
+	Timestamp *metricTime `json:"Timestamp,omitempty"`
+	Type      *string     `json:"Type,omitempty"`
+	Version   *int        `json:"Version,omitempty"`
+
+	AttemptCount *int `json:"AttemptCount,omitempty"`
+	Latency      *int `json:"Latency,omitempty"`
+
+	Fqdn           *string `json:"Fqdn,omitempty"`
+	UserAgent      *string `json:"UserAgent,omitempty"`
+	AttemptLatency *int    `json:"AttemptLatency,omitempty"`
+
+	SessionToken   *string `json:"SessionToken,omitempty"`
+	Region         *string `json:"Region,omitempty"`
+	AccessKey      *string `json:"AccessKey,omitempty"`
+	HTTPStatusCode *int    `json:"HttpStatusCode,omitempty"`
+	XAmzID2        *string `json:"XAmzId2,omitempty"`
+	XAmzRequestID  *string `json:"XAmznRequestId,omitempty"`
+
+	AWSException        *string `json:"AwsException,omitempty"`
+	AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"`
+	SDKException        *string `json:"SdkException,omitempty"`
+	SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"`
+
+	FinalHTTPStatusCode      *int    `json:"FinalHttpStatusCode,omitempty"`
+	FinalAWSException        *string `json:"FinalAwsException,omitempty"`
+	FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"`
+	FinalSDKException        *string `json:"FinalSdkException,omitempty"`
+	FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"`
+
+	DestinationIP    *string `json:"DestinationIp,omitempty"`
+	ConnectionReused *int    `json:"ConnectionReused,omitempty"`
+
+	AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"`
+	ConnectLatency           *int `json:"ConnectLatency,omitempty"`
+	RequestLatency           *int `json:"RequestLatency,omitempty"`
+	DNSLatency               *int `json:"DnsLatency,omitempty"`
+	TCPLatency               *int `json:"TcpLatency,omitempty"`
+	SSLLatency               *int `json:"SslLatency,omitempty"`
+
+	MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"`
+}
+
+func (m *metric) TruncateFields() {
+	m.ClientID = truncateString(m.ClientID, 255)
+	m.UserAgent = truncateString(m.UserAgent, 256)
+
+	m.AWSException = truncateString(m.AWSException, 128)
+	m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512)
+
+	m.SDKException = truncateString(m.SDKException, 128)
+	m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512)
+
+	m.FinalAWSException = truncateString(m.FinalAWSException, 128)
+	m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512)
+
+	m.FinalSDKException = truncateString(m.FinalSDKException, 128)
+	m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512)
+}
+
+func truncateString(v *string, l int) *string {
+	if v != nil && len(*v) > l {
+		nv := (*v)[:l]
+		return &nv
+	}
+
+	return v
+}
+
+func (m *metric) SetException(e metricException) {
+	switch te := e.(type) {
+	case awsException:
+		m.AWSException = aws.String(te.exception)
+		m.AWSExceptionMessage = aws.String(te.message)
+	case sdkException:
+		m.SDKException = aws.String(te.exception)
+		m.SDKExceptionMessage = aws.String(te.message)
+	}
+}
+
+func (m *metric) SetFinalException(e metricException) {
+	switch te := e.(type) {
+	case awsException:
+		m.FinalAWSException = aws.String(te.exception)
+		m.FinalAWSExceptionMessage = aws.String(te.message)
+	case sdkException:
+		m.FinalSDKException = aws.String(te.exception)
+		m.FinalSDKExceptionMessage = aws.String(te.message)
+	}
+}
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
new file mode 100644
index 000000000..82a3e345e
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
@@ -0,0 +1,55 @@
+package csm
+
+import (
+	"sync/atomic"
+)
+
+const (
+	runningEnum = iota
+	pausedEnum
+)
+
+var (
+	// MetricsChannelSize is the size of the buffered channel of metrics
+	// waiting to be sent.
+	MetricsChannelSize = 100
+)
+
+type metricChan struct {
+	ch     chan metric
+	paused *int64
+}
+
+func newMetricChan(size int) metricChan {
+	return metricChan{
+		ch:     make(chan metric, size),
+		paused: new(int64),
+	}
+}
+
+func (ch *metricChan) Pause() {
+	atomic.StoreInt64(ch.paused, pausedEnum)
+}
+
+func (ch *metricChan) Continue() {
+	atomic.StoreInt64(ch.paused, runningEnum)
+}
+
+func (ch *metricChan) IsPaused() bool {
+	v := atomic.LoadInt64(ch.paused)
+	return v == pausedEnum
+}
+
+// Push will push metrics to the metric channel if the channel
+// is not paused.
+func (ch *metricChan) Push(m metric) bool {
+	if ch.IsPaused() {
+		return false
+	}
+
+	select {
+	case ch.ch <- m:
+		return true
+	default:
+		return false
+	}
+}
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go
new file mode 100644
index 000000000..54a99280c
--- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go @@ -0,0 +1,26 @@ +package csm + +type metricException interface { + Exception() string + Message() string +} + +type requestException struct { + exception string + message string +} + +func (e requestException) Exception() string { + return e.exception +} +func (e requestException) Message() string { + return e.message +} + +type awsException struct { + requestException +} + +type sdkException struct { + requestException +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go new file mode 100644 index 000000000..835bcd49c --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go @@ -0,0 +1,264 @@ +package csm + +import ( + "encoding/json" + "net" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Reporter will gather metrics of API requests made and +// send those metrics to the CSM endpoint. +type Reporter struct { + clientID string + url string + conn net.Conn + metricsCh metricChan + done chan struct{} +} + +var ( + sender *Reporter +) + +func connect(url string) error { + const network = "udp" + if err := sender.connect(network, url); err != nil { + return err + } + + if sender.done == nil { + sender.done = make(chan struct{}) + go sender.start() + } + + return nil +} + +func newReporter(clientID, url string) *Reporter { + return &Reporter{ + clientID: clientID, + url: url, + metricsCh: newMetricChan(MetricsChannelSize), + } +} + +func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + creds, _ := r.Config.Credentials.Get() + + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Region: r.Config.Region, + Type: aws.String("ApiCallAttempt"), + Version: aws.Int(1), + + XAmzRequestID: aws.String(r.RequestID), + + AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), + AccessKey: aws.String(creds.AccessKeyID), + } + + if r.HTTPResponse != nil { + m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + m.SetException(getMetricException(awserr)) + } + } + + m.TruncateFields() + rep.metricsCh.Push(m) +} + +func getMetricException(err awserr.Error) metricException { + msg := err.Error() + code := err.Code() + + switch code { + case request.ErrCodeRequestError, + request.ErrCodeSerialization, + request.CanceledErrorCode: + return sdkException{ + requestException{exception: code, message: msg}, + } + default: + return awsException{ + requestException{exception: code, message: msg}, + } + } +} + +func (rep *Reporter) sendAPICallMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Type: aws.String("ApiCall"), + AttemptCount: aws.Int(r.RetryCount + 1), + Region: r.Config.Region, + Latency: aws.Int(int(time.Since(r.Time) / time.Millisecond)), + XAmzRequestID: 
aws.String(r.RequestID),
+		MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())),
+	}
+
+	if r.HTTPResponse != nil {
+		m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode)
+	}
+
+	if r.Error != nil {
+		if awserr, ok := r.Error.(awserr.Error); ok {
+			m.SetFinalException(getMetricException(awserr))
+		}
+	}
+
+	m.TruncateFields()
+
+	// TODO: Probably want to figure something out for logging dropped
+	// metrics
+	rep.metricsCh.Push(m)
+}
+
+func (rep *Reporter) connect(network, url string) error {
+	if rep.conn != nil {
+		rep.conn.Close()
+	}
+
+	conn, err := net.Dial(network, url)
+	if err != nil {
+		return awserr.New("UDPError", "Could not connect", err)
+	}
+
+	rep.conn = conn
+
+	return nil
+}
+
+func (rep *Reporter) close() {
+	if rep.done != nil {
+		close(rep.done)
+	}
+
+	rep.metricsCh.Pause()
+}
+
+func (rep *Reporter) start() {
+	defer func() {
+		rep.metricsCh.Pause()
+	}()
+
+	for {
+		select {
+		case <-rep.done:
+			rep.done = nil
+			return
+		case m := <-rep.metricsCh.ch:
+			// TODO: What to do with this error? Probably should just log
+			b, err := json.Marshal(m)
+			if err != nil {
+				continue
+			}
+
+			rep.conn.Write(b)
+		}
+	}
+}
+
+// Pause will pause the metric channel preventing any new metrics from being
+// added. It is safe to call concurrently with other calls to Pause, but if
+// called concurrently with Continue it can lead to unexpected state.
+func (rep *Reporter) Pause() {
+	lock.Lock()
+	defer lock.Unlock()
+
+	if rep == nil {
+		return
+	}
+
+	rep.close()
+}
+
+// Continue will reopen the metric channel and allow for monitoring to be
+// resumed. It is safe to call concurrently with other calls to Continue, but
+// if called concurrently with Pause it can lead to unexpected state.
+func (rep *Reporter) Continue() {
+	lock.Lock()
+	defer lock.Unlock()
+	if rep == nil {
+		return
+	}
+
+	if !rep.metricsCh.IsPaused() {
+		return
+	}
+
+	rep.metricsCh.Continue()
+}
+
+// Client side metric handler names
+const (
+	APICallMetricHandlerName        = "awscsm.SendAPICallMetric"
+	APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
+)
+
+// InjectHandlers will enable client side metrics and inject the proper
+// handlers to handle how metrics are sent.
+//
+// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers
+// multiple times may lead to unexpected behavior, (e.g. duplicate metrics).
+//
+//     // Start must be called in order to inject the correct handlers
+//     r, err := csm.Start("clientID", "127.0.0.1:8094")
+//     if err != nil {
+//         panic(fmt.Errorf("expected no error, but received %v", err))
+//     }
+//
+//     sess := session.NewSession()
+//     r.InjectHandlers(&sess.Handlers)
+//
+//     // create a new service client with our client side metric session
+//     svc := s3.New(sess)
+func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
+	if rep == nil {
+		return
+	}
+
+	handlers.Complete.PushFrontNamed(request.NamedHandler{
+		Name: APICallMetricHandlerName,
+		Fn:   rep.sendAPICallMetric,
+	})
+
+	handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{
+		Name: APICallAttemptMetricHandlerName,
+		Fn:   rep.sendAPICallAttemptMetric,
+	})
+}
+
+// boolIntValue returns 1 for true and 0 for false.
+func boolIntValue(b bool) int {
+	if b {
+		return 1
+	}
+
+	return 0
+}
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
new file mode 100644
index 000000000..23bb639e0
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
@@ -0,0 +1,207 @@
+// Package defaults is a collection of helpers to retrieve the SDK's default
+// configuration and handlers.
+//
+// Generally this package shouldn't be used directly, but session.Session
+// instead. This package is useful when you need to reset the defaults
+// of a session or service client to the SDK defaults before setting
+// additional parameters.
+package defaults
+
+import (
+	"fmt"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/corehandlers"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
+	"github.com/aws/aws-sdk-go/aws/ec2metadata"
+	"github.com/aws/aws-sdk-go/aws/endpoints"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+// A Defaults provides a collection of default values for SDK clients.
+type Defaults struct {
+	Config   *aws.Config
+	Handlers request.Handlers
+}
+
+// Get returns the SDK's default values with Config and handlers pre-configured.
+func Get() Defaults {
+	cfg := Config()
+	handlers := Handlers()
+	cfg.Credentials = CredChain(cfg, handlers)
+
+	return Defaults{
+		Config:   cfg,
+		Handlers: handlers,
+	}
+}
+
+// Config returns the default configuration without credentials.
+// To retrieve a config with credentials also included use
+// `defaults.Get().Config` instead.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the configuration of an
+// existing service client or session.
+func Config() *aws.Config {
+	return aws.NewConfig().
+		WithCredentials(credentials.AnonymousCredentials).
+		WithRegion(os.Getenv("AWS_REGION")).
+		WithHTTPClient(http.DefaultClient).
+		WithMaxRetries(aws.UseServiceDefaultRetries).
+		WithLogger(aws.NewDefaultLogger()).
+		WithLogLevel(aws.LogOff).
+		WithEndpointResolver(endpoints.DefaultResolver())
+}
+
+// Handlers returns the default request handlers.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the request handlers of an
+// existing service client or session.
+func Handlers() request.Handlers {
+	var handlers request.Handlers
+
+	handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
+	handlers.Validate.AfterEachFn = request.HandlerListStopOnError
+	handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
+	handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander)
+	handlers.Build.AfterEachFn = request.HandlerListStopOnError
+	handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
+	handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
+	handlers.Send.PushBackNamed(corehandlers.SendHandler)
+	handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
+	handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
+
+	return handlers
+}
+
+// CredChain returns the default credential chain.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the credentials of an
+// existing service client or session's Config.
+func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
+	return credentials.NewCredentials(&credentials.ChainProvider{
+		VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+		Providers:     CredProviders(cfg, handlers),
+	})
+}
+
+// CredProviders returns the slice of providers used in
+// the default credential chain.
+//
+// For applications that need to use some other provider (for example use
+// different environment variables for legacy reasons) but still fall back
+// on the default chain of providers. This allows the default chain to be
+// automatically updated.
+func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider {
+	return []credentials.Provider{
+		&credentials.EnvProvider{},
+		&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+		RemoteCredProvider(*cfg, handlers),
+	}
+}
+
+const (
+	httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
+	httpProviderEnvVar              = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
+)
+
+// RemoteCredProvider returns a credentials provider for the default remote
+// endpoints such as EC2 or ECS Roles.
+func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
+	if u := os.Getenv(httpProviderEnvVar); len(u) > 0 {
+		return localHTTPCredProvider(cfg, handlers, u)
+	}
+
+	if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 {
+		u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri)
+		return httpCredProvider(cfg, handlers, u)
+	}
+
+	return ec2RoleProvider(cfg, handlers)
+}
+
+var lookupHostFn = net.LookupHost
+
+func isLoopbackHost(host string) (bool, error) {
+	ip := net.ParseIP(host)
+	if ip != nil {
+		return ip.IsLoopback(), nil
+	}
+
+	// Host is not an ip, perform lookup
+	addrs, err := lookupHostFn(host)
+	if err != nil {
+		return false, err
+	}
+	for _, addr := range addrs {
+		if !net.ParseIP(addr).IsLoopback() {
+			return false, nil
+		}
+	}
+
+	return true, nil
+}
+
+func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
+	var errMsg string
+
+	parsed, err := url.Parse(u)
+	if err != nil {
+		errMsg = fmt.Sprintf("invalid URL, %v", err)
+	} else {
+		host := aws.URLHostname(parsed)
+		if len(host) == 0 {
+			errMsg = "unable to parse host from local HTTP cred provider URL"
+		} else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil {
+			errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr)
+		} else if !isLoopback {
+			errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host)
+		}
+	}
+
+	if len(errMsg) > 0 {
+		if cfg.Logger != nil {
+			cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err)
+		}
+		return credentials.ErrorProvider{
+			Err:          awserr.New("CredentialsEndpointError", errMsg, err),
+			ProviderName: endpointcreds.ProviderName,
+		}
+	}
+
+	return httpCredProvider(cfg, handlers, u)
+}
+
+func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
+	return endpointcreds.NewProviderClient(cfg, handlers, u,
+		func(p *endpointcreds.Provider) {
+			p.ExpiryWindow = 5 * time.Minute
+			p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar)
+		},
+	)
+}
+
+func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
+	resolver := cfg.EndpointResolver
+	if resolver == nil {
+		resolver = endpoints.DefaultResolver()
+	}
+
+	e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "")
+	return &ec2rolecreds.EC2RoleProvider{
+		Client:       ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion),
+		ExpiryWindow: 5 * time.Minute,
+	}
+}
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go
new file mode 100644
index 000000000..ca0ee1dcc
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go
@@ -0,0 +1,27 @@
+package defaults
+
+import (
+	"github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+// SharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+//   - Linux/Unix: $HOME/.aws/credentials
+//   - Windows: %USERPROFILE%\.aws\credentials
+func SharedCredentialsFilename() string {
+	return shareddefaults.SharedCredentialsFilename()
+}
+
+// SharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+//   - Linux/Unix: $HOME/.aws/config
+//   - Windows: %USERPROFILE%\.aws\config
+func SharedConfigFilename() string {
+	return shareddefaults.SharedConfigFilename()
+}
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/doc.go
new file mode 100644
index 000000000..4fcb61618
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/doc.go
@@ -0,0 +1,56 @@
+// Package aws provides the core SDK's utilities and shared types. Use this package's
+// utilities to simplify setting and reading API operations parameters.
+//
+// Value and Pointer Conversion Utilities
+//
+// This package includes a helper conversion utility for each scalar type the SDK's
+// API use. These utilities make getting a pointer of the scalar, and dereferencing
+// a pointer easier.
+//
+// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
+// The Pointer to value will safely dereference the pointer and return its value.
+// If the pointer was nil, the scalar's zero value will be returned.
+//
+// The value to pointer functions will be named after the scalar type. So to get a
+// *string from a string value, use the "String" function. This makes it easy
+// to get a pointer to a literal string value, because getting the address of a
+// literal requires assigning the value to a variable first.
+//
+//     var strPtr *string
+//
+//     // Without the SDK's conversion functions
+//     str := "my string"
+//     strPtr = &str
+//
+//     // With the SDK's conversion functions
+//     strPtr = aws.String("my string")
+//
+//     // Convert *string to string value
+//     str = aws.StringValue(strPtr)
+//
+// In addition to scalars the aws package also includes conversion utilities for
+// map and slice for commonly used types in API parameters. The map and slice
+// conversion functions use a similar naming pattern to the scalar conversion
+// functions.
+// +// var strPtrs []*string +// var strs []string = []string{"Go", "Gophers", "Go"} +// +// // Convert []string to []*string +// strPtrs = aws.StringSlice(strs) +// +// // Convert []*string to []string +// strs = aws.StringValueSlice(strPtrs) +// +// SDK Default HTTP Client +// +// The SDK will use the http.DefaultClient if a HTTP client is not provided to +// the SDK's Session, or service client constructor. This means that if the +// http.DefaultClient is modified by other components of your application the +// modifications will be picked up by the SDK as well. +// +// In some cases this might be intended, but it is a better practice to create +// a custom HTTP Client to share explicitly through your application. You can +// configure the SDK to use the custom HTTP Client by setting the HTTPClient +// value of the SDK's Config type when creating a Session or service client. +package aws diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go new file mode 100644 index 000000000..a716c021c --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -0,0 +1,250 @@ +package ec2metadata + +import ( + "encoding/json" + "fmt" + "net/http" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkuri" +) + +// getToken uses the duration to return a token for EC2 metadata service, +// or an error if the request failed. +func (c *EC2Metadata) getToken(ctx aws.Context, duration time.Duration) (tokenOutput, error) { + op := &request.Operation{ + Name: "GetToken", + HTTPMethod: "PUT", + HTTPPath: "/api/token", + } + + var output tokenOutput + req := c.NewRequest(op, nil, &output) + req.SetContext(ctx) + + // remove the fetch token handler from the request handlers to avoid infinite recursion + req.Handlers.Sign.RemoveByName(fetchTokenHandlerName) + + // Swap the unmarshalMetadataHandler with unmarshalTokenHandler on this request. + req.Handlers.Unmarshal.Swap(unmarshalMetadataHandlerName, unmarshalTokenHandler) + + ttl := strconv.FormatInt(int64(duration/time.Second), 10) + req.HTTPRequest.Header.Set(ttlHeader, ttl) + + err := req.Send() + + // Errors with bad request status should be returned. + if err != nil { + err = awserr.NewRequestFailure( + awserr.New(req.HTTPResponse.Status, http.StatusText(req.HTTPResponse.StatusCode), err), + req.HTTPResponse.StatusCode, req.RequestID) + } + + return output, err +} + +// GetMetadata uses the path provided to request information from the EC2 +// instance metadata service. The content will be returned as a string, or +// error if the request failed. +func (c *EC2Metadata) GetMetadata(p string) (string, error) { + return c.GetMetadataWithContext(aws.BackgroundContext(), p) +} + +// GetMetadataWithContext uses the path provided to request information from the EC2 +// instance metadata service. The content will be returned as a string, or +// error if the request failed. 
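+//
+// For example (a sketch; "svc" is assumed to be an existing *EC2Metadata
+// client and "instance-id" is a standard instance metadata path):
+//
+//     id, err := svc.GetMetadataWithContext(aws.BackgroundContext(), "instance-id")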
+func (c *EC2Metadata) GetMetadataWithContext(ctx aws.Context, p string) (string, error) { + op := &request.Operation{ + Name: "GetMetadata", + HTTPMethod: "GET", + HTTPPath: sdkuri.PathJoin("/meta-data", p), + } + output := &metadataOutput{} + + req := c.NewRequest(op, nil, output) + + req.SetContext(ctx) + + err := req.Send() + return output.Content, err +} + +// GetUserData returns the userdata that was configured for the service. If +// there is no user-data setup for the EC2 instance a "NotFoundError" error +// code will be returned. +func (c *EC2Metadata) GetUserData() (string, error) { + return c.GetUserDataWithContext(aws.BackgroundContext()) +} + +// GetUserDataWithContext returns the userdata that was configured for the service. If +// there is no user-data setup for the EC2 instance a "NotFoundError" error +// code will be returned. +func (c *EC2Metadata) GetUserDataWithContext(ctx aws.Context) (string, error) { + op := &request.Operation{ + Name: "GetUserData", + HTTPMethod: "GET", + HTTPPath: "/user-data", + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + req.SetContext(ctx) + + err := req.Send() + return output.Content, err +} + +// GetDynamicData uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *EC2Metadata) GetDynamicData(p string) (string, error) { + return c.GetDynamicDataWithContext(aws.BackgroundContext(), p) +} + +// GetDynamicDataWithContext uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *EC2Metadata) GetDynamicDataWithContext(ctx aws.Context, p string) (string, error) { + op := &request.Operation{ + Name: "GetDynamicData", + HTTPMethod: "GET", + HTTPPath: sdkuri.PathJoin("/dynamic", p), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + req.SetContext(ctx) + + err := req.Send() + return output.Content, err +} + +// GetInstanceIdentityDocument retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. +func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { + return c.GetInstanceIdentityDocumentWithContext(aws.BackgroundContext()) +} + +// GetInstanceIdentityDocumentWithContext retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. 
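+//
+// For example (a sketch assuming an existing client "svc"):
+//
+//     doc, err := svc.GetInstanceIdentityDocumentWithContext(aws.BackgroundContext())
+//     if err == nil {
+//         fmt.Println(doc.Region, doc.InstanceID)
+//     }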
+func (c *EC2Metadata) GetInstanceIdentityDocumentWithContext(ctx aws.Context) (EC2InstanceIdentityDocument, error) { + resp, err := c.GetDynamicDataWithContext(ctx, "instance-identity/document") + if err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 instance identity document", err) + } + + doc := EC2InstanceIdentityDocument{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New(request.ErrCodeSerialization, + "failed to decode EC2 instance identity document", err) + } + + return doc, nil +} + +// IAMInfo retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { + return c.IAMInfoWithContext(aws.BackgroundContext()) +} + +// IAMInfoWithContext retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfoWithContext(ctx aws.Context) (EC2IAMInfo, error) { + resp, err := c.GetMetadataWithContext(ctx, "iam/info") + if err != nil { + return EC2IAMInfo{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 IAM info", err) + } + + info := EC2IAMInfo{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { + return EC2IAMInfo{}, + awserr.New(request.ErrCodeSerialization, + "failed to decode EC2 IAM info", err) + } + + if info.Code != "Success" { + errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code) + return EC2IAMInfo{}, + awserr.New("EC2MetadataError", errMsg, nil) + } + + return info, nil +} + +// Region returns the region the instance is running in. +func (c *EC2Metadata) Region() (string, error) { + return c.RegionWithContext(aws.BackgroundContext()) +} + +// RegionWithContext returns the region the instance is running in. +func (c *EC2Metadata) RegionWithContext(ctx aws.Context) (string, error) { + ec2InstanceIdentityDocument, err := c.GetInstanceIdentityDocumentWithContext(ctx) + if err != nil { + return "", err + } + // extract region from the ec2InstanceIdentityDocument + region := ec2InstanceIdentityDocument.Region + if len(region) == 0 { + return "", awserr.New("EC2MetadataError", "invalid region received for ec2metadata instance", nil) + } + // returns region + return region, nil +} + +// Available returns if the application has access to the EC2 Metadata service. +// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. +func (c *EC2Metadata) Available() bool { + return c.AvailableWithContext(aws.BackgroundContext()) +} + +// AvailableWithContext returns if the application has access to the EC2 Metadata service. +// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. 
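+//
+// For example (a sketch assuming an existing client "svc"):
+//
+//     if svc.AvailableWithContext(ctx) {
+//         region, _ := svc.RegionWithContext(ctx)
+//         fmt.Println("running on EC2 in", region)
+//     }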
+func (c *EC2Metadata) AvailableWithContext(ctx aws.Context) bool { + if _, err := c.GetMetadataWithContext(ctx, "instance-id"); err != nil { + return false + } + + return true +} + +// An EC2IAMInfo provides the shape for unmarshaling +// an IAM info from the metadata API +type EC2IAMInfo struct { + Code string + LastUpdated time.Time + InstanceProfileArn string + InstanceProfileID string +} + +// An EC2InstanceIdentityDocument provides the shape for unmarshaling +// an instance identity document +type EC2InstanceIdentityDocument struct { + DevpayProductCodes []string `json:"devpayProductCodes"` + MarketplaceProductCodes []string `json:"marketplaceProductCodes"` + AvailabilityZone string `json:"availabilityZone"` + PrivateIP string `json:"privateIp"` + Version string `json:"version"` + Region string `json:"region"` + InstanceID string `json:"instanceId"` + BillingProducts []string `json:"billingProducts"` + InstanceType string `json:"instanceType"` + AccountID string `json:"accountId"` + PendingTime time.Time `json:"pendingTime"` + ImageID string `json:"imageId"` + KernelID string `json:"kernelId"` + RamdiskID string `json:"ramdiskId"` + Architecture string `json:"architecture"` +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go new file mode 100644 index 000000000..b8b2940d7 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -0,0 +1,228 @@ +// Package ec2metadata provides the client for making API calls to the +// EC2 Metadata service. +// +// This package's client can be disabled completely by setting the environment +// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to +// true instructs the SDK to disable the EC2 Metadata client. The client cannot +// be used while the environment variable is set to true, (case insensitive). +package ec2metadata + +import ( + "bytes" + "errors" + "io" + "net/http" + "os" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/request" +) + +const ( + // ServiceName is the name of the service. + ServiceName = "ec2metadata" + disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" + + // Headers for Token and TTL + ttlHeader = "x-aws-ec2-metadata-token-ttl-seconds" + tokenHeader = "x-aws-ec2-metadata-token" + + // Named Handler constants + fetchTokenHandlerName = "FetchTokenHandler" + unmarshalMetadataHandlerName = "unmarshalMetadataHandler" + unmarshalTokenHandlerName = "unmarshalTokenHandler" + enableTokenProviderHandlerName = "enableTokenProviderHandler" + + // TTL constants + defaultTTL = 21600 * time.Second + ttlExpirationWindow = 30 * time.Second +) + +// A EC2Metadata is an EC2 Metadata service Client. +type EC2Metadata struct { + *client.Client +} + +// New creates a new instance of the EC2Metadata client with a session. +// This client is safe to use across multiple goroutines. +// +// +// Example: +// // Create a EC2Metadata client from just a session. 
+// svc := ec2metadata.New(mySession) +// +// // Create a EC2Metadata client with additional configuration +// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { + c := p.ClientConfig(ServiceName, cfgs...) + return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// NewClient returns a new EC2Metadata client. Should be used to create +// a client when not using a session. Generally using just New with a session +// is preferred. +// +// If an unmodified HTTP client is provided from the stdlib default, or no client +// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened. +// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default. +func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata { + if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) { + // If the http client is unmodified and this feature is not disabled + // set custom timeouts for EC2Metadata requests. + cfg.HTTPClient = &http.Client{ + // use a shorter timeout than default because the metadata + // service is local if it is running, and to fail faster + // if not running on an ec2 instance. + Timeout: 1 * time.Second, + } + // max number of retries on the client operation + cfg.MaxRetries = aws.Int(2) + } + + svc := &EC2Metadata{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceName, + Endpoint: endpoint, + APIVersion: "latest", + }, + handlers, + ), + } + + // token provider instance + tp := newTokenProvider(svc, defaultTTL) + + // NamedHandler for fetching token + svc.Handlers.Sign.PushBackNamed(request.NamedHandler{ + Name: fetchTokenHandlerName, + Fn: tp.fetchTokenHandler, + }) + // NamedHandler for enabling token provider + svc.Handlers.Complete.PushBackNamed(request.NamedHandler{ + Name: enableTokenProviderHandlerName, + Fn: tp.enableTokenProviderHandler, + }) + + svc.Handlers.Unmarshal.PushBackNamed(unmarshalHandler) + svc.Handlers.UnmarshalError.PushBack(unmarshalError) + svc.Handlers.Validate.Clear() + svc.Handlers.Validate.PushBack(validateEndpointHandler) + + // Disable the EC2 Metadata service if the environment variable is set. + // This short-circuits the service's functionality to always fail to send + // requests. 
+ if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" { + svc.Handlers.Send.SwapNamed(request.NamedHandler{ + Name: corehandlers.SendHandler.Name, + Fn: func(r *request.Request) { + r.HTTPResponse = &http.Response{ + Header: http.Header{}, + } + r.Error = awserr.New( + request.CanceledErrorCode, + "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var", + nil) + }, + }) + } + + // Add additional options to the service config + for _, option := range opts { + option(svc.Client) + } + return svc +} + +func httpClientZero(c *http.Client) bool { + return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0) +} + +type metadataOutput struct { + Content string +} + +type tokenOutput struct { + Token string + TTL time.Duration +} + +// unmarshal token handler is used to parse the response of a getToken operation +var unmarshalTokenHandler = request.NamedHandler{ + Name: unmarshalTokenHandlerName, + Fn: func(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, + "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + + v := r.HTTPResponse.Header.Get(ttlHeader) + data, ok := r.Data.(*tokenOutput) + if !ok { + return + } + + data.Token = b.String() + // TTL is in seconds + i, err := strconv.ParseInt(v, 10, 64) + if err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ParamFormatErrCode, + "unable to parse EC2 token TTL response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + t := time.Duration(i) * time.Second + data.TTL = t + }, +} + +var unmarshalHandler = request.NamedHandler{ + Name: unmarshalMetadataHandlerName, + Fn: func(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, + "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + + if data, ok := r.Data.(*metadataOutput); ok { + data.Content = b.String() + } + }, +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err), + r.HTTPResponse.StatusCode, r.RequestID) + return + } + + // Response body format is not consistent between metadata endpoints. 
+ // Grab the error message as a string and include that as the source error + r.Error = awserr.NewRequestFailure(awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())), + r.HTTPResponse.StatusCode, r.RequestID) +} + +func validateEndpointHandler(r *request.Request) { + if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go new file mode 100644 index 000000000..d0a3a020d --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go @@ -0,0 +1,92 @@ +package ec2metadata + +import ( + "net/http" + "sync/atomic" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A tokenProvider struct provides access to EC2Metadata client +// and atomic instance of a token, along with configuredTTL for it. +// tokenProvider also provides an atomic flag to disable the +// fetch token operation. +// The disabled member will use 0 as false, and 1 as true. +type tokenProvider struct { + client *EC2Metadata + token atomic.Value + configuredTTL time.Duration + disabled uint32 +} + +// A ec2Token struct helps use of token in EC2 Metadata service ops +type ec2Token struct { + token string + credentials.Expiry +} + +// newTokenProvider provides a pointer to a tokenProvider instance +func newTokenProvider(c *EC2Metadata, duration time.Duration) *tokenProvider { + return &tokenProvider{client: c, configuredTTL: duration} +} + +// fetchTokenHandler fetches token for EC2Metadata service client by default. +func (t *tokenProvider) fetchTokenHandler(r *request.Request) { + + // short-circuits to insecure data flow if tokenProvider is disabled. + if v := atomic.LoadUint32(&t.disabled); v == 1 { + return + } + + if ec2Token, ok := t.token.Load().(ec2Token); ok && !ec2Token.IsExpired() { + r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) + return + } + + output, err := t.client.getToken(r.Context(), t.configuredTTL) + + if err != nil { + + // change the disabled flag on token provider to true, + // when error is request timeout error. + if requestFailureError, ok := err.(awserr.RequestFailure); ok { + switch requestFailureError.StatusCode() { + case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed: + atomic.StoreUint32(&t.disabled, 1) + case http.StatusBadRequest: + r.Error = requestFailureError + } + + // Check if request timed out while waiting for response + if e, ok := requestFailureError.OrigErr().(awserr.Error); ok { + if e.Code() == request.ErrCodeRequestError { + atomic.StoreUint32(&t.disabled, 1) + } + } + } + return + } + + newToken := ec2Token{ + token: output.Token, + } + newToken.SetExpiration(time.Now().Add(output.TTL), ttlExpirationWindow) + t.token.Store(newToken) + + // Inject token header to the request. 
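+ // The token is re-read from the atomic value rather than used directly, so + // the header always carries the ec2Token that concurrent handlers observe.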
+ if ec2Token, ok := t.token.Load().(ec2Token); ok { + r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) + } +} + +// enableTokenProviderHandler enables the token provider +func (t *tokenProvider) enableTokenProviderHandler(r *request.Request) { + // If the error code status is 401, we enable the token provider + if e, ok := r.Error.(awserr.RequestFailure); ok && e != nil && + e.StatusCode() == http.StatusUnauthorized { + atomic.StoreUint32(&t.disabled, 0) + } +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go new file mode 100644 index 000000000..654fb1ad5 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go @@ -0,0 +1,216 @@ +package endpoints + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +type modelDefinition map[string]json.RawMessage + +// A DecodeModelOptions are the options for how the endpoints model definition +// are decoded. +type DecodeModelOptions struct { + SkipCustomizations bool +} + +// Set combines all of the option functions together. +func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) { + for _, fn := range optFns { + fn(d) + } +} + +// DecodeModel unmarshals a Regions and Endpoint model definition file into +// a endpoint Resolver. If the file format is not supported, or an error occurs +// when unmarshaling the model an error will be returned. +// +// Casting the return value of this func to a EnumPartitions will +// allow you to get a list of the partitions in the order the endpoints +// will be resolved in. +// +// resolver, err := endpoints.DecodeModel(reader) +// +// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// for _, p := range partitions { +// // ... inspect partitions +// } +func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) { + var opts DecodeModelOptions + opts.Set(optFns...) + + // Get the version of the partition file to determine what + // unmarshaling model to use. 
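+ // Only version "3" of the endpoints model is supported; any other version + // results in a DecodeEndpointsModelError below.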
+ modelDef := modelDefinition{} + if err := json.NewDecoder(r).Decode(&modelDef); err != nil { + return nil, newDecodeModelError("failed to decode endpoints model", err) + } + + var version string + if b, ok := modelDef["version"]; ok { + version = string(b) + } else { + return nil, newDecodeModelError("endpoints version not found in model", nil) + } + + if version == "3" { + return decodeV3Endpoints(modelDef, opts) + } + + return nil, newDecodeModelError( + fmt.Sprintf("endpoints version %s, not supported", version), nil) +} + +func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) { + b, ok := modelDef["partitions"] + if !ok { + return nil, newDecodeModelError("endpoints model missing partitions", nil) + } + + ps := partitions{} + if err := json.Unmarshal(b, &ps); err != nil { + return nil, newDecodeModelError("failed to decode endpoints model", err) + } + + if opts.SkipCustomizations { + return ps, nil + } + + // Customization + for i := 0; i < len(ps); i++ { + p := &ps[i] + custAddEC2Metadata(p) + custAddS3DualStack(p) + custRegionalS3(p) + custRmIotDataService(p) + custFixAppAutoscalingChina(p) + custFixAppAutoscalingUsGov(p) + } + + return ps, nil +} + +func custAddS3DualStack(p *partition) { + if !(p.ID == "aws" || p.ID == "aws-cn" || p.ID == "aws-us-gov") { + return + } + + custAddDualstack(p, "s3") + custAddDualstack(p, "s3-control") +} + +func custRegionalS3(p *partition) { + if p.ID != "aws" { + return + } + + service, ok := p.Services["s3"] + if !ok { + return + } + + // If global endpoint already exists no customization needed. + if _, ok := service.Endpoints["aws-global"]; ok { + return + } + + service.PartitionEndpoint = "aws-global" + service.Endpoints["us-east-1"] = endpoint{} + service.Endpoints["aws-global"] = endpoint{ + Hostname: "s3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + } + + p.Services["s3"] = service +} + +func custAddDualstack(p *partition, svcName string) { + s, ok := p.Services[svcName] + if !ok { + return + } + + s.Defaults.HasDualStack = boxedTrue + s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}" + + p.Services[svcName] = s +} + +func custAddEC2Metadata(p *partition) { + p.Services["ec2metadata"] = service{ + IsRegionalized: boxedFalse, + PartitionEndpoint: "aws-global", + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + } +} + +func custRmIotDataService(p *partition) { + delete(p.Services, "data.iot") +} + +func custFixAppAutoscalingChina(p *partition) { + if p.ID != "aws-cn" { + return + } + + const serviceName = "application-autoscaling" + s, ok := p.Services[serviceName] + if !ok { + return + } + + const expectHostname = `autoscaling.{region}.amazonaws.com` + if e, a := s.Defaults.Hostname, expectHostname; e != a { + fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a) + return + } + + s.Defaults.Hostname = expectHostname + ".cn" + p.Services[serviceName] = s +} + +func custFixAppAutoscalingUsGov(p *partition) { + if p.ID != "aws-us-gov" { + return + } + + const serviceName = "application-autoscaling" + s, ok := p.Services[serviceName] + if !ok { + return + } + + if a := s.Defaults.CredentialScope.Service; a != "" { + fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a) + return + } + + if a := s.Defaults.Hostname; a != "" { + fmt.Printf("custFixAppAutoscalingUsGov: 
ignoring customization, expected empty hostname, got %s\n", a) + return + } + + s.Defaults.CredentialScope.Service = "application-autoscaling" + s.Defaults.Hostname = "autoscaling.{region}.amazonaws.com" + + p.Services[serviceName] = s +} + +type decodeModelError struct { + awsError +} + +func newDecodeModelError(msg string, err error) decodeModelError { + return decodeModelError{ + awsError: awserr.New("DecodeEndpointsModelError", msg, err), + } +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go new file mode 100644 index 000000000..1ad2c42bb --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -0,0 +1,8937 @@ +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + +// Partition identifiers +const ( + AwsPartitionID = "aws" // AWS Standard partition. + AwsCnPartitionID = "aws-cn" // AWS China partition. + AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. + AwsIsoPartitionID = "aws-iso" // AWS ISO (US) partition. + AwsIsoBPartitionID = "aws-iso-b" // AWS ISOB (US) partition. +) + +// AWS Standard partition's regions. +const ( + AfSouth1RegionID = "af-south-1" // Africa (Cape Town). + ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong). + ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). + ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). + ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). + ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). + ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). + CaCentral1RegionID = "ca-central-1" // Canada (Central). + EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt). + EuNorth1RegionID = "eu-north-1" // Europe (Stockholm). + EuSouth1RegionID = "eu-south-1" // Europe (Milan). + EuWest1RegionID = "eu-west-1" // Europe (Ireland). + EuWest2RegionID = "eu-west-2" // Europe (London). + EuWest3RegionID = "eu-west-3" // Europe (Paris). + MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). + SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). + UsEast1RegionID = "us-east-1" // US East (N. Virginia). + UsEast2RegionID = "us-east-2" // US East (Ohio). + UsWest1RegionID = "us-west-1" // US West (N. California). + UsWest2RegionID = "us-west-2" // US West (Oregon). +) + +// AWS China partition's regions. +const ( + CnNorth1RegionID = "cn-north-1" // China (Beijing). + CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia). +) + +// AWS GovCloud (US) partition's regions. +const ( + UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East). + UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US-West). +) + +// AWS ISO (US) partition's regions. +const ( + UsIsoEast1RegionID = "us-iso-east-1" // US ISO East. +) + +// AWS ISOB (US) partition's regions. +const ( + UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio). +) + +// DefaultResolver returns an Endpoint resolver that will be able +// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). +// +// Use DefaultPartitions() to get the list of the default partitions. +func DefaultResolver() Resolver { + return defaultPartitions +} + +// DefaultPartitions returns a list of the partitions the SDK is bundled +// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). 
+// +// partitions := endpoints.DefaultPartitions +// for _, p := range partitions { +// // ... inspect partitions +// } +func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() +} + +var defaultPartitions = partitions{ + awsPartition, + awscnPartition, + awsusgovPartition, + awsisoPartition, + awsisobPartition, +} + +// AwsPartition returns the Resolver for AWS Standard. +func AwsPartition() Partition { + return awsPartition.Partition() +} + +var awsPartition = partition{ + ID: "aws", + Name: "AWS Standard", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "af-south-1": region{ + Description: "Africa (Cape Town)", + }, + "ap-east-1": region{ + Description: "Asia Pacific (Hong Kong)", + }, + "ap-northeast-1": region{ + Description: "Asia Pacific (Tokyo)", + }, + "ap-northeast-2": region{ + Description: "Asia Pacific (Seoul)", + }, + "ap-south-1": region{ + Description: "Asia Pacific (Mumbai)", + }, + "ap-southeast-1": region{ + Description: "Asia Pacific (Singapore)", + }, + "ap-southeast-2": region{ + Description: "Asia Pacific (Sydney)", + }, + "ca-central-1": region{ + Description: "Canada (Central)", + }, + "eu-central-1": region{ + Description: "Europe (Frankfurt)", + }, + "eu-north-1": region{ + Description: "Europe (Stockholm)", + }, + "eu-south-1": region{ + Description: "Europe (Milan)", + }, + "eu-west-1": region{ + Description: "Europe (Ireland)", + }, + "eu-west-2": region{ + Description: "Europe (London)", + }, + "eu-west-3": region{ + Description: "Europe (Paris)", + }, + "me-south-1": region{ + Description: "Middle East (Bahrain)", + }, + "sa-east-1": region{ + Description: "South America (Sao Paulo)", + }, + "us-east-1": region{ + Description: "US East (N. Virginia)", + }, + "us-east-2": region{ + Description: "US East (Ohio)", + }, + "us-west-1": region{ + Description: "US West (N. 
California)", + }, + "us-west-2": region{ + Description: "US West (Oregon)", + }, + }, + Services: services{ + "a4b": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "access-analyzer": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "acm": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "acm-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "acm-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "acm-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "acm-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "acm-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "acm-pca": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "acm-pca-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "acm-pca-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "acm-pca-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "acm-pca-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + 
"us-west-2": endpoint{}, + }, + }, + "api.detective": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "api.ecr": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{ + Hostname: "api.ecr.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + "ap-east-1": endpoint{ + Hostname: "api.ecr.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "ap-northeast-1": endpoint{ + Hostname: "api.ecr.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "api.ecr.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "api.ecr.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "api.ecr.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "api.ecr.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "api.ecr.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "api.ecr.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "api.ecr.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-south-1": endpoint{ + Hostname: "api.ecr.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "api.ecr.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "api.ecr.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "api.ecr.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{ + Hostname: "api.ecr.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "api.ecr.sa-east-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "api.ecr.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "api.ecr.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "api.ecr.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "api.ecr.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "api.elastic-inference": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "api.elastic-inference.ap-northeast-1.amazonaws.com", + }, + "ap-northeast-2": endpoint{ + Hostname: "api.elastic-inference.ap-northeast-2.amazonaws.com", + }, + "eu-west-1": endpoint{ + Hostname: "api.elastic-inference.eu-west-1.amazonaws.com", + }, + "us-east-1": endpoint{ + Hostname: "api.elastic-inference.us-east-1.amazonaws.com", + }, + "us-east-2": endpoint{ + Hostname: "api.elastic-inference.us-east-2.amazonaws.com", + }, + "us-west-2": endpoint{ + Hostname: "api.elastic-inference.us-west-2.amazonaws.com", + }, + }, + }, + "api.mediatailor": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "api.pricing": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + 
"us-west-2": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appmesh": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appstream2": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "fips": endpoint{ + Hostname: "appstream2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appsync": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + 
"autoscaling-plans": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "backup": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "fips.batch.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fips.batch.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fips.batch.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fips.batch.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "budgets": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "budgets.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "ce": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "ce.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "chime": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpoint{ + SSLCommonName: "service.chime.aws.amazon.com", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "service.chime.aws.amazon.com", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloud9": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + 
"eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "cloudformation-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "cloudformation-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "cloudformation-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "cloudformation-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "cloudfront.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudsearch": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": 
endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "cloudtrail-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "cloudtrail-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "cloudtrail-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "cloudtrail-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "codebuild-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "codebuild-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "codebuild-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "codebuild-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "codecommit-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": 
endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "codedeploy-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "codedeploy-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "codepipeline": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "codepipeline-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "codepipeline-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "codepipeline-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "codepipeline-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "codepipeline-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codestar": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codestar-connections": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: 
"cognito-identity-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-idp": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "cognito-idp-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "cognito-idp-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "cognito-idp-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-sync": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "comprehend-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "comprehend-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "comprehend-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "comprehendmedical": service{ + + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + 
"ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "connect": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cur": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "data.mediastore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dataexchange": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "datapipeline": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "datasync": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "datasync-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "datasync-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "datasync-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "datasync-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "datasync-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dax": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "devicefarm": service{ + 
+			Endpoints: endpoints{
+				"us-west-2": endpoint{},
+			},
+		},
+		"directconnect": service{
+
+			Endpoints: endpoints{
+				"af-south-1": endpoint{},
+				"ap-east-1": endpoint{},
+				"ap-northeast-1": endpoint{},
+				"ap-northeast-2": endpoint{},
+				"ap-south-1": endpoint{},
+				"ap-southeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"ca-central-1": endpoint{},
+				"eu-central-1": endpoint{},
+				"eu-north-1": endpoint{},
+				"eu-south-1": endpoint{},
+				"eu-west-1": endpoint{},
+				"eu-west-2": endpoint{},
+				"eu-west-3": endpoint{},
+				"fips-us-east-1": endpoint{
+					Hostname: "directconnect-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				"fips-us-east-2": endpoint{
+					Hostname: "directconnect-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				"fips-us-west-1": endpoint{
+					Hostname: "directconnect-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				"fips-us-west-2": endpoint{
+					Hostname: "directconnect-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+				"me-south-1": endpoint{},
+				"sa-east-1": endpoint{},
+				"us-east-1": endpoint{},
+				"us-east-2": endpoint{},
+				"us-west-1": endpoint{},
+				"us-west-2": endpoint{},
+			},
+		},
+		"discovery": service{
+
+			Endpoints: endpoints{
+				"ap-northeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"eu-central-1": endpoint{},
+				"us-east-1": endpoint{},
+				"us-west-2": endpoint{},
+			},
+		},
+		"dms": service{
+
+			Endpoints: endpoints{
+				"af-south-1": endpoint{},
+				"ap-east-1": endpoint{},
+				"ap-northeast-1": endpoint{},
+				"ap-northeast-2": endpoint{},
+				"ap-south-1": endpoint{},
+				"ap-southeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"ca-central-1": endpoint{},
+				"dms-fips": endpoint{
+					Hostname: "dms-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				"eu-central-1": endpoint{},
+				"eu-north-1": endpoint{},
+				"eu-south-1": endpoint{},
+				"eu-west-1": endpoint{},
+				"eu-west-2": endpoint{},
+				"eu-west-3": endpoint{},
+				"me-south-1": endpoint{},
+				"sa-east-1": endpoint{},
+				"us-east-1": endpoint{},
+				"us-east-2": endpoint{},
+				"us-west-1": endpoint{},
+				"us-west-2": endpoint{},
+			},
+		},
+		"docdb": service{
+
+			Endpoints: endpoints{
+				"ap-northeast-1": endpoint{
+					Hostname: "rds.ap-northeast-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-northeast-1",
+					},
+				},
+				"ap-northeast-2": endpoint{
+					Hostname: "rds.ap-northeast-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-northeast-2",
+					},
+				},
+				"ap-south-1": endpoint{
+					Hostname: "rds.ap-south-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-south-1",
+					},
+				},
+				"ap-southeast-1": endpoint{
+					Hostname: "rds.ap-southeast-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-southeast-1",
+					},
+				},
+				"ap-southeast-2": endpoint{
+					Hostname: "rds.ap-southeast-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-southeast-2",
+					},
+				},
+				"ca-central-1": endpoint{
+					Hostname: "rds.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+				},
+				"eu-central-1": endpoint{
+					Hostname: "rds.eu-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-central-1",
+					},
+				},
+				"eu-west-1": endpoint{
+					Hostname: "rds.eu-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-west-1",
+					},
+				},
+				"eu-west-2": endpoint{
+					Hostname: "rds.eu-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-west-2",
+					},
+				},
+				"eu-west-3": endpoint{
+					Hostname: "rds.eu-west-3.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-west-3",
+					},
+				},
+				"us-east-1": endpoint{
+					Hostname: "rds.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				"us-east-2": endpoint{
+					Hostname: "rds.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				"us-west-2": endpoint{
+					Hostname: "rds.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+			},
+		},
+		"ds": service{
+
+			Endpoints: endpoints{
+				"ap-east-1": endpoint{},
+				"ap-northeast-1": endpoint{},
+				"ap-northeast-2": endpoint{},
+				"ap-south-1": endpoint{},
+				"ap-southeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"ca-central-1": endpoint{},
+				"eu-central-1": endpoint{},
+				"eu-north-1": endpoint{},
+				"eu-west-1": endpoint{},
+				"eu-west-2": endpoint{},
+				"eu-west-3": endpoint{},
+				"fips-ca-central-1": endpoint{
+					Hostname: "ds-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+				},
+				"fips-us-east-1": endpoint{
+					Hostname: "ds-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				"fips-us-east-2": endpoint{
+					Hostname: "ds-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				"fips-us-west-1": endpoint{
+					Hostname: "ds-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				"fips-us-west-2": endpoint{
+					Hostname: "ds-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+				"me-south-1": endpoint{},
+				"sa-east-1": endpoint{},
+				"us-east-1": endpoint{},
+				"us-east-2": endpoint{},
+				"us-west-1": endpoint{},
+				"us-west-2": endpoint{},
+			},
+		},
+		"dynamodb": service{
+			Defaults: endpoint{
+				Protocols: []string{"http", "https"},
+			},
+			Endpoints: endpoints{
+				"af-south-1": endpoint{},
+				"ap-east-1": endpoint{},
+				"ap-northeast-1": endpoint{},
+				"ap-northeast-2": endpoint{},
+				"ap-south-1": endpoint{},
+				"ap-southeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"ca-central-1": endpoint{},
+				"ca-central-1-fips": endpoint{
+					Hostname: "dynamodb-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+				},
+				"eu-central-1": endpoint{},
+				"eu-north-1": endpoint{},
+				"eu-south-1": endpoint{},
+				"eu-west-1": endpoint{},
+				"eu-west-2": endpoint{},
+				"eu-west-3": endpoint{},
+				"local": endpoint{
+					Hostname: "localhost:8000",
+					Protocols: []string{"http"},
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				"me-south-1": endpoint{},
+				"sa-east-1": endpoint{},
+				"us-east-1": endpoint{},
+				"us-east-1-fips": endpoint{
+					Hostname: "dynamodb-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				"us-east-2": endpoint{},
+				"us-east-2-fips": endpoint{
+					Hostname: "dynamodb-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				"us-west-1": endpoint{},
+				"us-west-1-fips": endpoint{
+					Hostname: "dynamodb-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				"us-west-2": endpoint{},
+				"us-west-2-fips": endpoint{
+					Hostname: "dynamodb-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+			},
+		},
+		"ec2": service{
+			Defaults: endpoint{
+				Protocols: []string{"http", "https"},
+			},
+			Endpoints: endpoints{
+				"af-south-1": endpoint{},
+				"ap-east-1": endpoint{},
+				"ap-northeast-1": endpoint{},
+				"ap-northeast-2": endpoint{},
+				"ap-south-1": endpoint{},
+				"ap-southeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"ca-central-1": endpoint{},
+				"eu-central-1": endpoint{},
+				"eu-north-1": endpoint{},
+				"eu-south-1": endpoint{},
+				"eu-west-1": endpoint{},
+				"eu-west-2": endpoint{},
+				"eu-west-3": endpoint{},
+				"fips-ca-central-1": endpoint{
+					Hostname: "ec2-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+				},
+				"fips-us-east-1": endpoint{
+					Hostname: "ec2-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				"fips-us-east-2": endpoint{
+					Hostname: "ec2-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				"fips-us-west-1": endpoint{
+					Hostname: "ec2-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				"fips-us-west-2": endpoint{
+					Hostname: "ec2-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+				"me-south-1": endpoint{},
+				"sa-east-1": endpoint{},
+				"us-east-1": endpoint{},
+				"us-east-2": endpoint{},
+				"us-west-1": endpoint{},
+				"us-west-2": endpoint{},
+			},
+		},
+		"ec2metadata": service{
+			PartitionEndpoint: "aws-global",
+			IsRegionalized: boxedFalse,
+
+			Endpoints: endpoints{
+				"aws-global": endpoint{
+					Hostname: "169.254.169.254/latest",
+					Protocols: []string{"http"},
+				},
+			},
+		},
+		"ecs": service{
+
+			Endpoints: endpoints{
+				"af-south-1": endpoint{},
+				"ap-east-1": endpoint{},
+				"ap-northeast-1": endpoint{},
+				"ap-northeast-2": endpoint{},
+				"ap-south-1": endpoint{},
+				"ap-southeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"ca-central-1": endpoint{},
+				"eu-central-1": endpoint{},
+				"eu-north-1": endpoint{},
+				"eu-south-1": endpoint{},
+				"eu-west-1": endpoint{},
+				"eu-west-2": endpoint{},
+				"eu-west-3": endpoint{},
+				"fips-us-east-1": endpoint{
+					Hostname: "ecs-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				"fips-us-east-2": endpoint{
+					Hostname: "ecs-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				"fips-us-west-1": endpoint{
+					Hostname: "ecs-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				"fips-us-west-2": endpoint{
+					Hostname: "ecs-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+				"me-south-1": endpoint{},
+				"sa-east-1": endpoint{},
+				"us-east-1": endpoint{},
+				"us-east-2": endpoint{},
+				"us-west-1": endpoint{},
+				"us-west-2": endpoint{},
+			},
+		},
+		"eks": service{
+			Defaults: endpoint{
+				Protocols: []string{"http", "https"},
+			},
+			Endpoints: endpoints{
+				"ap-east-1": endpoint{},
+				"ap-northeast-1": endpoint{},
+				"ap-northeast-2": endpoint{},
+				"ap-south-1": endpoint{},
+				"ap-southeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"ca-central-1": endpoint{},
+				"eu-central-1": endpoint{},
+				"eu-north-1": endpoint{},
+				"eu-west-1": endpoint{},
+				"eu-west-2": endpoint{},
+				"eu-west-3": endpoint{},
+				"fips-us-east-1": endpoint{
+					Hostname: "fips.eks.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				"fips-us-east-2": endpoint{
+					Hostname: "fips.eks.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				"fips-us-west-2": endpoint{
+					Hostname: "fips.eks.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+				"me-south-1": endpoint{},
+				"sa-east-1": endpoint{},
+				"us-east-1": endpoint{},
+				"us-east-2": endpoint{},
+				"us-west-2": endpoint{},
+			},
+		},
+		"elasticache": service{
+
+			Endpoints: endpoints{
+				"af-south-1": endpoint{},
+				"ap-east-1": endpoint{},
+				"ap-northeast-1": endpoint{},
+				"ap-northeast-2": endpoint{},
+				"ap-south-1": endpoint{},
+				"ap-southeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"ca-central-1": endpoint{},
+				"eu-central-1": endpoint{},
+				"eu-north-1": endpoint{},
+				"eu-south-1": endpoint{},
+				"eu-west-1": endpoint{},
+				"eu-west-2": endpoint{},
+				"eu-west-3": endpoint{},
+				"fips": endpoint{
+					Hostname: "elasticache-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				"me-south-1": endpoint{},
+				"sa-east-1": endpoint{},
+				"us-east-1": endpoint{},
+				"us-east-2": endpoint{},
+				"us-west-1": endpoint{},
+				"us-west-2": endpoint{},
+			},
+		},
+		"elasticbeanstalk": service{
+
+			Endpoints: endpoints{
+				"af-south-1": endpoint{},
+				"ap-east-1": endpoint{},
+				"ap-northeast-1": endpoint{},
+				"ap-northeast-2": endpoint{},
+				"ap-south-1": endpoint{},
+				"ap-southeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"ca-central-1": endpoint{},
+				"eu-central-1": endpoint{},
+				"eu-north-1": endpoint{},
+				"eu-south-1": endpoint{},
+				"eu-west-1": endpoint{},
+				"eu-west-2": endpoint{},
+				"eu-west-3": endpoint{},
+				"fips-us-east-1": endpoint{
+					Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				"fips-us-east-2": endpoint{
+					Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				"fips-us-west-1": endpoint{
+					Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				"fips-us-west-2": endpoint{
+					Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+				"me-south-1": endpoint{},
+				"sa-east-1": endpoint{},
+				"us-east-1": endpoint{},
+				"us-east-2": endpoint{},
+				"us-west-1": endpoint{},
+				"us-west-2": endpoint{},
+			},
+		},
+		"elasticfilesystem": service{
+
+			Endpoints: endpoints{
+				"af-south-1": endpoint{},
+				"ap-east-1": endpoint{},
+				"ap-northeast-1": endpoint{},
+				"ap-northeast-2": endpoint{},
+				"ap-south-1": endpoint{},
+				"ap-southeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"ca-central-1": endpoint{},
+				"eu-central-1": endpoint{},
+				"eu-north-1": endpoint{},
+				"eu-south-1": endpoint{},
+				"eu-west-1": endpoint{},
+				"eu-west-2": endpoint{},
+				"eu-west-3": endpoint{},
+				"fips-af-south-1": endpoint{
+					Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "af-south-1",
+					},
+				},
+				"fips-ap-east-1": endpoint{
+					Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-east-1",
+					},
+				},
+				"fips-ap-northeast-1": endpoint{
+					Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-northeast-1",
+					},
+				},
+				"fips-ap-northeast-2": endpoint{
+					Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-northeast-2",
+					},
+				},
+				"fips-ap-south-1": endpoint{
+					Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-south-1",
+					},
+				},
+				"fips-ap-southeast-1": endpoint{
+					Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-southeast-1",
+					},
+				},
+				"fips-ap-southeast-2": endpoint{
+					Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-southeast-2",
+					},
+				},
+				"fips-ca-central-1": endpoint{
+					Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+				},
+				"fips-eu-central-1": endpoint{
+					Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-central-1",
+					},
+				},
+				"fips-eu-north-1": endpoint{
+					Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-north-1",
+					},
+				},
+				"fips-eu-south-1": endpoint{
+					Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-south-1",
+					},
+				},
+				"fips-eu-west-1": endpoint{
+					Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-west-1",
+					},
+				},
+				"fips-eu-west-2": endpoint{
+					Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-west-2",
+					},
+				},
+				"fips-eu-west-3": endpoint{
+					Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-west-3",
+					},
+				},
+				"fips-me-south-1": endpoint{
+					Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "me-south-1",
+					},
+				},
+				"fips-sa-east-1": endpoint{
+					Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "sa-east-1",
+					},
+				},
+				"fips-us-east-1": endpoint{
+					Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				"fips-us-east-2": endpoint{
+					Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				"fips-us-west-1": endpoint{
+					Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				"fips-us-west-2": endpoint{
+					Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+				"me-south-1": endpoint{},
+				"sa-east-1": endpoint{},
+				"us-east-1": endpoint{},
+				"us-east-2": endpoint{},
+				"us-west-1": endpoint{},
+				"us-west-2": endpoint{},
+			},
+		},
+		"elasticloadbalancing": service{
+			Defaults: endpoint{
+				Protocols: []string{"https"},
+			},
+			Endpoints: endpoints{
+				"af-south-1": endpoint{},
+				"ap-east-1": endpoint{},
+				"ap-northeast-1": endpoint{},
+				"ap-northeast-2": endpoint{},
+				"ap-south-1": endpoint{},
+				"ap-southeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"ca-central-1": endpoint{},
+				"eu-central-1": endpoint{},
+				"eu-north-1": endpoint{},
+				"eu-south-1": endpoint{},
+				"eu-west-1": endpoint{},
+				"eu-west-2": endpoint{},
+				"eu-west-3": endpoint{},
+				"fips-us-east-1": endpoint{
+					Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+ "fips-us-east-2": endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.{service}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elastictranscoder": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "email": service{ + + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "entitlement.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: 
"es-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "events-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "events-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "events-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "events-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "firehose-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "firehose-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "firehose-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "firehose-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "fms": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ap-northeast-1": endpoint{ + Hostname: "fms-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "fms-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "fms-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + 
Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "fms-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "fms-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "fms-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "fms-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "fms-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "fms-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "fms-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "fms-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "fms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "forecast": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "forecastquery": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "fsx": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "gamelift": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": 
endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "glacier-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "glacier-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "glacier-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "glacier-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "glacier-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "glue": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "glue-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "glue-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "glue-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "glue-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "groundstation": service{ + + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "me-south-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": 
endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "guardduty-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "guardduty-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "guardduty-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "guardduty-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "iam.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "iam-fips": endpoint{ + Hostname: "iam-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "importexport": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "importexport.amazonaws.com", + SignatureVersions: []string{"v2", "v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + Service: "IngestionService", + }, + }, + }, + }, + "inspector": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "inspector-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "inspector-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "inspector-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "inspector-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": 
endpoint{}, + }, + }, + "iotanalytics": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotevents": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ioteventsdata": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "data.iotevents.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "data.iotevents.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "data.iotevents.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "data.iotevents.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "eu-central-1": endpoint{ + Hostname: "data.iotevents.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "data.iotevents.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "data.iotevents.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "data.iotevents.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "data.iotevents.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "data.iotevents.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "iotsecuredtunneling": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotthingsgraph": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "iotthingsgraph", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kafka": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + 
"me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "kinesis-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "kinesis-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "kinesis-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "kinesis-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisvideo": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lakeformation": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": 
endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "lambda-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "lambda-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "lambda-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "lambda-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "license-manager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "license-manager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "license-manager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "license-manager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lightsail": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + 
}, + "machinelearning": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "macie": service{ + + Endpoints: endpoints{ + "fips-us-east-1": endpoint{ + Hostname: "macie-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "macie-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "managedblockchain": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "marketplacecommerceanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "mediaconnect": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "mediaconvert-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "mediaconvert-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "mediaconvert-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "mediaconvert-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "medialive": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediapackage": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + 
"us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediastore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mgh": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mobileanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "models.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "monitoring-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "monitoring-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "monitoring-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "monitoring-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mq": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "mq-fips.us-east-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "mq-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "mq-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "mq-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mturk-requester": service{ + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "sandbox": endpoint{ + Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", + }, + "us-east-1": endpoint{}, + }, + }, + "neptune": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "rds.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "rds.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "rds.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "rds.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "me-south-1": endpoint{ + Hostname: "rds.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "oidc": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{ + Hostname: "oidc.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "oidc.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "oidc.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ 
+ Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "oidc.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "oidc.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "oidc.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "oidc.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "oidc.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "oidc.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "opsworks": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "opsworks-cm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "organizations.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-aws-global": endpoint{ + Hostname: "organizations-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "outposts": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "outposts-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "outposts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "outposts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "outposts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "outposts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "pinpoint": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + Endpoints: 
endpoints{ + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "pinpoint-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "pinpoint-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "pinpoint.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "pinpoint.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "polly-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "polly-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "polly-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "polly-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "portal.sso": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{ + Hostname: "portal.sso.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "portal.sso.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "portal.sso.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "portal.sso.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "portal.sso.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "portal.sso.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "portal.sso.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "portal.sso.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "portal.sso.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "projects.iot1click": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, 
+ }, + }, + "qldb": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ram": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "rds-fips.ca-central-1": endpoint{ + Hostname: "rds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "rds-fips.us-east-1": endpoint{ + Hostname: "rds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "rds-fips.us-east-2": endpoint{ + Hostname: "rds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "rds-fips.us-west-1": endpoint{ + Hostname: "rds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "rds-fips.us-west-2": endpoint{ + Hostname: "rds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "redshift-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "redshift-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "redshift-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "redshift-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "redshift-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, 
+				},
+				"me-south-1": endpoint{},
+				"sa-east-1": endpoint{},
+				"us-east-1": endpoint{},
+				"us-east-2": endpoint{},
+				"us-west-1": endpoint{},
+				"us-west-2": endpoint{},
+			},
+		},
+		"rekognition": service{
+
+			Endpoints: endpoints{
+				"ap-northeast-1": endpoint{},
+				"ap-northeast-2": endpoint{},
+				"ap-south-1": endpoint{},
+				"ap-southeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"eu-central-1": endpoint{},
+				"eu-west-1": endpoint{},
+				"eu-west-2": endpoint{},
+				"us-east-1": endpoint{},
+				"us-east-2": endpoint{},
+				"us-west-1": endpoint{},
+				"us-west-2": endpoint{},
+			},
+		},
+		"resource-groups": service{
+
+			Endpoints: endpoints{
+				"af-south-1": endpoint{},
+				"ap-east-1": endpoint{},
+				"ap-northeast-1": endpoint{},
+				"ap-northeast-2": endpoint{},
+				"ap-south-1": endpoint{},
+				"ap-southeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"ca-central-1": endpoint{},
+				"eu-central-1": endpoint{},
+				"eu-north-1": endpoint{},
+				"eu-south-1": endpoint{},
+				"eu-west-1": endpoint{},
+				"eu-west-2": endpoint{},
+				"eu-west-3": endpoint{},
+				"fips-us-east-1": endpoint{
+					Hostname: "resource-groups-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				"fips-us-east-2": endpoint{
+					Hostname: "resource-groups-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				"fips-us-west-1": endpoint{
+					Hostname: "resource-groups-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				"fips-us-west-2": endpoint{
+					Hostname: "resource-groups-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+				"me-south-1": endpoint{},
+				"sa-east-1": endpoint{},
+				"us-east-1": endpoint{},
+				"us-east-2": endpoint{},
+				"us-west-1": endpoint{},
+				"us-west-2": endpoint{},
+			},
+		},
+		"robomaker": service{
+
+			Endpoints: endpoints{
+				"ap-northeast-1": endpoint{},
+				"ap-southeast-1": endpoint{},
+				"eu-central-1": endpoint{},
+				"eu-west-1": endpoint{},
+				"us-east-1": endpoint{},
+				"us-east-2": endpoint{},
+				"us-west-2": endpoint{},
+			},
+		},
+		"route53": service{
+			PartitionEndpoint: "aws-global",
+			IsRegionalized: boxedFalse,
+
+			Endpoints: endpoints{
+				"aws-global": endpoint{
+					Hostname: "route53.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+			},
+		},
+		"route53domains": service{
+
+			Endpoints: endpoints{
+				"us-east-1": endpoint{},
+			},
+		},
+		"route53resolver": service{
+			Defaults: endpoint{
+				Protocols: []string{"https"},
+			},
+			Endpoints: endpoints{
+				"ap-east-1": endpoint{},
+				"ap-northeast-1": endpoint{},
+				"ap-northeast-2": endpoint{},
+				"ap-south-1": endpoint{},
+				"ap-southeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"ca-central-1": endpoint{},
+				"eu-central-1": endpoint{},
+				"eu-north-1": endpoint{},
+				"eu-west-1": endpoint{},
+				"eu-west-2": endpoint{},
+				"eu-west-3": endpoint{},
+				"me-south-1": endpoint{},
+				"sa-east-1": endpoint{},
+				"us-east-1": endpoint{},
+				"us-east-2": endpoint{},
+				"us-west-1": endpoint{},
+				"us-west-2": endpoint{},
+			},
+		},
+		"runtime.lex": service{
+			Defaults: endpoint{
+				CredentialScope: credentialScope{
+					Service: "lex",
+				},
+			},
+			Endpoints: endpoints{
+				"ap-southeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"eu-central-1": endpoint{},
+				"eu-west-1": endpoint{},
+				"eu-west-2": endpoint{},
+				"us-east-1": endpoint{},
+				"us-west-2": endpoint{},
+			},
+		},
+		"runtime.sagemaker": service{
+
+			Endpoints: endpoints{
+				"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "s3": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{ + Hostname: "s3.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{ + Hostname: "s3.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-southeast-2": endpoint{ + Hostname: "s3.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "aws-global": endpoint{ + Hostname: "s3.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{ + Hostname: "s3.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "s3-external-1": endpoint{ + Hostname: "s3-external-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-1": endpoint{ + Hostname: "s3.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{ + Hostname: "s3.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-west-2": endpoint{ + Hostname: "s3.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "s3-control.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + 
+				},
+				"ap-northeast-2": endpoint{
+					Hostname: "s3-control.ap-northeast-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "ap-northeast-2",
+					},
+				},
+				"ap-south-1": endpoint{
+					Hostname: "s3-control.ap-south-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "ap-south-1",
+					},
+				},
+				"ap-southeast-1": endpoint{
+					Hostname: "s3-control.ap-southeast-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "ap-southeast-1",
+					},
+				},
+				"ap-southeast-2": endpoint{
+					Hostname: "s3-control.ap-southeast-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "ap-southeast-2",
+					},
+				},
+				"ca-central-1": endpoint{
+					Hostname: "s3-control.ca-central-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+				},
+				"eu-central-1": endpoint{
+					Hostname: "s3-control.eu-central-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-central-1",
+					},
+				},
+				"eu-north-1": endpoint{
+					Hostname: "s3-control.eu-north-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-north-1",
+					},
+				},
+				"eu-west-1": endpoint{
+					Hostname: "s3-control.eu-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-west-1",
+					},
+				},
+				"eu-west-2": endpoint{
+					Hostname: "s3-control.eu-west-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-west-2",
+					},
+				},
+				"eu-west-3": endpoint{
+					Hostname: "s3-control.eu-west-3.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-west-3",
+					},
+				},
+				"sa-east-1": endpoint{
+					Hostname: "s3-control.sa-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "sa-east-1",
+					},
+				},
+				"us-east-1": endpoint{
+					Hostname: "s3-control.us-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				"us-east-1-fips": endpoint{
+					Hostname: "s3-control-fips.us-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				"us-east-2": endpoint{
+					Hostname: "s3-control.us-east-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				"us-east-2-fips": endpoint{
+					Hostname: "s3-control-fips.us-east-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				"us-west-1": endpoint{
+					Hostname: "s3-control.us-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				"us-west-1-fips": endpoint{
+					Hostname: "s3-control-fips.us-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				"us-west-2": endpoint{
+					Hostname: "s3-control.us-west-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+				"us-west-2-fips": endpoint{
+					Hostname: "s3-control-fips.us-west-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+			},
+		},
+		"savingsplans": service{
+			PartitionEndpoint: "aws-global",
+			IsRegionalized: boxedFalse,
+
+			Endpoints: endpoints{
+				"aws-global": endpoint{
+					Hostname: "savingsplans.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+			},
+		},
+		"schemas": service{
+
+			Endpoints: endpoints{
+				"ap-east-1": endpoint{},
+				"ap-northeast-1": endpoint{},
+				"ap-northeast-2": endpoint{},
+				"ap-south-1": endpoint{},
+				"ap-southeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"ca-central-1": endpoint{},
+				"eu-central-1": endpoint{},
+				"eu-north-1": endpoint{},
+				"eu-west-1": endpoint{},
+				"eu-west-2": endpoint{},
+				"eu-west-3": endpoint{},
+				"sa-east-1": endpoint{},
+				"us-east-1": endpoint{},
+				"us-east-2": endpoint{},
+				"us-west-1": endpoint{},
+				"us-west-2": endpoint{},
+			},
+		},
+		"sdb": service{
+			Defaults: endpoint{
+				Protocols: []string{"http", "https"},
+				SignatureVersions: []string{"v2"},
+			},
+			Endpoints: endpoints{
+				"ap-northeast-1": endpoint{},
+				"ap-southeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"eu-west-1": endpoint{},
+				"sa-east-1": endpoint{},
+				"us-east-1": endpoint{
+					Hostname: "sdb.amazonaws.com",
+				},
+				"us-west-1": endpoint{},
+				"us-west-2": endpoint{},
+			},
+		},
+		"secretsmanager": service{
+
+			Endpoints: endpoints{
+				"af-south-1": endpoint{},
+				"ap-east-1": endpoint{},
+				"ap-northeast-1": endpoint{},
+				"ap-northeast-2": endpoint{},
+				"ap-south-1": endpoint{},
+				"ap-southeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"ca-central-1": endpoint{},
+				"eu-central-1": endpoint{},
+				"eu-north-1": endpoint{},
+				"eu-south-1": endpoint{},
+				"eu-west-1": endpoint{},
+				"eu-west-2": endpoint{},
+				"eu-west-3": endpoint{},
+				"me-south-1": endpoint{},
+				"sa-east-1": endpoint{},
+				"us-east-1": endpoint{},
+				"us-east-1-fips": endpoint{
+					Hostname: "secretsmanager-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				"us-east-2": endpoint{},
+				"us-east-2-fips": endpoint{
+					Hostname: "secretsmanager-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				"us-west-1": endpoint{},
+				"us-west-1-fips": endpoint{
+					Hostname: "secretsmanager-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				"us-west-2": endpoint{},
+				"us-west-2-fips": endpoint{
+					Hostname: "secretsmanager-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+			},
+		},
+		"securityhub": service{
+
+			Endpoints: endpoints{
+				"af-south-1": endpoint{},
+				"ap-east-1": endpoint{},
+				"ap-northeast-1": endpoint{},
+				"ap-northeast-2": endpoint{},
+				"ap-south-1": endpoint{},
+				"ap-southeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"ca-central-1": endpoint{},
+				"eu-central-1": endpoint{},
+				"eu-north-1": endpoint{},
+				"eu-south-1": endpoint{},
+				"eu-west-1": endpoint{},
+				"eu-west-2": endpoint{},
+				"eu-west-3": endpoint{},
+				"me-south-1": endpoint{},
+				"sa-east-1": endpoint{},
+				"us-east-1": endpoint{},
+				"us-east-2": endpoint{},
+				"us-west-1": endpoint{},
+				"us-west-2": endpoint{},
+			},
+		},
+		"serverlessrepo": service{
+			Defaults: endpoint{
+				Protocols: []string{"https"},
+			},
+			Endpoints: endpoints{
+				"ap-east-1": endpoint{
+					Protocols: []string{"https"},
+				},
+				"ap-northeast-1": endpoint{
+					Protocols: []string{"https"},
+				},
+				"ap-northeast-2": endpoint{
+					Protocols: []string{"https"},
+				},
+				"ap-south-1": endpoint{
+					Protocols: []string{"https"},
[]string{"https"}, + }, + "ap-southeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ca-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-north-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-2": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-3": endpoint{ + Protocols: []string{"https"}, + }, + "me-south-1": endpoint{ + Protocols: []string{"https"}, + }, + "sa-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-2": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-2": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "servicecatalog": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "servicediscovery": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "session.qldb": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "shield": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpoint{ + SSLCommonName: "shield.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "shield.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-aws-global": endpoint{ + Hostname: "shield-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, 
+			},
+		},
+		"sms": service{
+
+			Endpoints: endpoints{
+				"af-south-1": endpoint{},
+				"ap-east-1": endpoint{},
+				"ap-northeast-1": endpoint{},
+				"ap-northeast-2": endpoint{},
+				"ap-south-1": endpoint{},
+				"ap-southeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"ca-central-1": endpoint{},
+				"eu-central-1": endpoint{},
+				"eu-north-1": endpoint{},
+				"eu-south-1": endpoint{},
+				"eu-west-1": endpoint{},
+				"eu-west-2": endpoint{},
+				"eu-west-3": endpoint{},
+				"fips-us-east-1": endpoint{
+					Hostname: "sms-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				"fips-us-east-2": endpoint{
+					Hostname: "sms-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				"fips-us-west-1": endpoint{
+					Hostname: "sms-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				"fips-us-west-2": endpoint{
+					Hostname: "sms-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+				"me-south-1": endpoint{},
+				"sa-east-1": endpoint{},
+				"us-east-1": endpoint{},
+				"us-east-2": endpoint{},
+				"us-west-1": endpoint{},
+				"us-west-2": endpoint{},
+			},
+		},
+		"snowball": service{
+
+			Endpoints: endpoints{
+				"ap-northeast-1": endpoint{},
+				"ap-northeast-2": endpoint{},
+				"ap-south-1": endpoint{},
+				"ap-southeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"ca-central-1": endpoint{},
+				"eu-central-1": endpoint{},
+				"eu-west-1": endpoint{},
+				"eu-west-2": endpoint{},
+				"eu-west-3": endpoint{},
+				"fips-ap-northeast-1": endpoint{
+					Hostname: "snowball-fips.ap-northeast-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-northeast-1",
+					},
+				},
+				"fips-ap-northeast-2": endpoint{
+					Hostname: "snowball-fips.ap-northeast-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-northeast-2",
+					},
+				},
+				"fips-ap-south-1": endpoint{
+					Hostname: "snowball-fips.ap-south-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-south-1",
+					},
+				},
+				"fips-ap-southeast-1": endpoint{
+					Hostname: "snowball-fips.ap-southeast-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-southeast-1",
+					},
+				},
+				"fips-ap-southeast-2": endpoint{
+					Hostname: "snowball-fips.ap-southeast-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-southeast-2",
+					},
+				},
+				"fips-ca-central-1": endpoint{
+					Hostname: "snowball-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+				},
+				"fips-eu-central-1": endpoint{
+					Hostname: "snowball-fips.eu-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-central-1",
+					},
+				},
+				"fips-eu-west-1": endpoint{
+					Hostname: "snowball-fips.eu-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-west-1",
+					},
+				},
+				"fips-eu-west-2": endpoint{
+					Hostname: "snowball-fips.eu-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-west-2",
+					},
+				},
+				"fips-eu-west-3": endpoint{
+					Hostname: "snowball-fips.eu-west-3.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-west-3",
+					},
+				},
+				"fips-sa-east-1": endpoint{
+					Hostname: "snowball-fips.sa-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "sa-east-1",
+					},
+				},
+				"fips-us-east-1": endpoint{
+					Hostname: "snowball-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				"fips-us-east-2": endpoint{
+					Hostname: "snowball-fips.us-east-2.amazonaws.com",
"snowball-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "snowball-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "snowball-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sns-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sns-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sns-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sns-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sqs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sqs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sqs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "queue.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + 
"eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "ssm-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ssm-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ssm-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ssm-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "ssm-facade-fips-us-east-1": endpoint{ + Hostname: "ssm-facade-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "ssm-facade-fips-us-east-2": endpoint{ + Hostname: "ssm-facade-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "ssm-facade-fips-us-west-1": endpoint{ + Hostname: "ssm-facade-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "ssm-facade-fips-us-west-2": endpoint{ + Hostname: "ssm-facade-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "states-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "states-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "states-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "states-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, 
+			Endpoints: endpoints{
+				"ap-northeast-1": endpoint{},
+				"ap-northeast-2": endpoint{},
+				"ap-south-1": endpoint{},
+				"ap-southeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"ca-central-1": endpoint{},
+				"ca-central-1-fips": endpoint{
+					Hostname: "dynamodb-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+				},
+				"eu-central-1": endpoint{},
+				"eu-north-1": endpoint{},
+				"eu-west-1": endpoint{},
+				"eu-west-2": endpoint{},
+				"eu-west-3": endpoint{},
+				"local": endpoint{
+					Hostname: "localhost:8000",
+					Protocols: []string{"http"},
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				"me-south-1": endpoint{},
+				"sa-east-1": endpoint{},
+				"us-east-1": endpoint{},
+				"us-east-1-fips": endpoint{
+					Hostname: "dynamodb-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				"us-east-2": endpoint{},
+				"us-east-2-fips": endpoint{
+					Hostname: "dynamodb-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				"us-west-1": endpoint{},
+				"us-west-1-fips": endpoint{
+					Hostname: "dynamodb-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				"us-west-2": endpoint{},
+				"us-west-2-fips": endpoint{
+					Hostname: "dynamodb-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+			},
+		},
+		"sts": service{
+			PartitionEndpoint: "aws-global",
+
+			Endpoints: endpoints{
+				"af-south-1": endpoint{},
+				"ap-east-1": endpoint{},
+				"ap-northeast-1": endpoint{},
+				"ap-northeast-2": endpoint{},
+				"ap-south-1": endpoint{},
+				"ap-southeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"aws-global": endpoint{
+					Hostname: "sts.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				"ca-central-1": endpoint{},
+				"eu-central-1": endpoint{},
+				"eu-north-1": endpoint{},
+				"eu-south-1": endpoint{},
+				"eu-west-1": endpoint{},
+				"eu-west-2": endpoint{},
+				"eu-west-3": endpoint{},
+				"me-south-1": endpoint{},
+				"sa-east-1": endpoint{},
+				"us-east-1": endpoint{},
+				"us-east-1-fips": endpoint{
+					Hostname: "sts-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				"us-east-2": endpoint{},
+				"us-east-2-fips": endpoint{
+					Hostname: "sts-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				"us-west-1": endpoint{},
+				"us-west-1-fips": endpoint{
+					Hostname: "sts-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				"us-west-2": endpoint{},
+				"us-west-2-fips": endpoint{
+					Hostname: "sts-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+			},
+		},
+		"support": service{
+			PartitionEndpoint: "aws-global",
+
+			Endpoints: endpoints{
+				"aws-global": endpoint{
+					Hostname: "support.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+			},
+		},
+		"swf": service{
+
+			Endpoints: endpoints{
+				"af-south-1": endpoint{},
+				"ap-east-1": endpoint{},
+				"ap-northeast-1": endpoint{},
+				"ap-northeast-2": endpoint{},
+				"ap-south-1": endpoint{},
+				"ap-southeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"ca-central-1": endpoint{},
+				"eu-central-1": endpoint{},
+				"eu-north-1": endpoint{},
+				"eu-south-1": endpoint{},
+				"eu-west-1": endpoint{},
+				"eu-west-2": endpoint{},
+				"eu-west-3": endpoint{},
+				"fips-us-east-1": endpoint{
+					Hostname: "swf-fips.us-east-1.amazonaws.com",
"swf-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "swf-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "swf-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "swf-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "fips.transcribe.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fips.transcribe.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fips.transcribe.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fips.transcribe.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "transcribestreaming": service{ + + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "transfer": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, 
+ "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "translate-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "translate-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "translate-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "waf": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-fips": endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "aws-global": endpoint{ + Hostname: "waf.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{ + Hostname: "waf-regional.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "ap-northeast-1": endpoint{ + Hostname: "waf-regional.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "waf-regional.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "waf-regional.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "waf-regional.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "waf-regional.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "waf-regional.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "waf-regional.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "waf-regional.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "waf-regional.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "waf-regional.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "waf-regional.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-ap-east-1": endpoint{ + Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "fips-ap-northeast-1": endpoint{ + Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"ap-northeast-2", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-north-1": endpoint{ + Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-me-south-1": endpoint{ + Hostname: "waf-regional-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "waf-regional-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "waf-regional-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "waf-regional-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "waf-regional-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{ + Hostname: "waf-regional.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "waf-regional.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "waf-regional.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "waf-regional.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "waf-regional.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "waf-regional.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "workdocs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: 
"workdocs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "workdocs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workmail": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "xray": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + }, +} + +// AwsCnPartition returns the Resolver for AWS China. +func AwsCnPartition() Partition { + return awscnPartition.Partition() +} + +var awscnPartition = partition{ + ID: "aws-cn", + Name: "AWS China", + DNSSuffix: "amazonaws.com.cn", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "cn-north-1": region{ + Description: "China (Beijing)", + }, + "cn-northwest-1": region{ + Description: "China (Ningxia)", + }, + }, + Services: services{ + "acm": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "api.ecr": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "api.ecr.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "appsync": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + 
"backup": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "dax": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "eks": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + "fips-cn-north-1": endpoint{ + Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "fips-cn-northwest-1": endpoint{ + Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + 
"cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "gamelift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "glue": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "iam.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "iotsecuredtunneling": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kafka": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{ + Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "neptune": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{ + Hostname: "rds.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + 
"route53": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "route53.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "s3-control.cn-north-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Protocols: []string{"https"}, + }, + "cn-northwest-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "fips-cn-north-1": endpoint{ + Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-cn-global", + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "support.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": 
+			},
+		},
+		"tagging": service{
+
+			Endpoints: endpoints{
+				"cn-north-1": endpoint{},
+				"cn-northwest-1": endpoint{},
+			},
+		},
+		"transcribe": service{
+			Defaults: endpoint{
+				Protocols: []string{"https"},
+			},
+			Endpoints: endpoints{
+				"cn-north-1": endpoint{
+					Hostname: "cn.transcribe.cn-north-1.amazonaws.com.cn",
+					CredentialScope: credentialScope{
+						Region: "cn-north-1",
+					},
+				},
+				"cn-northwest-1": endpoint{
+					Hostname: "cn.transcribe.cn-northwest-1.amazonaws.com.cn",
+					CredentialScope: credentialScope{
+						Region: "cn-northwest-1",
+					},
+				},
+			},
+		},
+		"workspaces": service{
+
+			Endpoints: endpoints{
+				"cn-northwest-1": endpoint{},
+			},
+		},
+		"xray": service{
+
+			Endpoints: endpoints{
+				"cn-north-1": endpoint{},
+				"cn-northwest-1": endpoint{},
+			},
+		},
+	},
+}
+
+// AwsUsGovPartition returns the Resolver for AWS GovCloud (US).
+func AwsUsGovPartition() Partition {
+	return awsusgovPartition.Partition()
+}
+
+var awsusgovPartition = partition{
+	ID: "aws-us-gov",
+	Name: "AWS GovCloud (US)",
+	DNSSuffix: "amazonaws.com",
+	RegionRegex: regionRegex{
+		Regexp: func() *regexp.Regexp {
+			reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$")
+			return reg
+		}(),
+	},
+	Defaults: endpoint{
+		Hostname: "{service}.{region}.{dnsSuffix}",
+		Protocols: []string{"https"},
+		SignatureVersions: []string{"v4"},
+	},
+	Regions: regions{
+		"us-gov-east-1": region{
+			Description: "AWS GovCloud (US-East)",
+		},
+		"us-gov-west-1": region{
+			Description: "AWS GovCloud (US-West)",
+		},
+	},
+	Services: services{
+		"access-analyzer": service{
+
+			Endpoints: endpoints{
+				"us-gov-east-1": endpoint{},
+				"us-gov-west-1": endpoint{},
+			},
+		},
+		"acm": service{
+
+			Endpoints: endpoints{
+				"us-gov-east-1": endpoint{},
+				"us-gov-west-1": endpoint{},
+			},
+		},
+		"acm-pca": service{
+			Defaults: endpoint{
+				Protocols: []string{"https"},
+			},
+			Endpoints: endpoints{
+				"fips-us-gov-east-1": endpoint{
+					Hostname: "acm-pca.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+				},
+				"fips-us-gov-west-1": endpoint{
+					Hostname: "acm-pca.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+				},
+				"us-gov-east-1": endpoint{},
+				"us-gov-west-1": endpoint{},
+			},
+		},
+		"api.ecr": service{
+
+			Endpoints: endpoints{
+				"fips-us-gov-east-1": endpoint{
+					Hostname: "ecr-fips.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+				},
+				"fips-us-gov-west-1": endpoint{
+					Hostname: "ecr-fips.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+				},
+				"us-gov-east-1": endpoint{
+					Hostname: "api.ecr.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+				},
+				"us-gov-west-1": endpoint{
+					Hostname: "api.ecr.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+				},
+			},
+		},
+		"api.sagemaker": service{
+
+			Endpoints: endpoints{
+				"us-gov-west-1": endpoint{},
+			},
+		},
+		"apigateway": service{
+
+			Endpoints: endpoints{
+				"us-gov-east-1": endpoint{},
+				"us-gov-west-1": endpoint{},
+			},
+		},
+		"application-autoscaling": service{
+			Defaults: endpoint{
+				Hostname: "autoscaling.{region}.amazonaws.com",
+				Protocols: []string{"http", "https"},
+				CredentialScope: credentialScope{
+					Service: "application-autoscaling",
+				},
+			},
+			Endpoints: endpoints{
+				"us-gov-east-1": endpoint{},
+				"us-gov-west-1": endpoint{},
+			},
+		},
+		"appstream2": service{
+			Defaults: endpoint{
Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "athena-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "athena-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "autoscaling": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "batch.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "batch.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "cloudformation.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "cloudformation.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "cloudtrail.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "us-gov-east-1": 
endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "codepipeline": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "codepipeline-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cognito-idp": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "comprehendmedical": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "datasync": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "datasync-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "directconnect.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "directconnect.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "dms-fips": endpoint{ + Hostname: "dms.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ds-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ds-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "dynamodb": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ec2": service{ + + Endpoints: endpoints{ + 
"us-gov-east-1": endpoint{ + Hostname: "ec2.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "ec2.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ecs-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ecs-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "eks": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "elasticloadbalancing-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "elasticloadbalancing-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "email": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "email-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": 
endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "es-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "events.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "events.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "firehose-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "firehose-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "glacier.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "glacier.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "glue": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "glue-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "glue-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Hostname: "greengrass.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "guardduty.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "inspector": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "inspector-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "inspector-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + 
}, + }, + "iotsecuredtunneling": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "kafka": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "lambda-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "lambda-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "license-manager-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Hostname: "mediaconvert.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "monitoring.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "monitoring.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "neptune": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "fips-aws-us-gov-global": endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "outposts": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "outposts.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + 
}, + }, + "us-gov-west-1": endpoint{ + Hostname: "outposts.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "pinpoint": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "polly-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "ram": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "rds.us-gov-east-1": endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "rds.us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "redshift.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "redshift.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "resource-groups": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "resource-groups.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "resource-groups.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "route53.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "route53resolver": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + SignatureVersions: []string{"s3", "s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "s3-fips-us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{ + Hostname: "s3.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + "us-gov-west-1": endpoint{ + Hostname: "s3.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "s3-control.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + 
CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-east-1-fips": endpoint{ + Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "s3-control.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1-fips": endpoint{ + Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "securityhub": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "servicecatalog": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "sms-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "sms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "snowball-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "snowball-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "sns": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "sns.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "sns.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "sqs": service{ + + 
Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "sqs.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "sqs.us-gov-west-1.amazonaws.com", + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ssm.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ssm.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "ssm-facade-fips-us-gov-east-1": endpoint{ + Hostname: "ssm-facade.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "ssm-facade-fips-us-gov-west-1": endpoint{ + Hostname: "ssm-facade.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "states-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "states.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "sts.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "sts.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "support": service{ + PartitionEndpoint: "aws-us-gov-global", + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "swf.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "swf.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "us-gov-east-1": 
endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "translate-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "waf-regional.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "xray": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + }, +} + +// AwsIsoPartition returns the Resolver for AWS ISO (US). +func AwsIsoPartition() Partition { + return awsisoPartition.Partition() +} + +var awsisoPartition = partition{ + ID: "aws-iso", + Name: "AWS ISO (US)", + DNSSuffix: "c2s.ic.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-iso\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-iso-east-1": region{ + Description: "US ISO East", + }, + }, + Services: services{ + "api.ecr": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "autoscaling": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "datapipeline": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "dms": service{ + + 
Endpoints: endpoints{ + "dms-fips": endpoint{ + Hostname: "dms.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + "us-iso-east-1": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "dynamodb": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "ec2": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-iso-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-global": endpoint{ + Hostname: "iam.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + "us-iso-east-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-iso-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-global": endpoint{ + Hostname: "route53.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "sns": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "sqs": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + 
"states": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-iso-global", + + Endpoints: endpoints{ + "aws-iso-global": endpoint{ + Hostname: "support.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + }, +} + +// AwsIsoBPartition returns the Resolver for AWS ISOB (US). +func AwsIsoBPartition() Partition { + return awsisobPartition.Partition() +} + +var awsisobPartition = partition{ + ID: "aws-iso-b", + Name: "AWS ISOB (US)", + DNSSuffix: "sc2s.sgov.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-isob\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-isob-east-1": region{ + Description: "US ISOB East (Ohio)", + }, + }, + Services: services{ + "application-autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "dms-fips": endpoint{ + Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + "us-isob-east-1": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "health": service{ + + Endpoints: 
endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-iso-b-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-b-global": endpoint{ + Hostname: "iam.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + "us-isob-east-1": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-iso-b-global", + + Endpoints: endpoints{ + "aws-iso-b-global": endpoint{ + Hostname: "support.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + }, +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go new file mode 100644 index 000000000..ca8fc828e --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go @@ -0,0 +1,141 @@ +package endpoints + +// Service identifiers +// +// Deprecated: Use client package's EndpointsID value instead of these +// ServiceIDs. These IDs are not maintained, and are out of date. +const ( + A4bServiceID = "a4b" // A4b. + AcmServiceID = "acm" // Acm. + AcmPcaServiceID = "acm-pca" // AcmPca. + ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. + ApiPricingServiceID = "api.pricing" // ApiPricing. + ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker. + ApigatewayServiceID = "apigateway" // Apigateway. + ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. 
+ Appstream2ServiceID = "appstream2" // Appstream2. + AppsyncServiceID = "appsync" // Appsync. + AthenaServiceID = "athena" // Athena. + AutoscalingServiceID = "autoscaling" // Autoscaling. + AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. + BatchServiceID = "batch" // Batch. + BudgetsServiceID = "budgets" // Budgets. + CeServiceID = "ce" // Ce. + ChimeServiceID = "chime" // Chime. + Cloud9ServiceID = "cloud9" // Cloud9. + ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. + CloudformationServiceID = "cloudformation" // Cloudformation. + CloudfrontServiceID = "cloudfront" // Cloudfront. + CloudhsmServiceID = "cloudhsm" // Cloudhsm. + Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. + CloudsearchServiceID = "cloudsearch" // Cloudsearch. + CloudtrailServiceID = "cloudtrail" // Cloudtrail. + CodebuildServiceID = "codebuild" // Codebuild. + CodecommitServiceID = "codecommit" // Codecommit. + CodedeployServiceID = "codedeploy" // Codedeploy. + CodepipelineServiceID = "codepipeline" // Codepipeline. + CodestarServiceID = "codestar" // Codestar. + CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. + CognitoIdpServiceID = "cognito-idp" // CognitoIdp. + CognitoSyncServiceID = "cognito-sync" // CognitoSync. + ComprehendServiceID = "comprehend" // Comprehend. + ConfigServiceID = "config" // Config. + CurServiceID = "cur" // Cur. + DatapipelineServiceID = "datapipeline" // Datapipeline. + DaxServiceID = "dax" // Dax. + DevicefarmServiceID = "devicefarm" // Devicefarm. + DirectconnectServiceID = "directconnect" // Directconnect. + DiscoveryServiceID = "discovery" // Discovery. + DmsServiceID = "dms" // Dms. + DsServiceID = "ds" // Ds. + DynamodbServiceID = "dynamodb" // Dynamodb. + Ec2ServiceID = "ec2" // Ec2. + Ec2metadataServiceID = "ec2metadata" // Ec2metadata. + EcrServiceID = "ecr" // Ecr. + EcsServiceID = "ecs" // Ecs. + ElasticacheServiceID = "elasticache" // Elasticache. + ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. + ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. + ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. + ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. + ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. + EmailServiceID = "email" // Email. + EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. + EsServiceID = "es" // Es. + EventsServiceID = "events" // Events. + FirehoseServiceID = "firehose" // Firehose. + FmsServiceID = "fms" // Fms. + GameliftServiceID = "gamelift" // Gamelift. + GlacierServiceID = "glacier" // Glacier. + GlueServiceID = "glue" // Glue. + GreengrassServiceID = "greengrass" // Greengrass. + GuarddutyServiceID = "guardduty" // Guardduty. + HealthServiceID = "health" // Health. + IamServiceID = "iam" // Iam. + ImportexportServiceID = "importexport" // Importexport. + InspectorServiceID = "inspector" // Inspector. + IotServiceID = "iot" // Iot. + IotanalyticsServiceID = "iotanalytics" // Iotanalytics. + KinesisServiceID = "kinesis" // Kinesis. + KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. + KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. + KmsServiceID = "kms" // Kms. + LambdaServiceID = "lambda" // Lambda. + LightsailServiceID = "lightsail" // Lightsail. + LogsServiceID = "logs" // Logs. + MachinelearningServiceID = "machinelearning" // Machinelearning. 
+ MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. + MediaconvertServiceID = "mediaconvert" // Mediaconvert. + MedialiveServiceID = "medialive" // Medialive. + MediapackageServiceID = "mediapackage" // Mediapackage. + MediastoreServiceID = "mediastore" // Mediastore. + MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. + MghServiceID = "mgh" // Mgh. + MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. + ModelsLexServiceID = "models.lex" // ModelsLex. + MonitoringServiceID = "monitoring" // Monitoring. + MturkRequesterServiceID = "mturk-requester" // MturkRequester. + NeptuneServiceID = "neptune" // Neptune. + OpsworksServiceID = "opsworks" // Opsworks. + OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. + OrganizationsServiceID = "organizations" // Organizations. + PinpointServiceID = "pinpoint" // Pinpoint. + PollyServiceID = "polly" // Polly. + RdsServiceID = "rds" // Rds. + RedshiftServiceID = "redshift" // Redshift. + RekognitionServiceID = "rekognition" // Rekognition. + ResourceGroupsServiceID = "resource-groups" // ResourceGroups. + Route53ServiceID = "route53" // Route53. + Route53domainsServiceID = "route53domains" // Route53domains. + RuntimeLexServiceID = "runtime.lex" // RuntimeLex. + RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. + S3ServiceID = "s3" // S3. + S3ControlServiceID = "s3-control" // S3Control. + SagemakerServiceID = "api.sagemaker" // Sagemaker. + SdbServiceID = "sdb" // Sdb. + SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. + ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. + ServicecatalogServiceID = "servicecatalog" // Servicecatalog. + ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. + ShieldServiceID = "shield" // Shield. + SmsServiceID = "sms" // Sms. + SnowballServiceID = "snowball" // Snowball. + SnsServiceID = "sns" // Sns. + SqsServiceID = "sqs" // Sqs. + SsmServiceID = "ssm" // Ssm. + StatesServiceID = "states" // States. + StoragegatewayServiceID = "storagegateway" // Storagegateway. + StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. + StsServiceID = "sts" // Sts. + SupportServiceID = "support" // Support. + SwfServiceID = "swf" // Swf. + TaggingServiceID = "tagging" // Tagging. + TransferServiceID = "transfer" // Transfer. + TranslateServiceID = "translate" // Translate. + WafServiceID = "waf" // Waf. + WafRegionalServiceID = "waf-regional" // WafRegional. + WorkdocsServiceID = "workdocs" // Workdocs. + WorkmailServiceID = "workmail" // Workmail. + WorkspacesServiceID = "workspaces" // Workspaces. + XrayServiceID = "xray" // Xray. +) diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go new file mode 100644 index 000000000..84316b92c --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go @@ -0,0 +1,65 @@ +// Package endpoints provides the types and functionality for defining regions +// and endpoints, as well as querying those definitions. +// +// The SDK's Regions and Endpoints metadata is code generated into the endpoints +// package, and is accessible via the DefaultResolver function. This function +// returns an endpoint Resolver that will search the metadata and build an associated +// endpoint if one is found. The default resolver will search all partitions +// known by the SDK.
e.g. AWS Standard (aws), AWS China (aws-cn), and +// AWS GovCloud (US) (aws-us-gov). +// +// Enumerating Regions and Endpoint Metadata +// +// Casting the Resolver returned by DefaultResolver to an EnumPartitions interface +// will allow you to get access to the list of underlying Partitions with the +// Partitions method. This is helpful if you want to limit the SDK's endpoint +// resolving to a single partition, or enumerate regions, services, and endpoints +// in the partition. +// +// resolver := endpoints.DefaultResolver() +// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// +// for _, p := range partitions { +// fmt.Println("Regions for", p.ID()) +// for id, _ := range p.Regions() { +// fmt.Println("*", id) +// } +// +// fmt.Println("Services for", p.ID()) +// for id, _ := range p.Services() { +// fmt.Println("*", id) +// } +// } +// +// Using Custom Endpoints +// +// The endpoints package also gives you the ability to use your own logic for how +// endpoints are resolved. This is a great way to define a custom endpoint +// for select services, without passing that logic down through your code. +// +// If a type implements the Resolver interface, it can be used to resolve +// endpoints. To use this with the SDK's Session and Config, set the value +// of the type to the EndpointResolver field of aws.Config when initializing +// the session, or service client. +// +// In addition, ResolverFunc is a wrapper for a func matching the signature +// of Resolver.EndpointFor, converting it to a type that satisfies the +// Resolver interface. +// +// +// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { +// if service == endpoints.S3ServiceID { +// return endpoints.ResolvedEndpoint{ +// URL: "s3.custom.endpoint.com", +// SigningRegion: "custom-signing-region", +// }, nil +// } +// +// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...) +// } +// +// sess := session.Must(session.NewSession(&aws.Config{ +// Region: aws.String("us-west-2"), +// EndpointResolver: endpoints.ResolverFunc(myCustomResolver), +// })) +package endpoints diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go new file mode 100644 index 000000000..ca956e5f1 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -0,0 +1,564 @@ +package endpoints + +import ( + "fmt" + "regexp" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Options provides the configuration needed to direct how the +// endpoints will be resolved. +type Options struct { + // DisableSSL forces the endpoint to be resolved as HTTP + // instead of HTTPS, if the service supports it. + DisableSSL bool + + // Sets the resolver to resolve the endpoint as a dualstack endpoint + // for the service. If dualstack support for a service is not known and + // StrictMatching is not enabled, a dualstack endpoint for the service will + // be returned. This endpoint may not be valid. If StrictMatching is + // enabled, only services that are known to support dualstack will return + // dualstack endpoints. + UseDualStack bool + + // Enables strict matching of services and regions when resolving endpoints. + // If the partition doesn't enumerate the exact service and region, an + // error will be returned. This option will prevent returning endpoints + // that look valid, but may not resolve to any real endpoint.
+ StrictMatching bool + + // Enables resolving a service endpoint based on the region provided if the + // service does not exist. The service endpoint ID will be used as the service + // domain name prefix. By default, the endpoint resolver requires the service + // to be known when resolving endpoints. + // + // If resolving an endpoint on the partition list, the provided region will + // be used to determine which partition's domain name pattern to combine + // with the service endpoint ID. If both the service and region are unknown when resolving + // the endpoint on the partition list, an UnknownEndpointError will be returned. + // + // If resolving an endpoint on a partition-specific resolver, that partition's + // domain name pattern will be used with the service endpoint ID. If both + // region and service do not exist when resolving an endpoint on a specific + // partition, the partition's domain pattern will be used to combine the + // endpoint and region together. + // + // This option is ignored if StrictMatching is enabled. + ResolveUnknownService bool + + // STS Regional Endpoint flag helps with resolving the STS endpoint + STSRegionalEndpoint STSRegionalEndpoint + + // S3 Regional Endpoint flag helps with resolving the S3 endpoint + S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint +} + +// STSRegionalEndpoint is an enum for the states of the STS Regional Endpoint +// options. +type STSRegionalEndpoint int + +func (e STSRegionalEndpoint) String() string { + switch e { + case LegacySTSEndpoint: + return "legacy" + case RegionalSTSEndpoint: + return "regional" + case UnsetSTSEndpoint: + return "" + default: + return "unknown" + } +} + +const ( + + // UnsetSTSEndpoint represents that STS Regional Endpoint flag is not specified. + UnsetSTSEndpoint STSRegionalEndpoint = iota + + // LegacySTSEndpoint represents when STS Regional Endpoint flag is specified + // to use legacy endpoints. + LegacySTSEndpoint + + // RegionalSTSEndpoint represents when STS Regional Endpoint flag is specified + // to use regional endpoints. + RegionalSTSEndpoint +) + +// GetSTSRegionalEndpoint returns the STSRegionalEndpoint flag based +// on the input string provided in env config or shared config by the user. +// +// `legacy` and `regional` are the only valid, case-insensitive strings for +// resolving the STS regional endpoint flag. +func GetSTSRegionalEndpoint(s string) (STSRegionalEndpoint, error) { + switch { + case strings.EqualFold(s, "legacy"): + return LegacySTSEndpoint, nil + case strings.EqualFold(s, "regional"): + return RegionalSTSEndpoint, nil + default: + return UnsetSTSEndpoint, fmt.Errorf("unable to resolve the value of STSRegionalEndpoint for %v", s) + } +} + +// S3UsEast1RegionalEndpoint is an enum for the states of the S3 us-east-1 +// Regional Endpoint options. +type S3UsEast1RegionalEndpoint int + +func (e S3UsEast1RegionalEndpoint) String() string { + switch e { + case LegacyS3UsEast1Endpoint: + return "legacy" + case RegionalS3UsEast1Endpoint: + return "regional" + case UnsetS3UsEast1Endpoint: + return "" + default: + return "unknown" + } +} + +const ( + + // UnsetS3UsEast1Endpoint represents that S3 Regional Endpoint flag is not + // specified. + UnsetS3UsEast1Endpoint S3UsEast1RegionalEndpoint = iota + + // LegacyS3UsEast1Endpoint represents when S3 Regional Endpoint flag is + // specified to use legacy endpoints. + LegacyS3UsEast1Endpoint + + // RegionalS3UsEast1Endpoint represents when S3 Regional Endpoint flag is + // specified to use regional endpoints.
+ RegionalS3UsEast1Endpoint +) + +// GetS3UsEast1RegionalEndpoint returns the S3UsEast1RegionalEndpoint flag based +// on the input string provided in env config or shared config by the user. +// +// `legacy` and `regional` are the only valid, case-insensitive strings for +// resolving the S3 regional endpoint flag. +func GetS3UsEast1RegionalEndpoint(s string) (S3UsEast1RegionalEndpoint, error) { + switch { + case strings.EqualFold(s, "legacy"): + return LegacyS3UsEast1Endpoint, nil + case strings.EqualFold(s, "regional"): + return RegionalS3UsEast1Endpoint, nil + default: + return UnsetS3UsEast1Endpoint, + fmt.Errorf("unable to resolve the value of S3UsEast1RegionalEndpoint for %v", s) + } +} + +// Set combines all of the option functions together. +func (o *Options) Set(optFns ...func(*Options)) { + for _, fn := range optFns { + fn(o) + } +} + +// DisableSSLOption sets the DisableSSL option. Can be used as a functional +// option when resolving endpoints. +func DisableSSLOption(o *Options) { + o.DisableSSL = true +} + +// UseDualStackOption sets the UseDualStack option. Can be used as a functional +// option when resolving endpoints. +func UseDualStackOption(o *Options) { + o.UseDualStack = true +} + +// StrictMatchingOption sets the StrictMatching option. Can be used as a functional +// option when resolving endpoints. +func StrictMatchingOption(o *Options) { + o.StrictMatching = true +} + +// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used +// as a functional option when resolving endpoints. +func ResolveUnknownServiceOption(o *Options) { + o.ResolveUnknownService = true +} + +// STSRegionalEndpointOption enables the STS endpoint resolver behavior to resolve +// STS endpoints to their regional endpoint, instead of the global endpoint. +func STSRegionalEndpointOption(o *Options) { + o.STSRegionalEndpoint = RegionalSTSEndpoint +} + +// A Resolver provides the interface for functionality to resolve endpoints. +// The built-in Partition and the DefaultResolver return value satisfy this interface. +type Resolver interface { + EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) +} + +// ResolverFunc is a helper utility that wraps a function so it satisfies the +// Resolver interface. This is useful when you want to add additional endpoint +// resolving logic, or stub out specific endpoints with custom values. +type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) + +// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface. +func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return fn(service, region, opts...) +} + +var schemeRE = regexp.MustCompile("^([^:]+)://") + +// AddScheme adds the HTTP or HTTPS scheme to an endpoint URL if there is no +// scheme. If disableSSL is true, the scheme will be HTTP instead of the default HTTPS. +// +// If disableSSL is set, it will only set the URL's scheme if the URL does not +// contain a scheme. +func AddScheme(endpoint string, disableSSL bool) string { + if !schemeRE.MatchString(endpoint) { + scheme := "https" + if disableSSL { + scheme = "http" + } + endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) + } + + return endpoint +} + +// EnumPartitions provides a way to retrieve the underlying partitions that +// make up the SDK's default Resolver, or any resolver decoded from a model +// file.
+//
+// Use this interface with DefaultResolver and DecodeModels to get the list of
+// Partitions.
+type EnumPartitions interface {
+	Partitions() []Partition
+}
+
+// RegionsForService returns a map of regions for the partition and service.
+// If either the partition or service does not exist, false will be returned
+// as the second parameter.
+//
+// This example shows how to get the regions for DynamoDB in the AWS partition.
+//	rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID)
+//
+// This is equivalent to using the partition directly.
+//	rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions()
+func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) {
+	for _, p := range ps {
+		if p.ID() != partitionID {
+			continue
+		}
+		if _, ok := p.p.Services[serviceID]; !ok {
+			break
+		}
+
+		s := Service{
+			id: serviceID,
+			p:  p.p,
+		}
+		return s.Regions(), true
+	}
+
+	return map[string]Region{}, false
+}
+
+// PartitionForRegion returns the first partition which includes the region
+// passed in. This includes both known regions and regions which match
+// a pattern supported by the partition which may include regions that are
+// not explicitly known by the partition. Use the Regions method of the
+// returned Partition if explicit support is needed.
+func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) {
+	for _, p := range ps {
+		if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) {
+			return p, true
+		}
+	}
+
+	return Partition{}, false
+}
+
+// A Partition provides the ability to enumerate the partition's regions
+// and services.
+type Partition struct {
+	id, dnsSuffix string
+	p             *partition
+}
+
+// DNSSuffix returns the base domain name of the partition.
+func (p Partition) DNSSuffix() string { return p.dnsSuffix }
+
+// ID returns the identifier of the partition.
+func (p Partition) ID() string { return p.id }
+
+// EndpointFor attempts to resolve the endpoint based on service and region.
+// See Options for information on configuring how the endpoint is resolved.
+//
+// If the service cannot be found in the metadata the UnknownServiceError
+// error will be returned. This validation will occur regardless of whether
+// StrictMatching is enabled. To enable resolving unknown services set the
+// "ResolveUnknownService" option to true. When StrictMatching is disabled
+// this option allows the partition resolver to resolve an endpoint based on
+// the service endpoint ID provided.
+//
+// When resolving endpoints you can choose to enable StrictMatching. This will
+// require the provided service and region to be known by the partition.
+// If the endpoint cannot be strictly resolved an error will be returned. This
+// mode is useful to ensure the endpoint resolved is valid. Without
+// StrictMatching enabled the endpoint returned may look valid but may not work.
+// StrictMatching requires the SDK to be updated if you want to take advantage
+// of new regions and services expansions.
+//
+// Errors that can be returned.
+//   * UnknownServiceError
+//   * UnknownEndpointError
+func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return p.p.EndpointFor(service, region, opts...)
+}
+
+// Regions returns a map of Regions indexed by their ID. This is useful for
+// enumerating over the regions in a partition.
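+//
+// For example (a sketch; the exact set depends on the bundled endpoints model):
+//
+//	for id, r := range p.Regions() {
+//		fmt.Println(id, "-", r.Description())
+//	}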
+func (p Partition) Regions() map[string]Region {
+	rs := make(map[string]Region, len(p.p.Regions))
+	for id, r := range p.p.Regions {
+		rs[id] = Region{
+			id:   id,
+			desc: r.Description,
+			p:    p.p,
+		}
+	}
+
+	return rs
+}
+
+// Services returns a map of Service indexed by their ID. This is useful for
+// enumerating over the services in a partition.
+func (p Partition) Services() map[string]Service {
+	ss := make(map[string]Service, len(p.p.Services))
+	for id := range p.p.Services {
+		ss[id] = Service{
+			id: id,
+			p:  p.p,
+		}
+	}
+
+	return ss
+}
+
+// A Region provides information about a region, and the ability to resolve an
+// endpoint from the context of a region, given a service.
+type Region struct {
+	id, desc string
+	p        *partition
+}
+
+// ID returns the region's identifier.
+func (r Region) ID() string { return r.id }
+
+// Description returns the region's description. The region description
+// is free text, it can be empty, and it may change between SDK releases.
+func (r Region) Description() string { return r.desc }
+
+// ResolveEndpoint resolves an endpoint from the context of the region given
+// a service. See Partition.EndpointFor for usage and errors that can be returned.
+func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return r.p.EndpointFor(service, r.id, opts...)
+}
+
+// Services returns a list of all services that are known to be in this region.
+func (r Region) Services() map[string]Service {
+	ss := map[string]Service{}
+	for id, s := range r.p.Services {
+		if _, ok := s.Endpoints[r.id]; ok {
+			ss[id] = Service{
+				id: id,
+				p:  r.p,
+			}
+		}
+	}
+
+	return ss
+}
+
+// A Service provides information about a service, and the ability to resolve
+// an endpoint from the context of a service, given a region.
+type Service struct {
+	id string
+	p  *partition
+}
+
+// ID returns the identifier for the service.
+func (s Service) ID() string { return s.id }
+
+// ResolveEndpoint resolves an endpoint from the context of a service given
+// a region. See Partition.EndpointFor for usage and errors that can be returned.
+func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return s.p.EndpointFor(s.id, region, opts...)
+}
+
+// Regions returns a map of Regions that the service is present in.
+//
+// A region is the AWS region the service exists in, whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Regions() map[string]Region {
+	rs := map[string]Region{}
+	for id := range s.p.Services[s.id].Endpoints {
+		if r, ok := s.p.Regions[id]; ok {
+			rs[id] = Region{
+				id:   id,
+				desc: r.Description,
+				p:    s.p,
+			}
+		}
+	}
+
+	return rs
+}
+
+// Endpoints returns a map of Endpoints indexed by their ID for all known
+// endpoints for a service.
+//
+// A region is the AWS region the service exists in, whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Endpoints() map[string]Endpoint {
+	es := make(map[string]Endpoint, len(s.p.Services[s.id].Endpoints))
+	for id := range s.p.Services[s.id].Endpoints {
+		es[id] = Endpoint{
+			id:        id,
+			serviceID: s.id,
+			p:         s.p,
+		}
+	}
+
+	return es
+}
+
+// An Endpoint provides information about endpoints, and provides the ability
+// to resolve that endpoint for the service, and the region the endpoint
+// represents.
+type Endpoint struct {
+	id        string
+	serviceID string
+	p         *partition
+}
+
+// ID returns the identifier for an endpoint.
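+//
+// Endpoint IDs are typically region IDs such as "us-east-1", or partition
+// endpoint names such as "aws-global" (illustrative values; the actual IDs
+// come from the bundled endpoints model). For example:
+//
+//	for id := range endpoints.AwsPartition().Services()["s3"].Endpoints() {
+//		fmt.Println(id)
+//	}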
+func (e Endpoint) ID() string { return e.id }
+
+// ServiceID returns the identifier of the service the endpoint belongs to.
+func (e Endpoint) ServiceID() string { return e.serviceID }
+
+// ResolveEndpoint resolves an endpoint from the context of a service and
+// region the endpoint represents. See Partition.EndpointFor for usage and
+// errors that can be returned.
+func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return e.p.EndpointFor(e.serviceID, e.id, opts...)
+}
+
+// A ResolvedEndpoint is an endpoint that has been resolved based on a partition,
+// service, and region.
+type ResolvedEndpoint struct {
+	// The endpoint URL.
+	URL string
+
+	// The endpoint partition.
+	PartitionID string
+
+	// The region that should be used for signing requests.
+	SigningRegion string
+
+	// The service name that should be used for signing requests.
+	SigningName string
+
+	// States that the signing name for this endpoint was derived from metadata
+	// passed in, but was not explicitly modeled.
+	SigningNameDerived bool
+
+	// The signing method that should be used for signing requests.
+	SigningMethod string
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError awserr.Error
+
+// An EndpointNotFoundError is returned when in StrictMatching mode, and the
+// endpoint for the service and region cannot be found in any of the partitions.
+type EndpointNotFoundError struct {
+	awsError
+	Partition string
+	Service   string
+	Region    string
+}
+
+// An UnknownServiceError is returned when the service does not resolve to an
+// endpoint. Includes a list of all known services for the partition. Returned
+// when a partition does not support the service.
+type UnknownServiceError struct {
+	awsError
+	Partition string
+	Service   string
+	Known     []string
+}
+
+// NewUnknownServiceError builds and returns UnknownServiceError.
+func NewUnknownServiceError(p, s string, known []string) UnknownServiceError {
+	return UnknownServiceError{
+		awsError: awserr.New("UnknownServiceError",
+			"could not resolve endpoint for unknown service", nil),
+		Partition: p,
+		Service:   s,
+		Known:     known,
+	}
+}
+
+// Error returns the string representation of the error.
+func (e UnknownServiceError) Error() string {
+	extra := fmt.Sprintf("partition: %q, service: %q",
+		e.Partition, e.Service)
+	if len(e.Known) > 0 {
+		extra += fmt.Sprintf(", known: %v", e.Known)
+	}
+	return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownServiceError) String() string {
+	return e.Error()
+}
+
+// An UnknownEndpointError is returned when in StrictMatching mode and the
+// service is valid, but the region does not resolve to an endpoint. Includes
+// a list of all known endpoints for the service.
+type UnknownEndpointError struct {
+	awsError
+	Partition string
+	Service   string
+	Region    string
+	Known     []string
+}
+
+// NewUnknownEndpointError builds and returns UnknownEndpointError.
+func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError {
+	return UnknownEndpointError{
+		awsError: awserr.New("UnknownEndpointError",
+			"could not resolve endpoint", nil),
+		Partition: p,
+		Service:   s,
+		Region:    r,
+		Known:     known,
+	}
+}
+
+// Error returns the string representation of the error.
+func (e UnknownEndpointError) Error() string { + extra := fmt.Sprintf("partition: %q, service: %q, region: %q", + e.Partition, e.Service, e.Region) + if len(e.Known) > 0 { + extra += fmt.Sprintf(", known: %v", e.Known) + } + return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +func (e UnknownEndpointError) String() string { + return e.Error() +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go new file mode 100644 index 000000000..df75e899a --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go @@ -0,0 +1,24 @@ +package endpoints + +var legacyGlobalRegions = map[string]map[string]struct{}{ + "sts": { + "ap-northeast-1": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-north-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "eu-west-3": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {}, + }, + "s3": { + "us-east-1": {}, + }, +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go new file mode 100644 index 000000000..eb2ac83c9 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -0,0 +1,341 @@ +package endpoints + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +type partitions []partition + +func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + var opt Options + opt.Set(opts...) + + for i := 0; i < len(ps); i++ { + if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) { + continue + } + + return ps[i].EndpointFor(service, region, opts...) + } + + // If loose matching fallback to first partition format to use + // when resolving the endpoint. + if !opt.StrictMatching && len(ps) > 0 { + return ps[0].EndpointFor(service, region, opts...) + } + + return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{}) +} + +// Partitions satisfies the EnumPartitions interface and returns a list +// of Partitions representing each partition represented in the SDK's +// endpoints model. 
+func (ps partitions) Partitions() []Partition { + parts := make([]Partition, 0, len(ps)) + for i := 0; i < len(ps); i++ { + parts = append(parts, ps[i].Partition()) + } + + return parts +} + +type partition struct { + ID string `json:"partition"` + Name string `json:"partitionName"` + DNSSuffix string `json:"dnsSuffix"` + RegionRegex regionRegex `json:"regionRegex"` + Defaults endpoint `json:"defaults"` + Regions regions `json:"regions"` + Services services `json:"services"` +} + +func (p partition) Partition() Partition { + return Partition{ + dnsSuffix: p.DNSSuffix, + id: p.ID, + p: &p, + } +} + +func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool { + s, hasService := p.Services[service] + _, hasEndpoint := s.Endpoints[region] + + if hasEndpoint && hasService { + return true + } + + if strictMatch { + return false + } + + return p.RegionRegex.MatchString(region) +} + +func allowLegacyEmptyRegion(service string) bool { + legacy := map[string]struct{}{ + "budgets": {}, + "ce": {}, + "chime": {}, + "cloudfront": {}, + "ec2metadata": {}, + "iam": {}, + "importexport": {}, + "organizations": {}, + "route53": {}, + "sts": {}, + "support": {}, + "waf": {}, + } + + _, allowed := legacy[service] + return allowed +} + +func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) { + var opt Options + opt.Set(opts...) + + s, hasService := p.Services[service] + if len(service) == 0 || !(hasService || opt.ResolveUnknownService) { + // Only return error if the resolver will not fallback to creating + // endpoint based on service endpoint ID passed in. + return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) + } + + if len(region) == 0 && allowLegacyEmptyRegion(service) && len(s.PartitionEndpoint) != 0 { + region = s.PartitionEndpoint + } + + if (service == "sts" && opt.STSRegionalEndpoint != RegionalSTSEndpoint) || + (service == "s3" && opt.S3UsEast1RegionalEndpoint != RegionalS3UsEast1Endpoint) { + if _, ok := legacyGlobalRegions[service][region]; ok { + region = "aws-global" + } + } + + e, hasEndpoint := s.endpointForRegion(region) + if len(region) == 0 || (!hasEndpoint && opt.StrictMatching) { + return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints)) + } + + defs := []endpoint{p.Defaults, s.Defaults} + + return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt), nil +} + +func serviceList(ss services) []string { + list := make([]string, 0, len(ss)) + for k := range ss { + list = append(list, k) + } + return list +} +func endpointList(es endpoints) []string { + list := make([]string, 0, len(es)) + for k := range es { + list = append(list, k) + } + return list +} + +type regionRegex struct { + *regexp.Regexp +} + +func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) { + // Strip leading and trailing quotes + regex, err := strconv.Unquote(string(b)) + if err != nil { + return fmt.Errorf("unable to strip quotes from regex, %v", err) + } + + rr.Regexp, err = regexp.Compile(regex) + if err != nil { + return fmt.Errorf("unable to unmarshal region regex, %v", err) + } + return nil +} + +type regions map[string]region + +type region struct { + Description string `json:"description"` +} + +type services map[string]service + +type service struct { + PartitionEndpoint string `json:"partitionEndpoint"` + IsRegionalized boxedBool `json:"isRegionalized,omitempty"` + Defaults endpoint `json:"defaults"` + Endpoints endpoints `json:"endpoints"` +} + +func 
(s *service) endpointForRegion(region string) (endpoint, bool) { + if s.IsRegionalized == boxedFalse { + return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint + } + + if e, ok := s.Endpoints[region]; ok { + return e, true + } + + // Unable to find any matching endpoint, return + // blank that will be used for generic endpoint creation. + return endpoint{}, false +} + +type endpoints map[string]endpoint + +type endpoint struct { + Hostname string `json:"hostname"` + Protocols []string `json:"protocols"` + CredentialScope credentialScope `json:"credentialScope"` + + // Custom fields not modeled + HasDualStack boxedBool `json:"-"` + DualStackHostname string `json:"-"` + + // Signature Version not used + SignatureVersions []string `json:"signatureVersions"` + + // SSLCommonName not used. + SSLCommonName string `json:"sslCommonName"` +} + +const ( + defaultProtocol = "https" + defaultSigner = "v4" +) + +var ( + protocolPriority = []string{"https", "http"} + signerPriority = []string{"v4", "v2"} +) + +func getByPriority(s []string, p []string, def string) string { + if len(s) == 0 { + return def + } + + for i := 0; i < len(p); i++ { + for j := 0; j < len(s); j++ { + if s[j] == p[i] { + return s[j] + } + } + } + + return s[0] +} + +func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint { + var merged endpoint + for _, def := range defs { + merged.mergeIn(def) + } + merged.mergeIn(e) + e = merged + + signingRegion := e.CredentialScope.Region + if len(signingRegion) == 0 { + signingRegion = region + } + + signingName := e.CredentialScope.Service + var signingNameDerived bool + if len(signingName) == 0 { + signingName = service + signingNameDerived = true + } + + hostname := e.Hostname + // Offset the hostname for dualstack if enabled + if opts.UseDualStack && e.HasDualStack == boxedTrue { + hostname = e.DualStackHostname + region = signingRegion + } + + u := strings.Replace(hostname, "{service}", service, 1) + u = strings.Replace(u, "{region}", region, 1) + u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1) + + scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) + u = fmt.Sprintf("%s://%s", scheme, u) + + return ResolvedEndpoint{ + URL: u, + PartitionID: partitionID, + SigningRegion: signingRegion, + SigningName: signingName, + SigningNameDerived: signingNameDerived, + SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), + } +} + +func getEndpointScheme(protocols []string, disableSSL bool) string { + if disableSSL { + return "http" + } + + return getByPriority(protocols, protocolPriority, defaultProtocol) +} + +func (e *endpoint) mergeIn(other endpoint) { + if len(other.Hostname) > 0 { + e.Hostname = other.Hostname + } + if len(other.Protocols) > 0 { + e.Protocols = other.Protocols + } + if len(other.SignatureVersions) > 0 { + e.SignatureVersions = other.SignatureVersions + } + if len(other.CredentialScope.Region) > 0 { + e.CredentialScope.Region = other.CredentialScope.Region + } + if len(other.CredentialScope.Service) > 0 { + e.CredentialScope.Service = other.CredentialScope.Service + } + if len(other.SSLCommonName) > 0 { + e.SSLCommonName = other.SSLCommonName + } + if other.HasDualStack != boxedBoolUnset { + e.HasDualStack = other.HasDualStack + } + if len(other.DualStackHostname) > 0 { + e.DualStackHostname = other.DualStackHostname + } +} + +type credentialScope struct { + Region string `json:"region"` + Service string `json:"service"` +} + +type boxedBool int + +func (b 
*boxedBool) UnmarshalJSON(buf []byte) error { + v, err := strconv.ParseBool(string(buf)) + if err != nil { + return err + } + + if v { + *b = boxedTrue + } else { + *b = boxedFalse + } + + return nil +} + +const ( + boxedBoolUnset boxedBool = iota + boxedFalse + boxedTrue +) diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go new file mode 100644 index 000000000..0fdfcc56e --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go @@ -0,0 +1,351 @@ +// +build codegen + +package endpoints + +import ( + "fmt" + "io" + "reflect" + "strings" + "text/template" + "unicode" +) + +// A CodeGenOptions are the options for code generating the endpoints into +// Go code from the endpoints model definition. +type CodeGenOptions struct { + // Options for how the model will be decoded. + DecodeModelOptions DecodeModelOptions + + // Disables code generation of the service endpoint prefix IDs defined in + // the model. + DisableGenerateServiceIDs bool +} + +// Set combines all of the option functions together +func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) { + for _, fn := range optFns { + fn(d) + } +} + +// CodeGenModel given a endpoints model file will decode it and attempt to +// generate Go code from the model definition. Error will be returned if +// the code is unable to be generated, or decoded. +func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error { + var opts CodeGenOptions + opts.Set(optFns...) + + resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) { + *d = opts.DecodeModelOptions + }) + if err != nil { + return err + } + + v := struct { + Resolver + CodeGenOptions + }{ + Resolver: resolver, + CodeGenOptions: opts, + } + + tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) + if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil { + return fmt.Errorf("failed to execute template, %v", err) + } + + return nil +} + +func toSymbol(v string) string { + out := []rune{} + for _, c := range strings.Title(v) { + if !(unicode.IsNumber(c) || unicode.IsLetter(c)) { + continue + } + + out = append(out, c) + } + + return string(out) +} + +func quoteString(v string) string { + return fmt.Sprintf("%q", v) +} + +func regionConstName(p, r string) string { + return toSymbol(p) + toSymbol(r) +} + +func partitionGetter(id string) string { + return fmt.Sprintf("%sPartition", toSymbol(id)) +} + +func partitionVarName(id string) string { + return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id))) +} + +func listPartitionNames(ps partitions) string { + names := []string{} + switch len(ps) { + case 1: + return ps[0].Name + case 2: + return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name) + default: + for i, p := range ps { + if i == len(ps)-1 { + names = append(names, "and "+p.Name) + } else { + names = append(names, p.Name) + } + } + return strings.Join(names, ", ") + } +} + +func boxedBoolIfSet(msg string, v boxedBool) string { + switch v { + case boxedTrue: + return fmt.Sprintf(msg, "boxedTrue") + case boxedFalse: + return fmt.Sprintf(msg, "boxedFalse") + default: + return "" + } +} + +func stringIfSet(msg, v string) string { + if len(v) == 0 { + return "" + } + + return fmt.Sprintf(msg, v) +} + +func stringSliceIfSet(msg string, vs []string) string { + if len(vs) == 0 { + return "" + } + + names := []string{} + for _, v := range vs { + names 
= append(names, `"`+v+`"`) + } + + return fmt.Sprintf(msg, strings.Join(names, ",")) +} + +func endpointIsSet(v endpoint) bool { + return !reflect.DeepEqual(v, endpoint{}) +} + +func serviceSet(ps partitions) map[string]struct{} { + set := map[string]struct{}{} + for _, p := range ps { + for id := range p.Services { + set[id] = struct{}{} + } + } + + return set +} + +var funcMap = template.FuncMap{ + "ToSymbol": toSymbol, + "QuoteString": quoteString, + "RegionConst": regionConstName, + "PartitionGetter": partitionGetter, + "PartitionVarName": partitionVarName, + "ListPartitionNames": listPartitionNames, + "BoxedBoolIfSet": boxedBoolIfSet, + "StringIfSet": stringIfSet, + "StringSliceIfSet": stringSliceIfSet, + "EndpointIsSet": endpointIsSet, + "ServicesSet": serviceSet, +} + +const v3Tmpl = ` +{{ define "defaults" -}} +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + + {{ template "partition consts" $.Resolver }} + + {{ range $_, $partition := $.Resolver }} + {{ template "partition region consts" $partition }} + {{ end }} + + {{ if not $.DisableGenerateServiceIDs -}} + {{ template "service consts" $.Resolver }} + {{- end }} + + {{ template "endpoint resolvers" $.Resolver }} +{{- end }} + +{{ define "partition consts" }} + // Partition identifiers + const ( + {{ range $_, $p := . -}} + {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition. + {{ end -}} + ) +{{- end }} + +{{ define "partition region consts" }} + // {{ .Name }} partition's regions. + const ( + {{ range $id, $region := .Regions -}} + {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}. + {{ end -}} + ) +{{- end }} + +{{ define "service consts" }} + // Service identifiers + const ( + {{ $serviceSet := ServicesSet . -}} + {{ range $id, $_ := $serviceSet -}} + {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}. + {{ end -}} + ) +{{- end }} + +{{ define "endpoint resolvers" }} + // DefaultResolver returns an Endpoint resolver that will be able + // to resolve endpoints for: {{ ListPartitionNames . }}. + // + // Use DefaultPartitions() to get the list of the default partitions. + func DefaultResolver() Resolver { + return defaultPartitions + } + + // DefaultPartitions returns a list of the partitions the SDK is bundled + // with. The available partitions are: {{ ListPartitionNames . }}. + // + // partitions := endpoints.DefaultPartitions + // for _, p := range partitions { + // // ... inspect partitions + // } + func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() + } + + var defaultPartitions = partitions{ + {{ range $_, $partition := . -}} + {{ PartitionVarName $partition.ID }}, + {{ end }} + } + + {{ range $_, $partition := . -}} + {{ $name := PartitionGetter $partition.ID -}} + // {{ $name }} returns the Resolver for {{ $partition.Name }}. + func {{ $name }}() Partition { + return {{ PartitionVarName $partition.ID }}.Partition() + } + var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }} + {{ end }} +{{ end }} + +{{ define "default partitions" }} + func DefaultPartitions() []Partition { + return []partition{ + {{ range $_, $partition := . 
-}} + // {{ ToSymbol $partition.ID}}Partition(), + {{ end }} + } + } +{{ end }} + +{{ define "gocode Partition" -}} +partition{ + {{ StringIfSet "ID: %q,\n" .ID -}} + {{ StringIfSet "Name: %q,\n" .Name -}} + {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} + RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }}, + {{ if EndpointIsSet .Defaults -}} + Defaults: {{ template "gocode Endpoint" .Defaults }}, + {{- end }} + Regions: {{ template "gocode Regions" .Regions }}, + Services: {{ template "gocode Services" .Services }}, +} +{{- end }} + +{{ define "gocode RegionRegex" -}} +regionRegex{ + Regexp: func() *regexp.Regexp{ + reg, _ := regexp.Compile({{ QuoteString .Regexp.String }}) + return reg + }(), +} +{{- end }} + +{{ define "gocode Regions" -}} +regions{ + {{ range $id, $region := . -}} + "{{ $id }}": {{ template "gocode Region" $region }}, + {{ end -}} +} +{{- end }} + +{{ define "gocode Region" -}} +region{ + {{ StringIfSet "Description: %q,\n" .Description -}} +} +{{- end }} + +{{ define "gocode Services" -}} +services{ + {{ range $id, $service := . -}} + "{{ $id }}": {{ template "gocode Service" $service }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Service" -}} +service{ + {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}} + {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}} + {{ if EndpointIsSet .Defaults -}} + Defaults: {{ template "gocode Endpoint" .Defaults -}}, + {{- end }} + {{ if .Endpoints -}} + Endpoints: {{ template "gocode Endpoints" .Endpoints }}, + {{- end }} +} +{{- end }} + +{{ define "gocode Endpoints" -}} +endpoints{ + {{ range $id, $endpoint := . -}} + "{{ $id }}": {{ template "gocode Endpoint" $endpoint }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Endpoint" -}} +endpoint{ + {{ StringIfSet "Hostname: %q,\n" .Hostname -}} + {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}} + {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}} + {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}} + {{ if or .CredentialScope.Region .CredentialScope.Service -}} + CredentialScope: credentialScope{ + {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}} + {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}} + }, + {{- end }} + {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}} + {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}} + +} +{{- end }} +` diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/errors.go new file mode 100644 index 000000000..fa06f7a8f --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/errors.go @@ -0,0 +1,13 @@ +package aws + +import "github.com/aws/aws-sdk-go/aws/awserr" + +var ( + // ErrMissingRegion is an error that is returned if region configuration is + // not found. + ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) + + // ErrMissingEndpoint is an error that is returned if an endpoint cannot be + // resolved for a service. 
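	//
	// A sketch of how a caller might detect it (the error code string matches
	// the constructor below; the surrounding code is assumed):
	//
	//	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "MissingEndpoint" {
	//		// set Config.Endpoint, or use a region the resolver knows
	//	}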
+	ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
+)
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
new file mode 100644
index 000000000..91a6f277a
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
@@ -0,0 +1,12 @@
+package aws
+
+// JSONValue is a representation of a grab bag type that will be marshaled
+// into a json string. This type can be used just like any other map.
+//
+// Example:
+//
+//	values := aws.JSONValue{
+//		"Foo": "Bar",
+//	}
+//	values["Baz"] = "Qux"
+type JSONValue map[string]interface{}
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/logger.go
new file mode 100644
index 000000000..6ed15b2ec
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/logger.go
@@ -0,0 +1,118 @@
+package aws
+
+import (
+	"log"
+	"os"
+)
+
+// A LogLevelType defines the level logging should be performed at. Used to instruct
+// the SDK which statements should be logged.
+type LogLevelType uint
+
+// LogLevel returns the pointer to a LogLevel. Should be used as a workaround
+// for not being able to take the address of a non-composite literal.
+func LogLevel(l LogLevelType) *LogLevelType {
+	return &l
+}
+
+// Value returns the LogLevel value or the default value LogOff if the LogLevel
+// is nil. Safe to use on nil value LogLevelTypes.
+func (l *LogLevelType) Value() LogLevelType {
+	if l != nil {
+		return *l
+	}
+	return LogOff
+}
+
+// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
+// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
+// LogLevel is nil, will default to LogOff comparison.
+func (l *LogLevelType) Matches(v LogLevelType) bool {
+	c := l.Value()
+	return c&v == v
+}
+
+// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
+// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
+// to LogOff comparison.
+func (l *LogLevelType) AtLeast(v LogLevelType) bool {
+	c := l.Value()
+	return c >= v
+}
+
+const (
+	// LogOff states that no logging should be performed by the SDK. This is the
+	// default state of the SDK, and should be used to disable all logging.
+	LogOff LogLevelType = iota * 0x1000
+
+	// LogDebug states that debug output should be logged by the SDK. This should
+	// be used to inspect requests made and responses received.
+	LogDebug
+)
+
+// Debug Logging Sub Levels
+const (
+	// LogDebugWithSigning states that the SDK should log request signing and
+	// presigning events. This should be used to log the signing details of
+	// requests for debugging. Will also enable LogDebug.
+	LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
+
+	// LogDebugWithHTTPBody states the SDK should log HTTP request and response
+	// HTTP bodies in addition to the headers and path. This should be used to
+	// see the body content of requests and responses made while using the SDK.
+	// Will also enable LogDebug.
+	LogDebugWithHTTPBody
+
+	// LogDebugWithRequestRetries states the SDK should log when service requests
+	// will be retried. This should be used when you want to log when service
+	// requests are being retried. Will also enable LogDebug.
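+	//
+	// Enabled, for instance, via the client config (an illustrative sketch):
+	//
+	//	cfg := aws.Config{LogLevel: aws.LogLevel(aws.LogDebugWithRequestRetries)}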
+	LogDebugWithRequestRetries
+
+	// LogDebugWithRequestErrors states the SDK should log when service requests fail
+	// to build, send, validate, or unmarshal.
+	LogDebugWithRequestErrors
+
+	// LogDebugWithEventStreamBody states the SDK should log EventStream
+	// request and response bodies. This should be used to log the EventStream
+	// wire unmarshaled message content of requests and responses made while
+	// using the SDK. Will also enable LogDebug.
+	LogDebugWithEventStreamBody
+)
+
+// A Logger is a minimalistic interface for the SDK to log messages to. Should
+// be used to provide custom logging writers for the SDK to use.
+type Logger interface {
+	Log(...interface{})
+}
+
+// A LoggerFunc is a convenience type that wraps a function taking a variadic
+// list of arguments so the Logger interface can be used.
+//
+// Example:
+//	s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
+//		fmt.Fprintln(os.Stdout, args...)
+//	})})
+type LoggerFunc func(...interface{})
+
+// Log calls the wrapped function with the arguments provided.
+func (f LoggerFunc) Log(args ...interface{}) {
+	f(args...)
+}
+
+// NewDefaultLogger returns a Logger which will write log messages to stdout, and
+// use the same formatting runes as the stdlib log.Logger.
+func NewDefaultLogger() Logger {
+	return &defaultLogger{
+		logger: log.New(os.Stdout, "", log.LstdFlags),
+	}
+}
+
+// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
+type defaultLogger struct {
+	logger *log.Logger
+}
+
+// Log logs the parameters to the stdlib logger. See log.Println.
+func (l defaultLogger) Log(args ...interface{}) {
+	l.logger.Println(args...)
+}
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
new file mode 100644
index 000000000..d9b37f4d3
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
@@ -0,0 +1,18 @@
+package request
+
+import (
+	"strings"
+)
+
+// isErrConnectionReset returns true for "connection reset" and "broken pipe"
+// errors, which are treated as retryable. Errors containing
+// "read: connection reset" are excluded and return false, as the response
+// may already have been partially read.
+func isErrConnectionReset(err error) bool {
+	if strings.Contains(err.Error(), "read: connection reset") {
+		return false
+	}
+
+	if strings.Contains(err.Error(), "connection reset") ||
+		strings.Contains(err.Error(), "broken pipe") {
+		return true
+	}
+
+	return false
+}
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
new file mode 100644
index 000000000..e819ab6c0
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
@@ -0,0 +1,343 @@
+package request
+
+import (
+	"fmt"
+	"strings"
+)
+
+// A Handlers provides a collection of request handlers for various
+// stages of handling requests.
+type Handlers struct {
+	Validate         HandlerList
+	Build            HandlerList
+	BuildStream      HandlerList
+	Sign             HandlerList
+	Send             HandlerList
+	ValidateResponse HandlerList
+	Unmarshal        HandlerList
+	UnmarshalStream  HandlerList
+	UnmarshalMeta    HandlerList
+	UnmarshalError   HandlerList
+	Retry            HandlerList
+	AfterRetry       HandlerList
+	CompleteAttempt  HandlerList
+	Complete         HandlerList
+}
+
+// Copy returns a copy of this handler's lists.
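+//
+// Copying is what lets each request mutate its own handler lists without
+// affecting the client's shared Handlers (New in this package copies the
+// handlers it is given). A sketch (myHandler is hypothetical):
+//
+//	perReq := h.Copy()
+//	perReq.Send.PushBack(myHandler) // h itself is unchanged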
+func (h *Handlers) Copy() Handlers {
+	return Handlers{
+		Validate:         h.Validate.copy(),
+		Build:            h.Build.copy(),
+		BuildStream:      h.BuildStream.copy(),
+		Sign:             h.Sign.copy(),
+		Send:             h.Send.copy(),
+		ValidateResponse: h.ValidateResponse.copy(),
+		Unmarshal:        h.Unmarshal.copy(),
+		UnmarshalStream:  h.UnmarshalStream.copy(),
+		UnmarshalError:   h.UnmarshalError.copy(),
+		UnmarshalMeta:    h.UnmarshalMeta.copy(),
+		Retry:            h.Retry.copy(),
+		AfterRetry:       h.AfterRetry.copy(),
+		CompleteAttempt:  h.CompleteAttempt.copy(),
+		Complete:         h.Complete.copy(),
+	}
+}
+
+// Clear removes callback functions for all handlers.
+func (h *Handlers) Clear() {
+	h.Validate.Clear()
+	h.Build.Clear()
+	h.BuildStream.Clear()
+	h.Send.Clear()
+	h.Sign.Clear()
+	h.Unmarshal.Clear()
+	h.UnmarshalStream.Clear()
+	h.UnmarshalMeta.Clear()
+	h.UnmarshalError.Clear()
+	h.ValidateResponse.Clear()
+	h.Retry.Clear()
+	h.AfterRetry.Clear()
+	h.CompleteAttempt.Clear()
+	h.Complete.Clear()
+}
+
+// IsEmpty returns true if there are no handlers in any of the handler lists.
+func (h *Handlers) IsEmpty() bool {
+	if h.Validate.Len() != 0 {
+		return false
+	}
+	if h.Build.Len() != 0 {
+		return false
+	}
+	if h.BuildStream.Len() != 0 {
+		return false
+	}
+	if h.Send.Len() != 0 {
+		return false
+	}
+	if h.Sign.Len() != 0 {
+		return false
+	}
+	if h.Unmarshal.Len() != 0 {
+		return false
+	}
+	if h.UnmarshalStream.Len() != 0 {
+		return false
+	}
+	if h.UnmarshalMeta.Len() != 0 {
+		return false
+	}
+	if h.UnmarshalError.Len() != 0 {
+		return false
+	}
+	if h.ValidateResponse.Len() != 0 {
+		return false
+	}
+	if h.Retry.Len() != 0 {
+		return false
+	}
+	if h.AfterRetry.Len() != 0 {
+		return false
+	}
+	if h.CompleteAttempt.Len() != 0 {
+		return false
+	}
+	if h.Complete.Len() != 0 {
+		return false
+	}
+
+	return true
+}
+
+// A HandlerListRunItem represents an entry in the HandlerList which
+// is being run.
+type HandlerListRunItem struct {
+	Index   int
+	Handler NamedHandler
+	Request *Request
+}
+
+// A HandlerList manages zero or more handlers in a list.
+type HandlerList struct {
+	list []NamedHandler
+
+	// Called after each request handler in the list is called. If set
+	// and the func returns true the HandlerList will continue to iterate
+	// over the request handlers. If false is returned the HandlerList
+	// will stop iterating.
+	//
+	// Should be used if extra logic is to be performed between each handler
+	// in the list. This can be used to terminate the list's iteration
+	// based on a condition, such as stopping on error (HandlerListStopOnError),
+	// or for logging (HandlerListLogItem).
+	AfterEachFn func(item HandlerListRunItem) bool
+}
+
+// A NamedHandler is a struct that contains a name and function callback.
+type NamedHandler struct {
+	Name string
+	Fn   func(*Request)
+}
+
+// copy creates a copy of the handler list.
+func (l *HandlerList) copy() HandlerList {
+	n := HandlerList{
+		AfterEachFn: l.AfterEachFn,
+	}
+	if len(l.list) == 0 {
+		return n
+	}
+
+	n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...)
+	return n
+}
+
+// Clear clears the handler list.
+func (l *HandlerList) Clear() {
+	l.list = l.list[0:0]
+}
+
+// Len returns the number of handlers in the list.
+func (l *HandlerList) Len() int {
+	return len(l.list)
+}
+
+// PushBack pushes handler f to the back of the handler list.
+func (l *HandlerList) PushBack(f func(*Request)) {
+	l.PushBackNamed(NamedHandler{"__anonymous", f})
+}
+
+// PushBackNamed pushes named handler f to the back of the handler list.
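+//
+// For example (the handler name and body are illustrative):
+//
+//	l.PushBackNamed(request.NamedHandler{
+//		Name: "example.LogRequestID",
+//		Fn:   func(r *request.Request) { fmt.Println(r.RequestID) },
+//	})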
+func (l *HandlerList) PushBackNamed(n NamedHandler) { + if cap(l.list) == 0 { + l.list = make([]NamedHandler, 0, 5) + } + l.list = append(l.list, n) +} + +// PushFront pushes handler f to the front of the handler list. +func (l *HandlerList) PushFront(f func(*Request)) { + l.PushFrontNamed(NamedHandler{"__anonymous", f}) +} + +// PushFrontNamed pushes named handler f to the front of the handler list. +func (l *HandlerList) PushFrontNamed(n NamedHandler) { + if cap(l.list) == len(l.list) { + // Allocating new list required + l.list = append([]NamedHandler{n}, l.list...) + } else { + // Enough room to prepend into list. + l.list = append(l.list, NamedHandler{}) + copy(l.list[1:], l.list) + l.list[0] = n + } +} + +// Remove removes a NamedHandler n +func (l *HandlerList) Remove(n NamedHandler) { + l.RemoveByName(n.Name) +} + +// RemoveByName removes a NamedHandler by name. +func (l *HandlerList) RemoveByName(name string) { + for i := 0; i < len(l.list); i++ { + m := l.list[i] + if m.Name == name { + // Shift array preventing creating new arrays + copy(l.list[i:], l.list[i+1:]) + l.list[len(l.list)-1] = NamedHandler{} + l.list = l.list[:len(l.list)-1] + + // decrement list so next check to length is correct + i-- + } + } +} + +// SwapNamed will swap out any existing handlers with the same name as the +// passed in NamedHandler returning true if handlers were swapped. False is +// returned otherwise. +func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) { + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == n.Name { + l.list[i].Fn = n.Fn + swapped = true + } + } + + return swapped +} + +// Swap will swap out all handlers matching the name passed in. The matched +// handlers will be swapped in. True is returned if the handlers were swapped. +func (l *HandlerList) Swap(name string, replace NamedHandler) bool { + var swapped bool + + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == name { + l.list[i] = replace + swapped = true + } + } + + return swapped +} + +// SetBackNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the end of the list. +func (l *HandlerList) SetBackNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushBackNamed(n) + } +} + +// SetFrontNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the beginning of +// the list. +func (l *HandlerList) SetFrontNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushFrontNamed(n) + } +} + +// Run executes all handlers in the list with a given request object. +func (l *HandlerList) Run(r *Request) { + for i, h := range l.list { + h.Fn(r) + item := HandlerListRunItem{ + Index: i, Handler: h, Request: r, + } + if l.AfterEachFn != nil && !l.AfterEachFn(item) { + return + } + } +} + +// HandlerListLogItem logs the request handler and the state of the +// request's Error value. Always returns true to continue iterating +// request handlers in a HandlerList. +func HandlerListLogItem(item HandlerListRunItem) bool { + if item.Request.Config.Logger == nil { + return true + } + item.Request.Config.Logger.Log("DEBUG: RequestHandler", + item.Index, item.Handler.Name, item.Request.Error) + + return true +} + +// HandlerListStopOnError returns false to stop the HandlerList iterating +// over request handlers if Request.Error is not nil. True otherwise +// to continue iterating. 
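+//
+// Typically installed as a list's AfterEachFn (sketch):
+//
+//	l.AfterEachFn = request.HandlerListStopOnError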
+func HandlerListStopOnError(item HandlerListRunItem) bool { + return item.Request.Error == nil +} + +// WithAppendUserAgent will add a string to the user agent prefixed with a +// single white space. +func WithAppendUserAgent(s string) Option { + return func(r *Request) { + r.Handlers.Build.PushBack(func(r2 *Request) { + AddToUserAgent(r, s) + }) + } +} + +// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request +// header. If the extra parameters are provided they will be added as metadata to the +// name/version pair resulting in the following format. +// "name/version (extra0; extra1; ...)" +// The user agent part will be concatenated with this current request's user agent string. +func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { + ua := fmt.Sprintf("%s/%s", name, version) + if len(extra) > 0 { + ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) + } + return func(r *Request) { + AddToUserAgent(r, ua) + } +} + +// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. +// The input string will be concatenated with the current request's user agent string. +func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { + return func(r *Request) { + AddToUserAgent(r, s) + } +} + +// WithSetRequestHeaders updates the operation request's HTTP header to contain +// the header key value pairs provided. If the header key already exists in the +// request's HTTP header set, the existing value(s) will be replaced. +func WithSetRequestHeaders(h map[string]string) Option { + return withRequestHeader(h).SetRequestHeaders +} + +type withRequestHeader map[string]string + +func (h withRequestHeader) SetRequestHeaders(r *Request) { + for k, v := range h { + r.HTTPRequest.Header[k] = []string{v} + } +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go new file mode 100644 index 000000000..79f79602b --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go @@ -0,0 +1,24 @@ +package request + +import ( + "io" + "net/http" + "net/url" +) + +func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { + req := new(http.Request) + *req = *r + req.URL = &url.URL{} + *req.URL = *r.URL + req.Body = body + + req.Header = http.Header{} + for k, v := range r.Header { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + + return req +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go new file mode 100644 index 000000000..9370fa50c --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -0,0 +1,65 @@ +package request + +import ( + "io" + "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +// offsetReader is a thread-safe io.ReadCloser to prevent racing +// with retrying requests +type offsetReader struct { + buf io.ReadSeeker + lock sync.Mutex + closed bool +} + +func newOffsetReader(buf io.ReadSeeker, offset int64) (*offsetReader, error) { + reader := &offsetReader{} + _, err := buf.Seek(offset, sdkio.SeekStart) + if err != nil { + return nil, err + } + + reader.buf = buf + return reader, nil +} + +// Close will close the instance of the offset reader's access to +// the underlying io.ReadSeeker. 
+func (o *offsetReader) Close() error { + o.lock.Lock() + defer o.lock.Unlock() + o.closed = true + return nil +} + +// Read is a thread-safe read of the underlying io.ReadSeeker +func (o *offsetReader) Read(p []byte) (int, error) { + o.lock.Lock() + defer o.lock.Unlock() + + if o.closed { + return 0, io.EOF + } + + return o.buf.Read(p) +} + +// Seek is a thread-safe seeking operation. +func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { + o.lock.Lock() + defer o.lock.Unlock() + + return o.buf.Seek(offset, whence) +} + +// CloseAndCopy will return a new offsetReader with a copy of the old buffer +// and close the old buffer. +func (o *offsetReader) CloseAndCopy(offset int64) (*offsetReader, error) { + if err := o.Close(); err != nil { + return nil, err + } + return newOffsetReader(o.buf, offset) +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/request.go new file mode 100644 index 000000000..d597c6ead --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -0,0 +1,698 @@ +package request + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +const ( + // ErrCodeSerialization is the serialization error code that is received + // during protocol unmarshaling. + ErrCodeSerialization = "SerializationError" + + // ErrCodeRead is an error that is returned during HTTP reads. + ErrCodeRead = "ReadError" + + // ErrCodeResponseTimeout is the connection timeout error that is received + // during body reads. + ErrCodeResponseTimeout = "ResponseTimeout" + + // ErrCodeInvalidPresignExpire is returned when the expire time provided to + // presign is invalid + ErrCodeInvalidPresignExpire = "InvalidPresignExpireError" + + // CanceledErrorCode is the error code that will be returned by an + // API request that was canceled. Requests given a aws.Context may + // return this error when canceled. + CanceledErrorCode = "RequestCanceled" + + // ErrCodeRequestError is an error preventing the SDK from continuing to + // process the request. + ErrCodeRequestError = "RequestError" +) + +// A Request is the service request to be made. +type Request struct { + Config aws.Config + ClientInfo metadata.ClientInfo + Handlers Handlers + + Retryer + AttemptTime time.Time + Time time.Time + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + streamingBody io.ReadCloser + BodyStart int64 // offset from beginning of Body that the request body starts + Params interface{} + Error error + Data interface{} + RequestID string + RetryCount int + Retryable *bool + RetryDelay time.Duration + NotHoist bool + SignedHeaderVals http.Header + LastSignedAt time.Time + DisableFollowRedirects bool + + // Additional API error codes that should be retried. IsErrorRetryable + // will consider these codes in addition to its built in cases. + RetryErrorCodes []string + + // Additional API error codes that should be retried with throttle backoff + // delay. IsErrorThrottle will consider these codes in addition to its + // built in cases. + ThrottleErrorCodes []string + + // A value greater than 0 instructs the request to be signed as Presigned URL + // You should not set this field directly. 
Instead use Request's
+	// Presign or PresignRequest methods.
+	ExpireTime time.Duration
+
+	context aws.Context
+
+	built bool
+
+	// Need to persist an intermediate body between the input Body and HTTP
+	// request body because the HTTP Client's transport can maintain a reference
+	// to the HTTP request's body after the client has returned. This value is
+	// safe to use concurrently and wrap the input Body for each HTTP request.
+	safeBody *offsetReader
+}
+
+// An Operation is the service API operation to be made.
+type Operation struct {
+	Name       string
+	HTTPMethod string
+	HTTPPath   string
+	*Paginator
+
+	BeforePresignFn func(r *Request) error
+}
+
+// New returns a new Request pointer for the service API operation and
+// parameters.
+//
+// A Retryer should be provided to direct how the request is retried. If
+// Retryer is nil, a default no-retry value will be used. You can use
+// NoOpRetryer in the Client package to disable retry behavior directly.
+//
+// Params is any value of input parameters to be the request payload.
+// Data is a pointer value to an object which the request's response
+// payload will be deserialized to.
+func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
+	retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
+
+	if retryer == nil {
+		retryer = noOpRetryer{}
+	}
+
+	method := operation.HTTPMethod
+	if method == "" {
+		method = "POST"
+	}
+
+	httpReq, _ := http.NewRequest(method, "", nil)
+
+	var err error
+	httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath)
+	if err != nil {
+		httpReq.URL = &url.URL{}
+		err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
+	}
+
+	r := &Request{
+		Config:     cfg,
+		ClientInfo: clientInfo,
+		Handlers:   handlers.Copy(),
+
+		Retryer:     retryer,
+		Time:        time.Now(),
+		ExpireTime:  0,
+		Operation:   operation,
+		HTTPRequest: httpReq,
+		Body:        nil,
+		Params:      params,
+		Error:       err,
+		Data:        data,
+	}
+	r.SetBufferBody([]byte{})
+
+	return r
+}
+
+// An Option is a functional option that can augment or modify a request when
+// using a WithContext API operation method.
+type Option func(*Request)
+
+// WithGetResponseHeader builds a request Option which will retrieve a single
+// header value from the HTTP Response. If there are multiple values for the
+// header key use WithGetResponseHeaders instead to access the http.Header
+// map directly. The passed in val pointer must be non-nil.
+//
+// This Option can be used multiple times with a single API operation.
+//
+//	var id2, versionID string
+//	svc.PutObjectWithContext(ctx, params,
+//		request.WithGetResponseHeader("x-amz-id-2", &id2),
+//		request.WithGetResponseHeader("x-amz-version-id", &versionID),
+//	)
+func WithGetResponseHeader(key string, val *string) Option {
+	return func(r *Request) {
+		r.Handlers.Complete.PushBack(func(req *Request) {
+			*val = req.HTTPResponse.Header.Get(key)
+		})
+	}
+}
+
+// WithGetResponseHeaders builds a request Option which will retrieve the
+// headers from the HTTP response and assign them to the passed in headers
+// variable. The passed in headers pointer must be non-nil.
+//
+//	var headers http.Header
+//	svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers))
+func WithGetResponseHeaders(headers *http.Header) Option {
+	return func(r *Request) {
+		r.Handlers.Complete.PushBack(func(req *Request) {
+			*headers = req.HTTPResponse.Header
+		})
+	}
+}
+
+// WithLogLevel is a request option that will set the request to use a specific
+// log level when the request is made.
+//
+//	svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody))
+func WithLogLevel(l aws.LogLevelType) Option {
+	return func(r *Request) {
+		r.Config.LogLevel = aws.LogLevel(l)
+	}
+}
+
+// ApplyOptions will apply each option to the request calling them in the order
+// they were provided.
+func (r *Request) ApplyOptions(opts ...Option) {
+	for _, opt := range opts {
+		opt(r)
+	}
+}
+
+// Context always returns a non-nil context. If the Request does not have a
+// context, aws.BackgroundContext will be returned.
+func (r *Request) Context() aws.Context {
+	if r.context != nil {
+		return r.context
+	}
+	return aws.BackgroundContext()
+}
+
+// SetContext adds a Context to the current request that can be used to cancel
+// an in-flight request. The Context value must not be nil, or this method will
+// panic.
+//
+// Unlike http.Request.WithContext, SetContext does not return a copy of the
+// Request. It is not safe to use a single Request value for multiple
+// requests. A new Request should be created for each API operation request.
+//
+// Go 1.6 and below:
+// The http.Request's Cancel field will be set to the Done() value of
+// the context. This will overwrite the Cancel field's value.
+//
+// Go 1.7 and above:
+// The http.Request.WithContext will be used to set the context on the underlying
+// http.Request. This will create a shallow copy of the http.Request. The SDK
+// may create sub contexts in the future for nested requests such as retries.
+func (r *Request) SetContext(ctx aws.Context) {
+	if ctx == nil {
+		panic("context cannot be nil")
+	}
+	setRequestContext(r, ctx)
+}
+
+// WillRetry returns true if the request can be retried.
+func (r *Request) WillRetry() bool {
+	if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody {
+		return false
+	}
+	return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
+}
+
+func fmtAttemptCount(retryCount, maxRetries int) string {
+	return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries)
+}
+
+// ParamsFilled returns true if the request's parameters have been populated
+// and the parameters are valid. False is returned if no parameters are
+// provided or they are invalid.
+func (r *Request) ParamsFilled() bool {
+	return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
+}
+
+// DataFilled returns true if the request's data for response deserialization
+// target has been set and is valid. False is returned if data is not
+// set, or is invalid.
+func (r *Request) DataFilled() bool {
+	return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
+}
+
+// SetBufferBody will set the request's body bytes that will be sent to
+// the service API.
+func (r *Request) SetBufferBody(buf []byte) {
+	r.SetReaderBody(bytes.NewReader(buf))
+}
+
+// SetStringBody sets the body of the request to be backed by a string.
+func (r *Request) SetStringBody(s string) {
+	r.SetReaderBody(strings.NewReader(s))
+}
+
+// SetReaderBody will set the request's body reader.
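+//
+// The reader's current offset is recorded in BodyStart so retries rewind to
+// the same starting position. For example (payload is assumed):
+//
+//	r.SetReaderBody(bytes.NewReader(payload))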
+func (r *Request) SetReaderBody(reader io.ReadSeeker) { + r.Body = reader + + if aws.IsReaderSeekable(reader) { + var err error + // Get the Body's current offset so retries will start from the same + // initial position. + r.BodyStart, err = reader.Seek(0, sdkio.SeekCurrent) + if err != nil { + r.Error = awserr.New(ErrCodeSerialization, + "failed to determine start of request body", err) + return + } + } + r.ResetBody() +} + +// SetStreamingBody sets the reader to be used for the request that will stream +// bytes to the server. Request's Body must not be set to any reader. +func (r *Request) SetStreamingBody(reader io.ReadCloser) { + r.streamingBody = reader + r.SetReaderBody(aws.ReadSeekCloser(reader)) +} + +// Presign returns the request's signed URL. Error will be returned +// if the signing fails. The expire parameter is only used for presigned Amazon +// S3 API requests. All other AWS services will use a fixed expiration +// time of 15 minutes. +// +// It is invalid to create a presigned URL with an expire duration of 0 or less. An +// error is returned if the expire duration is 0 or less. +func (r *Request) Presign(expire time.Duration) (string, error) { + r = r.copy() + + // Presign requires all headers be hoisted. There is no way to retrieve + // the signed headers not hoisted without this, making the presigned URL + // useless. + r.NotHoist = false + + u, _, err := getPresignedURL(r, expire) + return u, err +} + +// PresignRequest behaves just like Presign, with the addition of returning a +// set of headers that were signed. The expire parameter is only used for +// presigned Amazon S3 API requests. All other AWS services will use a fixed +// expiration time of 15 minutes. +// +// It is invalid to create a presigned URL with an expire duration of 0 or less. An +// error is returned if the expire duration is 0 or less. +// +// Returns the URL string for the API operation with signature in the query string, +// and the HTTP headers that were included in the signature. These headers must +// be included in any HTTP request made with the presigned URL. +// +// To prevent hoisting any headers to the query string set NotHoist to true on +// this Request value prior to calling PresignRequest. +func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) { + r = r.copy() + return getPresignedURL(r, expire) +} + +// IsPresigned returns true if the request represents a presigned API url. +func (r *Request) IsPresigned() bool { + return r.ExpireTime != 0 +} + +func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) { + if expire <= 0 { + return "", nil, awserr.New( + ErrCodeInvalidPresignExpire, + "presigned URL requires an expire duration greater than 0", + nil, + ) + } + + r.ExpireTime = expire + + if r.Operation.BeforePresignFn != nil { + if err := r.Operation.BeforePresignFn(r); err != nil { + return "", nil, err + } + } + + if err := r.Sign(); err != nil { + return "", nil, err + } + + return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil +} + +const ( + notRetrying = "not retrying" +) + +func debugLogReqError(r *Request, stage, retryStr string, err error) { + if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { + return + } + + r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", + stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) +} + +// Build will build the request's object so it can be signed and sent +// to the service. Build will also validate all the request's parameters. 
+// Any additional build Handlers set on this request will be run +// in the order they were set. +// +// The request will only be built once. Multiple calls to build will have +// no effect. +// +// If any Validate or Build errors occur the build will stop and the error +// which occurred will be returned. +func (r *Request) Build() error { + if !r.built { + r.Handlers.Validate.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Request", notRetrying, r.Error) + return r.Error + } + r.Handlers.Build.Run(r) + if r.Error != nil { + debugLogReqError(r, "Build Request", notRetrying, r.Error) + return r.Error + } + r.built = true + } + + return r.Error +} + +// Sign will sign the request, returning error if errors are encountered. +// +// Sign will build the request prior to signing. All Sign Handlers will +// be executed in the order they were set. +func (r *Request) Sign() error { + r.Build() + if r.Error != nil { + debugLogReqError(r, "Build Request", notRetrying, r.Error) + return r.Error + } + + SanitizeHostForHeader(r.HTTPRequest) + + r.Handlers.Sign.Run(r) + return r.Error +} + +func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) { + if r.streamingBody != nil { + return r.streamingBody, nil + } + + if r.safeBody != nil { + r.safeBody.Close() + } + + r.safeBody, err = newOffsetReader(r.Body, r.BodyStart) + if err != nil { + return nil, awserr.New(ErrCodeSerialization, + "failed to get next request body reader", err) + } + + // Go 1.8 tightened and clarified the rules code needs to use when building + // requests with the http package. Go 1.8 removed the automatic detection + // of if the Request.Body was empty, or actually had bytes in it. The SDK + // always sets the Request.Body even if it is empty and should not actually + // be sent. This is incorrect. + // + // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http + // client that the request really should be sent without a body. The + // Request.Body cannot be set to nil, which is preferable, because the + // field is exported and could introduce nil pointer dereferences for users + // of the SDK if they used that field. + // + // Related golang/go#18257 + l, err := aws.SeekerLen(r.Body) + if err != nil { + return nil, awserr.New(ErrCodeSerialization, + "failed to compute request body size", err) + } + + if l == 0 { + body = NoBody + } else if l > 0 { + body = r.safeBody + } else { + // Hack to prevent sending bodies for methods where the body + // should be ignored by the server. Sending bodies on these + // methods without an associated ContentLength will cause the + // request to socket timeout because the server does not handle + // Transfer-Encoding: chunked bodies for these methods. + // + // This would only happen if a aws.ReaderSeekerCloser was used with + // a io.Reader that was not also an io.Seeker, or did not implement + // Len() method. + switch r.Operation.HTTPMethod { + case "GET", "HEAD", "DELETE": + body = NoBody + default: + body = r.safeBody + } + } + + return body, nil +} + +// GetBody will return an io.ReadSeeker of the Request's underlying +// input body with a concurrency safe wrapper. +func (r *Request) GetBody() io.ReadSeeker { + return r.safeBody +} + +// Send will send the request, returning error if errors are encountered. +// +// Send will sign the request prior to sending. All Send Handlers will +// be executed in the order they were set. +// +// Canceling a request is non-deterministic. 
If a request has been canceled, +// then the transport will choose, randomly, one of the state channels during +// reads or getting the connection. +// +// readLoop() and getConn(req *Request, cm connectMethod) +// https://github.com/golang/go/blob/master/src/net/http/transport.go +// +// Send will not close the request.Request's body. +func (r *Request) Send() error { + defer func() { + // Regardless of success or failure of the request trigger the Complete + // request handlers. + r.Handlers.Complete.Run(r) + }() + + if err := r.Error; err != nil { + return err + } + + for { + r.Error = nil + r.AttemptTime = time.Now() + + if err := r.Sign(); err != nil { + debugLogReqError(r, "Sign Request", notRetrying, err) + return err + } + + if err := r.sendRequest(); err == nil { + return nil + } + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + + if r.Error != nil || !aws.BoolValue(r.Retryable) { + return r.Error + } + + if err := r.prepareRetry(); err != nil { + r.Error = err + return err + } + } +} + +func (r *Request) prepareRetry() error { + if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { + r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", + r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) + } + + // The previous http.Request will have a reference to the r.Body + // and the HTTP Client's Transport may still be reading from + // the request's body even though the Client's Do returned. + r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) + r.ResetBody() + if err := r.Error; err != nil { + return awserr.New(ErrCodeSerialization, + "failed to prepare body for retry", err) + + } + + // Closing response body to ensure that no response body is leaked + // between retry attempts. + if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { + r.HTTPResponse.Body.Close() + } + + return nil +} + +func (r *Request) sendRequest() (sendErr error) { + defer r.Handlers.CompleteAttempt.Run(r) + + r.Retryable = nil + r.Handlers.Send.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + r.Handlers.UnmarshalError.Run(r) + debugLogReqError(r, "Validate Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + return nil +} + +// copy will copy a request which will allow for local manipulation of the +// request. +func (r *Request) copy() *Request { + req := &Request{} + *req = *r + req.Handlers = r.Handlers.Copy() + op := *r.Operation + req.Operation = &op + return req +} + +// AddToUserAgent adds the string to the end of the request's current user agent. 
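For reviewers tracing the lifecycle above: Send drives Sign (which drives Build), then loops over sendRequest, the Retry handlers, and prepareRetry until success or the retryer gives up. A minimal sketch against a generated request constructor (the DynamoDB operation is illustrative):

    req, out := svc.ListTablesRequest(&dynamodb.ListTablesInput{})
    // Send runs the full Validate/Build/Sign/Send handler pipeline.
    if err := req.Send(); err != nil {
        log.Fatal(err) // Complete handlers have already run at this point
    }
    fmt.Println(out.TableNames)
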
+func AddToUserAgent(r *Request, s string) { + curUA := r.HTTPRequest.Header.Get("User-Agent") + if len(curUA) > 0 { + s = curUA + " " + s + } + r.HTTPRequest.Header.Set("User-Agent", s) +} + +// SanitizeHostForHeader removes default port from host and updates request.Host +func SanitizeHostForHeader(r *http.Request) { + host := getHost(r) + port := portOnly(host) + if port != "" && isDefaultPort(r.URL.Scheme, port) { + r.Host = stripPort(host) + } +} + +// Returns host from request +func getHost(r *http.Request) string { + if r.Host != "" { + return r.Host + } + + if r.URL == nil { + return "" + } + + return r.URL.Host +} + +// Hostname returns u.Host, without any port number. +// +// If Host is an IPv6 literal with a port number, Hostname returns the +// IPv6 literal without the square brackets. IPv6 literals may include +// a zone identifier. +// +// Copied from the Go 1.8 standard library (net/url) +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} + +// Port returns the port part of u.Host, without the leading colon. +// If u.Host doesn't contain a port, Port returns an empty string. +// +// Copied from the Go 1.8 standard library (net/url) +func portOnly(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return "" + } + if i := strings.Index(hostport, "]:"); i != -1 { + return hostport[i+len("]:"):] + } + if strings.Contains(hostport, "]") { + return "" + } + return hostport[colon+len(":"):] +} + +// Returns true if the specified URI is using the standard port +// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) +func isDefaultPort(scheme, port string) bool { + if port == "" { + return true + } + + lowerCaseScheme := strings.ToLower(scheme) + if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { + return true + } + + return false +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go new file mode 100644 index 000000000..e36e468b7 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go @@ -0,0 +1,39 @@ +// +build !go1.8 + +package request + +import "io" + +// NoBody is an io.ReadCloser with no bytes. Read always returns EOF +// and Close always returns nil. It can be used in an outgoing client +// request to explicitly signal that a request has zero bytes. +// An alternative, however, is to simply set Request.Body to nil. +// +// Copy of Go 1.8 NoBody type from net/http/http.go +type noBody struct{} + +func (noBody) Read([]byte) (int, error) { return 0, io.EOF } +func (noBody) Close() error { return nil } +func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } + +// NoBody is an empty reader that will trigger the Go HTTP client to not include +// any body in the HTTP request. +var NoBody = noBody{} + +// ResetBody rewinds the request body back to its starting position, and +// sets the HTTP Request body reference. When the body is read prior +// to being sent in the HTTP request it will need to be rewound. +// +// ResetBody will automatically be called by the SDK's build handler, but if +// the request is being used directly ResetBody must be called before the request +// is Sent. 
SetStringBody, SetBufferBody, and SetReaderBody will automatically +// call ResetBody. +func (r *Request) ResetBody() { + body, err := r.getNextRequestBody() + if err != nil { + r.Error = err + return + } + + r.HTTPRequest.Body = body +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go new file mode 100644 index 000000000..de1292f45 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go @@ -0,0 +1,36 @@ +// +build go1.8 + +package request + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// NoBody is an http.NoBody reader instructing the Go HTTP client to not include +// any body in the HTTP request. +var NoBody = http.NoBody + +// ResetBody rewinds the request body back to its starting position, and +// sets the HTTP Request body reference. When the body is read prior +// to being sent in the HTTP request it will need to be rewound. +// +// ResetBody will automatically be called by the SDK's build handler, but if +// the request is being used directly ResetBody must be called before the request +// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically +// call ResetBody. +// +// Will also set Go 1.8's http.Request.GetBody member to allow retrying +// PUT/POST redirects. +func (r *Request) ResetBody() { + body, err := r.getNextRequestBody() + if err != nil { + r.Error = awserr.New(ErrCodeSerialization, + "failed to reset request body", err) + return + } + + r.HTTPRequest.Body = body + r.HTTPRequest.GetBody = r.getNextRequestBody +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go new file mode 100644 index 000000000..a7365cd1e --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go @@ -0,0 +1,14 @@ +// +build go1.7 + +package request + +import "github.com/aws/aws-sdk-go/aws" + +// setContext updates the Request to use the passed in context for cancellation. +// Context will also be used for request retry delay. +// +// Creates shallow copy of the http.Request with the WithContext method. +func setRequestContext(r *Request, ctx aws.Context) { + r.context = ctx + r.HTTPRequest = r.HTTPRequest.WithContext(ctx) +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go new file mode 100644 index 000000000..307fa0705 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go @@ -0,0 +1,14 @@ +// +build !go1.7 + +package request + +import "github.com/aws/aws-sdk-go/aws" + +// setContext updates the Request to use the passed in context for cancellation. +// Context will also be used for request retry delay. +// +// Sets the http.Request's Cancel field to the Done channel of the context. 
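A sketch of the NoBody behavior these two build-tagged files implement (request construction elided; r is an assumed *request.Request):

    r.SetBufferBody([]byte{})  // zero-length body: getNextRequestBody yields NoBody,
                               // so no empty chunked body is sent on the wire
    r.SetStringBody("payload") // non-empty body: safeBody is sent, and on Go 1.8+
                               // HTTPRequest.GetBody is set so redirects can replay it
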
+func setRequestContext(r *Request, ctx aws.Context) { + r.context = ctx + r.HTTPRequest.Cancel = ctx.Done() +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go new file mode 100644 index 000000000..64784e16f --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -0,0 +1,266 @@ +package request + +import ( + "reflect" + "sync/atomic" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +// A Pagination provides paginating of SDK API operations which are paginatable. +// Generally you should not use this type directly, but use the "Pages" API +// operations method to automatically perform pagination for you. Such as, +// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods. +// +// Pagination differs from a Paginator type in that pagination is the type that +// does the pagination between API operations, and Paginator defines the +// configuration that will be used per page request. +// +// for p.Next() { +// data := p.Page().(*s3.ListObjectsOutput) +// // process the page's data +// // ... +// // break out of loop to stop fetching additional pages +// } +// +// return p.Err() +// +// See service client API operation Pages methods for examples how the SDK will +// use the Pagination type. +type Pagination struct { + // Function to return a Request value for each pagination request. + // Any configuration or handlers that need to be applied to the request + // prior to getting the next page should be done here before the request + // returned. + // + // NewRequest should always be built from the same API operations. It is + // undefined if different API operations are returned on subsequent calls. + NewRequest func() (*Request, error) + // EndPageOnSameToken, when enabled, will allow the paginator to stop on + // token that are the same as its previous tokens. + EndPageOnSameToken bool + + started bool + prevTokens []interface{} + nextTokens []interface{} + + err error + curPage interface{} +} + +// HasNextPage will return true if Pagination is able to determine that the API +// operation has additional pages. False will be returned if there are no more +// pages remaining. +// +// Will always return true if Next has not been called yet. +func (p *Pagination) HasNextPage() bool { + if !p.started { + return true + } + + hasNextPage := len(p.nextTokens) != 0 + if p.EndPageOnSameToken { + return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens) + } + return hasNextPage +} + +// Err returns the error Pagination encountered when retrieving the next page. +func (p *Pagination) Err() error { + return p.err +} + +// Page returns the current page. Page should only be called after a successful +// call to Next. It is undefined what Page will return if Page is called after +// Next returns false. +func (p *Pagination) Page() interface{} { + return p.curPage +} + +// Next will attempt to retrieve the next page for the API operation. When a page +// is retrieved true will be returned. If the page cannot be retrieved, or there +// are no more pages false will be returned. +// +// Use the Page method to retrieve the current page data. The data will need +// to be cast to the API operation's output type. +// +// Use the Err method to determine if an error occurred if Page returns false. 
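A usage sketch for the Pagination type documented above (the S3 operation and bucket name are illustrative):

    p := request.Pagination{
        NewRequest: func() (*request.Request, error) {
            req, _ := svc.ListObjectsRequest(&s3.ListObjectsInput{
                Bucket: aws.String("my-bucket"),
            })
            return req, nil
        },
    }
    for p.Next() {
        page := p.Page().(*s3.ListObjectsOutput)
        fmt.Println("keys in page:", len(page.Contents))
    }
    if err := p.Err(); err != nil {
        log.Fatal(err)
    }
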
+func (p *Pagination) Next() bool { + if !p.HasNextPage() { + return false + } + + req, err := p.NewRequest() + if err != nil { + p.err = err + return false + } + + if p.started { + for i, intok := range req.Operation.InputTokens { + awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i]) + } + } + p.started = true + + err = req.Send() + if err != nil { + p.err = err + return false + } + + p.prevTokens = p.nextTokens + p.nextTokens = req.nextPageTokens() + p.curPage = req.Data + + return true +} + +// A Paginator is the configuration data that defines how an API operation +// should be paginated. This type is used by the API service models to define +// the generated pagination config for service APIs. +// +// The Pagination type is what provides iterating between pages of an API. It +// is only used to store the token metadata the SDK should use for performing +// pagination. +type Paginator struct { + InputTokens []string + OutputTokens []string + LimitToken string + TruncationToken string +} + +// nextPageTokens returns the tokens to use when asking for the next page of data. +func (r *Request) nextPageTokens() []interface{} { + if r.Operation.Paginator == nil { + return nil + } + if r.Operation.TruncationToken != "" { + tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) + if len(tr) == 0 { + return nil + } + + switch v := tr[0].(type) { + case *bool: + if !aws.BoolValue(v) { + return nil + } + case bool: + if !v { + return nil + } + } + } + + tokens := []interface{}{} + tokenAdded := false + for _, outToken := range r.Operation.OutputTokens { + vs, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(vs) == 0 { + tokens = append(tokens, nil) + continue + } + v := vs[0] + + switch tv := v.(type) { + case *string: + if len(aws.StringValue(tv)) == 0 { + tokens = append(tokens, nil) + continue + } + case string: + if len(tv) == 0 { + tokens = append(tokens, nil) + continue + } + } + + tokenAdded = true + tokens = append(tokens, v) + } + if !tokenAdded { + return nil + } + + return tokens +} + +// Ensure a deprecated item is only logged once instead of each time it's used. +func logDeprecatedf(logger aws.Logger, flag *int32, msg string) { + if logger == nil { + return + } + if atomic.CompareAndSwapInt32(flag, 0, 1) { + logger.Log(msg) + } +} + +var ( + logDeprecatedHasNextPage int32 + logDeprecatedNextPage int32 + logDeprecatedEachPage int32 +) + +// HasNextPage returns true if this request has more pages of data available. +// +// Deprecated: Use Pagination type for configurable pagination of API operations +func (r *Request) HasNextPage() bool { + logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage, + "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations") + + return len(r.nextPageTokens()) > 0 +} + +// NextPage returns a new Request that can be executed to return the next +// page of result data. Call .Send() on this request to execute it. +// +// Deprecated: Use Pagination type for configurable pagination of API operations +func (r *Request) NextPage() *Request { + logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage, + "Request.NextPage deprecated. 
Use Pagination type for configurable pagination of API operations") + + tokens := r.nextPageTokens() + if len(tokens) == 0 { + return nil + } + + data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() + nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) + for i, intok := range nr.Operation.InputTokens { + awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) + } + return nr +} + +// EachPage iterates over each page of a paginated request object. The fn +// parameter should be a function with the following sample signature: +// +// func(page *T, lastPage bool) bool { +// return true // return false to stop iterating +// } +// +// Where "T" is the structure type matching the output structure of the given +// operation. For example, a request object generated by +// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput +// as the structure "T". The lastPage value represents whether the page is +// the last page of data or not. The return value of this function should +// return true to keep iterating or false to stop. +// +// Deprecated: Use Pagination type for configurable pagination of API operations +func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { + logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage, + "Request.EachPage deprecated. Use Pagination type for configurable pagination of API operations") + + for page := r; page != nil; page = page.NextPage() { + if err := page.Send(); err != nil { + return err + } + if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { + return page.Error + } + } + + return nil +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go new file mode 100644 index 000000000..752ae47f8 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -0,0 +1,309 @@ +package request + +import ( + "net" + "net/url" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Retryer provides the interface to drive the SDK's request retry behavior. The +// Retryer implementation is responsible for implementing exponential backoff, +// and determining if a request API error should be retried. +// +// client.DefaultRetryer is the SDK's default implementation of the Retryer. It +// uses the Request.IsErrorRetryable and Request.IsErrorThrottle +// methods to determine if the request is retried. +type Retryer interface { + // RetryRules returns the retry delay that should be used by the SDK before + // making another request attempt for the failed request. + RetryRules(*Request) time.Duration + + // ShouldRetry returns if the failed request is retryable. + // + // Implementations may consider request attempt count when determining if a + // request is retryable, but the SDK will use MaxRetries to limit the + // number of attempts made for a request. + ShouldRetry(*Request) bool + + // MaxRetries is the number of times a request may be retried before + // failing. + MaxRetries() int +} + +// WithRetryer sets a Retryer value to the given Config returning the Config +// value for chaining. The value must not be nil. +func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { + if retryer == nil { + if cfg.Logger != nil { + cfg.Logger.Log("ERROR: Request.WithRetryer called with nil retryer. 
Replacing with retry disabled Retryer.") + } + retryer = noOpRetryer{} + } + cfg.Retryer = retryer + return cfg + +} + +// noOpRetryer is an internal no op retryer used when a request is created +// without a retryer. +// +// Provides a retryer that performs no retries. +// It should be used when we do not want retries to be performed. +type noOpRetryer struct{} + +// MaxRetries returns the maximum number of retries the service will make for +// an individual API request; for noOpRetryer the MaxRetries will always be zero. +func (d noOpRetryer) MaxRetries() int { + return 0 +} + +// ShouldRetry will always return false for NoOpRetryer, as it should never retry. +func (d noOpRetryer) ShouldRetry(_ *Request) bool { + return false +} + +// RetryRules returns the delay duration before retrying this request again; +// since NoOpRetryer does not retry, RetryRules always returns 0. +func (d noOpRetryer) RetryRules(_ *Request) time.Duration { + return 0 +} + +// retryableCodes is a collection of service response codes which are retry-able +// without any further action. +var retryableCodes = map[string]struct{}{ + ErrCodeRequestError: {}, + "RequestTimeout": {}, + ErrCodeResponseTimeout: {}, + "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout +} + +var throttleCodes = map[string]struct{}{ + "ProvisionedThroughputExceededException": {}, + "ThrottledException": {}, // SNS, XRay, ResourceGroupsTagging API + "Throttling": {}, + "ThrottlingException": {}, + "RequestLimitExceeded": {}, + "RequestThrottled": {}, + "RequestThrottledException": {}, + "TooManyRequestsException": {}, // Lambda functions + "PriorRequestNotComplete": {}, // Route53 + "TransactionInProgressException": {}, + "EC2ThrottledException": {}, // EC2 +} + +// credsExpiredCodes is a collection of error codes which signify the credentials +// need to be refreshed. Expired tokens require refreshing of credentials, and +// resigning before the request can be retried. +var credsExpiredCodes = map[string]struct{}{ + "ExpiredToken": {}, + "ExpiredTokenException": {}, + "RequestExpired": {}, // EC2 Only +} + +func isCodeThrottle(code string) bool { + _, ok := throttleCodes[code] + return ok +} + +func isCodeRetryable(code string) bool { + if _, ok := retryableCodes[code]; ok { + return true + } + + return isCodeExpiredCreds(code) +} + +func isCodeExpiredCreds(code string) bool { + _, ok := credsExpiredCodes[code] + return ok +} + +var validParentCodes = map[string]struct{}{ + ErrCodeSerialization: {}, + ErrCodeRead: {}, +} + +func isNestedErrorRetryable(parentErr awserr.Error) bool { + if parentErr == nil { + return false + } + + if _, ok := validParentCodes[parentErr.Code()]; !ok { + return false + } + + err := parentErr.OrigErr() + if err == nil { + return false + } + + if aerr, ok := err.(awserr.Error); ok { + return isCodeRetryable(aerr.Code()) + } + + if t, ok := err.(temporary); ok { + return t.Temporary() || isErrConnectionReset(err) + } + + return isErrConnectionReset(err) +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if error is nil. 
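The three-method Retryer interface above is small enough to stub out; a sketch of a fixed-backoff implementation (the delay and retry count are illustrative, and it leans on the Request helpers defined below):

    type fixedRetryer struct{}

    func (fixedRetryer) MaxRetries() int { return 3 }
    func (fixedRetryer) ShouldRetry(r *request.Request) bool {
        return r.IsErrorRetryable() || r.IsErrorThrottle()
    }
    func (fixedRetryer) RetryRules(*request.Request) time.Duration {
        return 200 * time.Millisecond // constant backoff between attempts
    }

    // Installed on a config for chaining:
    // cfg := request.WithRetryer(aws.NewConfig(), fixedRetryer{})
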
+func IsErrorRetryable(err error) bool { + if err == nil { + return false + } + return shouldRetryError(err) +} + +type temporary interface { + Temporary() bool +} + +func shouldRetryError(origErr error) bool { + switch err := origErr.(type) { + case awserr.Error: + if err.Code() == CanceledErrorCode { + return false + } + if isNestedErrorRetryable(err) { + return true + } + + origErr := err.OrigErr() + var shouldRetry bool + if origErr != nil { + shouldRetry = shouldRetryError(origErr) + if err.Code() == ErrCodeRequestError && !shouldRetry { + return false + } + } + if isCodeRetryable(err.Code()) { + return true + } + return shouldRetry + + case *url.Error: + if strings.Contains(err.Error(), "connection refused") { + // Refused connections should be retried as the service may not yet + // be running on the port. Go TCP dial considers refused + // connections as not temporary. + return true + } + // *url.Error only implements Temporary after golang 1.6 but since + // url.Error only wraps the error: + return shouldRetryError(err.Err) + + case temporary: + if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" { + return true + } + // If the error is temporary, we want to allow continuation of the + // retry process + return err.Temporary() || isErrConnectionReset(origErr) + + case nil: + // `awserr.Error.OrigErr()` can be nil, meaning there was an error but + // because we don't know the cause, it is marked as retryable. See + // TestRequest4xxUnretryable for an example. + return true + + default: + switch err.Error() { + case "net/http: request canceled", + "net/http: request canceled while waiting for connection": + // known 1.5 error case when an http request is cancelled + return false + } + // here we don't know the error; so we allow a retry. + return true + } +} + +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if error is nil. +func IsErrorThrottle(err error) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + return isCodeThrottle(aerr.Code()) + } + return false +} + +// IsErrorExpiredCreds returns whether the error code is a credential expiry +// error. Returns false if error is nil. +func IsErrorExpiredCreds(err error) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + return isCodeExpiredCreds(aerr.Code()) + } + return false +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorRetryable +func (r *Request) IsErrorRetryable() bool { + if isErrCode(r.Error, r.RetryErrorCodes) { + return true + } + + // HTTP response status code 501 should not be retried. + // 501 represents Not Implemented which means the request method is not + // supported by the server and cannot be handled. + if r.HTTPResponse != nil { + // HTTP response status code 500 represents internal server error and + // should be retried without any throttle. + if r.HTTPResponse.StatusCode == 500 { + return true + } + } + return IsErrorRetryable(r.Error) +} + +// IsErrorThrottle returns whether the error is to be throttled based on its +// code. Returns false if the request has no Error set. 
+// +// Alias for the utility function IsErrorThrottle +func (r *Request) IsErrorThrottle() bool { + if isErrCode(r.Error, r.ThrottleErrorCodes) { + return true + } + + if r.HTTPResponse != nil { + switch r.HTTPResponse.StatusCode { + case + 429, // error caused due to too many requests + 502, // Bad Gateway error should be throttled + 503, // caused when service is unavailable + 504: // error occurred due to gateway timeout + return true + } + } + + return IsErrorThrottle(r.Error) +} + +func isErrCode(err error, codes []string) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + for _, code := range codes { + if code == aerr.Code() { + return true + } + } + } + + return false +} + +// IsErrorExpired returns whether the error code is a credential expiry error. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorExpiredCreds +func (r *Request) IsErrorExpired() bool { + return IsErrorExpiredCreds(r.Error) +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go new file mode 100644 index 000000000..09a44eb98 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go @@ -0,0 +1,94 @@ +package request + +import ( + "io" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var timeoutErr = awserr.New( + ErrCodeResponseTimeout, + "read on body has reached the timeout limit", + nil, +) + +type readResult struct { + n int + err error +} + +// timeoutReadCloser will handle body reads that take too long. +// We will return an ErrReadTimeout error if a timeout occurs. +type timeoutReadCloser struct { + reader io.ReadCloser + duration time.Duration +} + +// Read will spin off a goroutine to call the reader's Read method. We will +// select on the timer's channel or the read's channel. Whoever completes first +// will be returned. +func (r *timeoutReadCloser) Read(b []byte) (int, error) { + timer := time.NewTimer(r.duration) + c := make(chan readResult, 1) + + go func() { + n, err := r.reader.Read(b) + timer.Stop() + c <- readResult{n: n, err: err} + }() + + select { + case data := <-c: + return data.n, data.err + case <-timer.C: + return 0, timeoutErr + } +} + +func (r *timeoutReadCloser) Close() error { + return r.reader.Close() +} + +const ( + // HandlerResponseTimeout is what we use to signify the name of the + // response timeout handler. + HandlerResponseTimeout = "ResponseTimeoutHandler" +) + +// adaptToResponseTimeoutError is a handler that will replace the top level error +// with an ErrCodeResponseTimeout error, if its underlying error has that code. +func adaptToResponseTimeoutError(req *Request) { + if err, ok := req.Error.(awserr.Error); ok { + aerr, ok := err.OrigErr().(awserr.Error) + if ok && aerr.Code() == ErrCodeResponseTimeout { + req.Error = aerr + } + } +} + +// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer. +// This will allow for per-read timeouts. If a timeout occurs, we will return the +// ErrCodeResponseTimeout error. 
+// +// svc.PutObjectWithContext(ctx, params, request.WithResponseReadTimeout(30 * time.Second)) +func WithResponseReadTimeout(duration time.Duration) Option { + return func(r *Request) { + + var timeoutHandler = NamedHandler{ + HandlerResponseTimeout, + func(req *Request) { + req.HTTPResponse.Body = &timeoutReadCloser{ + reader: req.HTTPResponse.Body, + duration: duration, + } + }} + + // remove the handler so we are not stomping over any new durations. + r.Handlers.Send.RemoveByName(HandlerResponseTimeout) + r.Handlers.Send.PushBackNamed(timeoutHandler) + + r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError) + r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError) + } +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go new file mode 100644 index 000000000..8630683f3 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go @@ -0,0 +1,286 @@ +package request + +import ( + "bytes" + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +const ( + // InvalidParameterErrCode is the error code for invalid parameters errors + InvalidParameterErrCode = "InvalidParameter" + // ParamRequiredErrCode is the error code for required parameter errors + ParamRequiredErrCode = "ParamRequiredError" + // ParamMinValueErrCode is the error code for fields with too low of a + // number value. + ParamMinValueErrCode = "ParamMinValueError" + // ParamMinLenErrCode is the error code for fields without enough elements. + ParamMinLenErrCode = "ParamMinLenError" + // ParamMaxLenErrCode is the error code for value being too long. + ParamMaxLenErrCode = "ParamMaxLenError" + + // ParamFormatErrCode is the error code for a field with invalid + // format or characters. + ParamFormatErrCode = "ParamFormatInvalidError" +) + +// Validator provides a way for types to perform validation logic on their +// input values that external code can use to determine if a type's values +// are valid. +type Validator interface { + Validate() error +} + +// An ErrInvalidParams provides wrapping of invalid parameter errors found when +// validating API operation input parameters. +type ErrInvalidParams struct { + // Context is the base context of the invalid parameter group. + Context string + errs []ErrInvalidParam +} + +// Add adds a new invalid parameter error to the collection of invalid +// parameters. The context of the invalid parameter will be updated to reflect +// this collection. +func (e *ErrInvalidParams) Add(err ErrInvalidParam) { + err.SetContext(e.Context) + e.errs = append(e.errs, err) +} + +// AddNested adds the invalid parameter errors from another ErrInvalidParams +// value into this collection. The nested errors will have their nested and +// base contexts updated to reflect the merging. +// +// Use for nested validation errors. 
+func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) { + for _, err := range nested.errs { + err.SetContext(e.Context) + err.AddNestedContext(nestedCtx) + e.errs = append(e.errs, err) + } +} + +// Len returns the number of invalid parameter errors +func (e ErrInvalidParams) Len() int { + return len(e.errs) +} + +// Code returns the code of the error +func (e ErrInvalidParams) Code() string { + return InvalidParameterErrCode +} + +// Message returns the message of the error +func (e ErrInvalidParams) Message() string { + return fmt.Sprintf("%d validation error(s) found.", len(e.errs)) +} + +// Error returns the string formatted form of the invalid parameters. +func (e ErrInvalidParams) Error() string { + w := &bytes.Buffer{} + fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message()) + + for _, err := range e.errs { + fmt.Fprintf(w, "- %s\n", err.Message()) + } + + return w.String() +} + +// OrigErr returns the invalid parameters as an awserr.BatchedErrors value +func (e ErrInvalidParams) OrigErr() error { + return awserr.NewBatchError( + InvalidParameterErrCode, e.Message(), e.OrigErrs()) +} + +// OrigErrs returns a slice of the invalid parameters +func (e ErrInvalidParams) OrigErrs() []error { + errs := make([]error, len(e.errs)) + for i := 0; i < len(errs); i++ { + errs[i] = e.errs[i] + } + + return errs +} + +// An ErrInvalidParam represents an invalid parameter error type. +type ErrInvalidParam interface { + awserr.Error + + // Field name the error occurred on. + Field() string + + // SetContext updates the context of the error. + SetContext(string) + + // AddNestedContext updates the error's context to include a nested level. + AddNestedContext(string) +} + +type errInvalidParam struct { + context string + nestedContext string + field string + code string + msg string +} + +// Code returns the error code for the type of invalid parameter. +func (e *errInvalidParam) Code() string { + return e.code +} + +// Message returns the reason the parameter was invalid, and its context. +func (e *errInvalidParam) Message() string { + return fmt.Sprintf("%s, %s.", e.msg, e.Field()) +} + +// Error returns the string version of the invalid parameter error. +func (e *errInvalidParam) Error() string { + return fmt.Sprintf("%s: %s", e.code, e.Message()) +} + +// OrigErr returns nil. Implemented to satisfy the awserr.Error interface. +func (e *errInvalidParam) OrigErr() error { + return nil +} + +// Field returns the field and context in which the error occurred. +func (e *errInvalidParam) Field() string { + field := e.context + if len(field) > 0 { + field += "." + } + if len(e.nestedContext) > 0 { + field += fmt.Sprintf("%s.", e.nestedContext) + } + field += e.field + + return field +} + +// SetContext updates the base context of the error. +func (e *errInvalidParam) SetContext(ctx string) { + e.context = ctx +} + +// AddNestedContext prepends a context to the field's path. +func (e *errInvalidParam) AddNestedContext(ctx string) { + if len(e.nestedContext) == 0 { + e.nestedContext = ctx + } else { + e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext) + } + +} + +// An ErrParamRequired represents a required parameter error. +type ErrParamRequired struct { + errInvalidParam +} + +// NewErrParamRequired creates a new required parameter error. 
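A sketch of how generated input types drive this machinery from their Validate methods, using the constructors defined next (the input struct and field are hypothetical):

    func (s *HypotheticalInput) Validate() error {
        invalidParams := request.ErrInvalidParams{Context: "HypotheticalInput"}
        if s.Name == nil {
            invalidParams.Add(request.NewErrParamRequired("Name"))
        } else if len(*s.Name) < 1 {
            invalidParams.Add(request.NewErrParamMinLen("Name", 1))
        }
        if invalidParams.Len() > 0 {
            return invalidParams // satisfies awserr.Error
        }
        return nil
    }
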
+func NewErrParamRequired(field string) *ErrParamRequired { + return &ErrParamRequired{ + errInvalidParam{ + code: ParamRequiredErrCode, + field: field, + msg: fmt.Sprintf("missing required field"), + }, + } +} + +// An ErrParamMinValue represents a minimum value parameter error. +type ErrParamMinValue struct { + errInvalidParam + min float64 +} + +// NewErrParamMinValue creates a new minimum value parameter error. +func NewErrParamMinValue(field string, min float64) *ErrParamMinValue { + return &ErrParamMinValue{ + errInvalidParam: errInvalidParam{ + code: ParamMinValueErrCode, + field: field, + msg: fmt.Sprintf("minimum field value of %v", min), + }, + min: min, + } +} + +// MinValue returns the field's required minimum value. +// +// float64 is returned for both int and float min values. +func (e *ErrParamMinValue) MinValue() float64 { + return e.min +} + +// An ErrParamMinLen represents a minimum length parameter error. +type ErrParamMinLen struct { + errInvalidParam + min int +} + +// NewErrParamMinLen creates a new minimum length parameter error. +func NewErrParamMinLen(field string, min int) *ErrParamMinLen { + return &ErrParamMinLen{ + errInvalidParam: errInvalidParam{ + code: ParamMinLenErrCode, + field: field, + msg: fmt.Sprintf("minimum field size of %v", min), + }, + min: min, + } +} + +// MinLen returns the field's required minimum length. +func (e *ErrParamMinLen) MinLen() int { + return e.min +} + +// An ErrParamMaxLen represents a maximum length parameter error. +type ErrParamMaxLen struct { + errInvalidParam + max int +} + +// NewErrParamMaxLen creates a new maximum length parameter error. +func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen { + return &ErrParamMaxLen{ + errInvalidParam: errInvalidParam{ + code: ParamMaxLenErrCode, + field: field, + msg: fmt.Sprintf("maximum size of %v, %v", max, value), + }, + max: max, + } +} + +// MaxLen returns the field's required maximum length. +func (e *ErrParamMaxLen) MaxLen() int { + return e.max +} + +// An ErrParamFormat represents an invalid format parameter error. +type ErrParamFormat struct { + errInvalidParam + format string +} + +// NewErrParamFormat creates a new invalid format parameter error. +func NewErrParamFormat(field string, format, value string) *ErrParamFormat { + return &ErrParamFormat{ + errInvalidParam: errInvalidParam{ + code: ParamFormatErrCode, + field: field, + msg: fmt.Sprintf("format %v, %v", format, value), + }, + format: format, + } +} + +// Format returns the field's required format. +func (e *ErrParamFormat) Format() string { + return e.format +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go new file mode 100644 index 000000000..4601f883c --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go @@ -0,0 +1,295 @@ +package request + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when +// the waiter's max attempts have been exhausted. +const WaiterResourceNotReadyErrorCode = "ResourceNotReady" + +// A WaiterOption is a function that will update the Waiter value's fields to +// configure the waiter. +type WaiterOption func(*Waiter) + +// WithWaiterMaxAttempts returns a WaiterOption that sets the maximum number of +// times the waiter should attempt to check the resource for the target state. 
+func WithWaiterMaxAttempts(max int) WaiterOption { + return func(w *Waiter) { + w.MaxAttempts = max + } +} + +// WaiterDelay will return a delay the waiter should pause between attempts to +// check the resource state. The passed in attempt is the number of times the +// Waiter has checked the resource state. +// +// Attempt is the number of attempts the Waiter has made checking the resource +// state. +type WaiterDelay func(attempt int) time.Duration + +// ConstantWaiterDelay returns a WaiterDelay that will always return a constant +// delay the waiter should use between attempts. It ignores the number of +// attempts made. +func ConstantWaiterDelay(delay time.Duration) WaiterDelay { + return func(attempt int) time.Duration { + return delay + } +} + +// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in. +func WithWaiterDelay(delayer WaiterDelay) WaiterOption { + return func(w *Waiter) { + w.Delay = delayer + } +} + +// WithWaiterLogger returns a waiter option to set the logger a waiter +// should use to log warnings and errors to. +func WithWaiterLogger(logger aws.Logger) WaiterOption { + return func(w *Waiter) { + w.Logger = logger + } +} + +// WithWaiterRequestOptions returns a waiter option setting the request +// options for each request the waiter makes. Appends to waiter's request +// options already set. +func WithWaiterRequestOptions(opts ...Option) WaiterOption { + return func(w *Waiter) { + w.RequestOptions = append(w.RequestOptions, opts...) + } +} + +// A Waiter provides the functionality to perform a blocking call which will +// wait for a resource state to be satisfied by a service. +// +// This type should not be used directly. The API operations provided in the +// service packages prefixed with "WaitUntil" should be used instead. +type Waiter struct { + Name string + Acceptors []WaiterAcceptor + Logger aws.Logger + + MaxAttempts int + Delay WaiterDelay + + RequestOptions []Option + NewRequest func([]Option) (*Request, error) + SleepWithContext func(aws.Context, time.Duration) error +} + +// ApplyOptions updates the waiter with the list of waiter options provided. +func (w *Waiter) ApplyOptions(opts ...WaiterOption) { + for _, fn := range opts { + fn(w) + } +} + +// WaiterState are states the waiter uses based on WaiterAcceptor definitions +// to identify if the resource state the waiter is waiting on has occurred. +type WaiterState int + +// String returns the string representation of the waiter state. +func (s WaiterState) String() string { + switch s { + case SuccessWaiterState: + return "success" + case FailureWaiterState: + return "failure" + case RetryWaiterState: + return "retry" + default: + return "unknown waiter state" + } +} + +// States the waiter acceptors will use to identify target resource states. +const ( + SuccessWaiterState WaiterState = iota // waiter successful + FailureWaiterState // waiter failed + RetryWaiterState // waiter needs to be retried +) + +// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor +// definition's Expected attribute. +type WaiterMatchMode int + +// Modes the waiter will use when inspecting API response to identify target +// resource states. 
+const ( + PathAllWaiterMatch WaiterMatchMode = iota // match on all paths + PathWaiterMatch // match on specific path + PathAnyWaiterMatch // match on any path + PathListWaiterMatch // match on list of paths + StatusWaiterMatch // match on status code + ErrorWaiterMatch // match on error +) + +// String returns the string representation of the waiter match mode. +func (m WaiterMatchMode) String() string { + switch m { + case PathAllWaiterMatch: + return "pathAll" + case PathWaiterMatch: + return "path" + case PathAnyWaiterMatch: + return "pathAny" + case PathListWaiterMatch: + return "pathList" + case StatusWaiterMatch: + return "status" + case ErrorWaiterMatch: + return "error" + default: + return "unknown waiter match mode" + } +} + +// WaitWithContext will make requests for the API operation using NewRequest to +// build API requests. The request's response will be compared against the +// Waiter's Acceptors to determine the successful state of the resource the +// waiter is inspecting. +// +// The passed in context must not be nil. If it is nil a panic will occur. The +// Context will be used to cancel the waiter's pending requests and retry delays. +// Use aws.BackgroundContext if no context is available. +// +// The waiter will continue until the target state defined by the Acceptors is +// reached, or the max attempts expire. +// +// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's +// retryer ShouldRetry returns false. This normally will happen when the max +// wait attempts expires. +func (w Waiter) WaitWithContext(ctx aws.Context) error { + + for attempt := 1; ; attempt++ { + req, err := w.NewRequest(w.RequestOptions) + if err != nil { + waiterLogf(w.Logger, "unable to create request %v", err) + return err + } + req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter")) + err = req.Send() + + // See if any of the acceptors match the request's response, or error + for _, a := range w.Acceptors { + if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched { + return matchErr + } + } + + // The Waiter should only check the resource state MaxAttempts times + // This is here instead of in the for loop above to prevent delaying + // unnecessarily when the waiter will not retry. + if attempt == w.MaxAttempts { + break + } + + // Delay to wait before inspecting the resource again + delay := w.Delay(attempt) + if sleepFn := req.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(delay) + } else { + sleepCtxFn := w.SleepWithContext + if sleepCtxFn == nil { + sleepCtxFn = aws.SleepWithContext + } + + if err := sleepCtxFn(ctx, delay); err != nil { + return awserr.New(CanceledErrorCode, "waiter context canceled", err) + } + } + } + + return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil) +} + +// A WaiterAcceptor provides the information needed to wait for an API operation +// to complete. +type WaiterAcceptor struct { + State WaiterState + Matcher WaiterMatchMode + Argument string + Expected interface{} +} + +// match returns if the acceptor found a match with the passed in request +// or error. True is returned if the acceptor made a match, error is returned +// if there was an error attempting to perform the match. 
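A sketch of assembling a Waiter by hand; most callers go through the generated WaitUntil* helpers instead (the operation, acceptor, and timings are illustrative):

    w := request.Waiter{
        Name:        "WaitUntilBucketExists",
        MaxAttempts: 20,
        Delay:       request.ConstantWaiterDelay(5 * time.Second),
        Acceptors: []request.WaiterAcceptor{
            {State: request.SuccessWaiterState, Matcher: request.StatusWaiterMatch, Expected: 200},
        },
        NewRequest: func(opts []request.Option) (*request.Request, error) {
            req, _ := svc.HeadBucketRequest(&s3.HeadBucketInput{Bucket: aws.String("my-bucket")})
            req.ApplyOptions(opts...)
            return req, nil
        },
    }
    err := w.WaitWithContext(aws.BackgroundContext())
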
+func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) { + result := false + var vals []interface{} + + switch a.Matcher { + case PathAllWaiterMatch, PathWaiterMatch: + // Require all matches to be equal for result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + if len(vals) == 0 { + break + } + result = true + for _, val := range vals { + if !awsutil.DeepEqual(val, a.Expected) { + result = false + break + } + } + case PathAnyWaiterMatch: + // Only a single match needs to equal for the result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + for _, val := range vals { + if awsutil.DeepEqual(val, a.Expected) { + result = true + break + } + } + case PathListWaiterMatch: + // ignored matcher + case StatusWaiterMatch: + s := a.Expected.(int) + result = s == req.HTTPResponse.StatusCode + case ErrorWaiterMatch: + if aerr, ok := err.(awserr.Error); ok { + result = aerr.Code() == a.Expected.(string) + } + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s", + name, a.Matcher) + } + + if !result { + // If there was no matching result found there is nothing more to do + // for this response, retry the request. + return false, nil + } + + switch a.State { + case SuccessWaiterState: + // waiter completed + return true, nil + case FailureWaiterState: + // Waiter failure state triggered + return true, awserr.New(WaiterResourceNotReadyErrorCode, + "failed waiting for successful resource state", err) + case RetryWaiterState: + // clear the error and retry the operation + return false, nil + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s", + name, a.State) + return false, nil + } +} + +func waiterLogf(logger aws.Logger, msg string, args ...interface{}) { + if logger != nil { + logger.Log(fmt.Sprintf(msg, args...)) + } +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go new file mode 100644 index 000000000..ea9ebb6f6 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go @@ -0,0 +1,26 @@ +// +build go1.7 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCABundleTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go new file mode 100644 index 000000000..fec39dfc1 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go @@ -0,0 +1,22 @@ +// +build !go1.6,go1.5 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. 
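These build-tagged transports only come into play when a caller supplies a custom CA bundle; a sketch of the session-level entry point (the bundle path is illustrative, and this assumes the CustomCABundle session option provided by this SDK version):

    f, err := os.Open("/etc/ssl/private/ca-bundle.pem")
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()
    sess, err := session.NewSessionWithOptions(session.Options{
        CustomCABundle: f, // TLS connections will trust only this bundle
    })
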
+func getCABundleTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + } +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go new file mode 100644 index 000000000..1c5a5391e --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go @@ -0,0 +1,23 @@ +// +build !go1.7,go1.6 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCABundleTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go new file mode 100644 index 000000000..fe6dac1f4 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go @@ -0,0 +1,267 @@ +package session + +import ( + "fmt" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/processcreds" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +func resolveCredentials(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (*credentials.Credentials, error) { + + switch { + case len(sessOpts.Profile) != 0: + // User explicitly provided a Profile in the session's configuration + // so load that profile from shared config first. + // Github(aws/aws-sdk-go#2727) + return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) + + case envCfg.Creds.HasKeys(): + // Environment credentials + return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil + + case len(envCfg.WebIdentityTokenFilePath) != 0: + // Web identity token from environment, RoleARN required to also be + // set. + return assumeWebIdentity(cfg, handlers, + envCfg.WebIdentityTokenFilePath, + envCfg.RoleARN, + envCfg.RoleSessionName, + ) + + default: + // Fallback to the "default" credential resolution chain. + return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) + } +} + +// WebIdentityEmptyRoleARNErr will occur if 'AWS_WEB_IDENTITY_TOKEN_FILE' was set but +// 'AWS_ROLE_ARN' was not set. +var WebIdentityEmptyRoleARNErr = awserr.New(stscreds.ErrCodeWebIdentity, "role ARN is not set", nil) + +// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_ROLE_ARN' was set but +// 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set. 
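The web-identity branch above is driven entirely by environment configuration; a sketch of the variables that select it (the values are illustrative; both of the first two must be set, or session creation fails with the WebIdentityEmpty* errors defined here):

    os.Setenv("AWS_WEB_IDENTITY_TOKEN_FILE", "/var/run/secrets/token")   // path to the OIDC token file
    os.Setenv("AWS_ROLE_ARN", "arn:aws:iam::123456789012:role/extender") // role to assume
    os.Setenv("AWS_ROLE_SESSION_NAME", "extender-session")               // optional
    sess := session.Must(session.NewSession())
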
+// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_ROLE_ARN' was set but +// 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set. +var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, "token file path is not set", nil) + +func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers, + filepath string, + roleARN, sessionName string, +) (*credentials.Credentials, error) { + + if len(filepath) == 0 { + return nil, WebIdentityEmptyTokenFilePathErr + } + + if len(roleARN) == 0 { + return nil, WebIdentityEmptyRoleARNErr + } + + creds := stscreds.NewWebIdentityCredentials( + &Session{ + Config: cfg, + Handlers: handlers.Copy(), + }, + roleARN, + sessionName, + filepath, + ) + + return creds, nil +} + +func resolveCredsFromProfile(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (creds *credentials.Credentials, err error) { + + switch { + case sharedCfg.SourceProfile != nil: + // Assume IAM role with credentials source from a different profile. + creds, err = resolveCredsFromProfile(cfg, envCfg, + *sharedCfg.SourceProfile, handlers, sessOpts, + ) + + case sharedCfg.Creds.HasKeys(): + // Static Credentials from Shared Config/Credentials file. + creds = credentials.NewStaticCredentialsFromCreds( + sharedCfg.Creds, + ) + + case len(sharedCfg.CredentialProcess) != 0: + // Get credentials from CredentialProcess + creds = processcreds.NewCredentials(sharedCfg.CredentialProcess) + + case len(sharedCfg.CredentialSource) != 0: + creds, err = resolveCredsFromSource(cfg, envCfg, + sharedCfg, handlers, sessOpts, + ) + + case len(sharedCfg.WebIdentityTokenFile) != 0: + // Credentials from Assume Web Identity token require an IAM Role, and + // that role will be assumed. May be wrapped with another assume role + // via SourceProfile. + return assumeWebIdentity(cfg, handlers, + sharedCfg.WebIdentityTokenFile, + sharedCfg.RoleARN, + sharedCfg.RoleSessionName, + ) + + default: + // Fallback to default credentials provider, include mock errors for + // the credential chain so the user can identify why credentials failed to + // be retrieved.
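+ // Note on ordering (descriptive only): the two credProviderError entries + // below can never produce credentials; with VerboseErrors enabled their + // placeholder errors explain why the environment and shared-profile sources + // were skipped before the remote EC2/ECS provider is consulted.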
+ creds = credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ + &credProviderError{ + Err: awserr.New("EnvAccessKeyNotFound", + "failed to find credentials in the environment.", nil), + }, + &credProviderError{ + Err: awserr.New("SharedCredsLoad", + fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil), + }, + defaults.RemoteCredProvider(*cfg, handlers), + }, + }) + } + if err != nil { + return nil, err + } + + if len(sharedCfg.RoleARN) > 0 { + cfgCp := *cfg + cfgCp.Credentials = creds + return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts) + } + + return creds, nil +} + +// valid credential source values +const ( + credSourceEc2Metadata = "Ec2InstanceMetadata" + credSourceEnvironment = "Environment" + credSourceECSContainer = "EcsContainer" +) + +func resolveCredsFromSource(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (creds *credentials.Credentials, err error) { + + switch sharedCfg.CredentialSource { + case credSourceEc2Metadata: + p := defaults.RemoteCredProvider(*cfg, handlers) + creds = credentials.NewCredentials(p) + + case credSourceEnvironment: + creds = credentials.NewStaticCredentialsFromCreds(envCfg.Creds) + + case credSourceECSContainer: + if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 { + return nil, ErrSharedConfigECSContainerEnvVarEmpty + } + + p := defaults.RemoteCredProvider(*cfg, handlers) + creds = credentials.NewCredentials(p) + + default: + return nil, ErrSharedConfigInvalidCredSource + } + + return creds, nil +} + +func credsFromAssumeRole(cfg aws.Config, + handlers request.Handlers, + sharedCfg sharedConfig, + sessOpts Options, +) (*credentials.Credentials, error) { + + if len(sharedCfg.MFASerial) != 0 && sessOpts.AssumeRoleTokenProvider == nil { + // AssumeRole Token provider is required if doing Assume Role + // with MFA. + return nil, AssumeRoleTokenProviderNotSetError{} + } + + return stscreds.NewCredentials( + &Session{ + Config: &cfg, + Handlers: handlers.Copy(), + }, + sharedCfg.RoleARN, + func(opt *stscreds.AssumeRoleProvider) { + opt.RoleSessionName = sharedCfg.RoleSessionName + + if sessOpts.AssumeRoleDuration == 0 && + sharedCfg.AssumeRoleDuration != nil && + *sharedCfg.AssumeRoleDuration/time.Minute > 15 { + opt.Duration = *sharedCfg.AssumeRoleDuration + } else if sessOpts.AssumeRoleDuration != 0 { + opt.Duration = sessOpts.AssumeRoleDuration + } + + // Assume role with external ID + if len(sharedCfg.ExternalID) > 0 { + opt.ExternalID = aws.String(sharedCfg.ExternalID) + } + + // Assume role with MFA + if len(sharedCfg.MFASerial) > 0 { + opt.SerialNumber = aws.String(sharedCfg.MFASerial) + opt.TokenProvider = sessOpts.AssumeRoleTokenProvider + } + }, + ), nil +} + +// AssumeRoleTokenProviderNotSetError is an error returned when creating a +// session when the MFAToken option is not set and the shared config is +// configured to assume a role with an MFA token. +type AssumeRoleTokenProviderNotSetError struct{} + +// Code is the short id of the error. +func (e AssumeRoleTokenProviderNotSetError) Code() string { + return "AssumeRoleTokenProviderNotSetError" +} + +// Message is the description of the error. +func (e AssumeRoleTokenProviderNotSetError) Message() string { + return "assume role with MFA enabled, but AssumeRoleTokenProvider session option not set." +}
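+ +// For reference (a hedged note): Error renders through awserr.SprintError as +// "<code>: <message>", so this error prints roughly as +// +//	AssumeRoleTokenProviderNotSetError: assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.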
+// OrigErr is the underlying error that caused the failure. +func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e AssumeRoleTokenProviderNotSetError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} + +type credProviderError struct { + Err error +} + +func (c credProviderError) Retrieve() (credentials.Value, error) { + return credentials.Value{}, c.Err +} +func (c credProviderError) IsExpired() bool { + return true +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go new file mode 100644 index 000000000..7ec66e7e5 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -0,0 +1,245 @@ +/* +Package session provides configuration for the SDK's service clients. Sessions +can be shared across service clients that share the same base configuration. + +Sessions are safe to use concurrently as long as the Session is not being +modified. Sessions should be cached when possible, because creating a new +Session will load all configuration values from the environment and config +files each time the Session is created. Sharing the Session value across all of +your service clients will ensure the configuration is loaded the fewest number +of times possible. + +Session options from Shared Config + +By default NewSession will only load credentials from the shared credentials +file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is +set to a truthy value the Session will be created from the configuration +values from the shared config (~/.aws/config) and shared credentials +(~/.aws/credentials) files. Using NewSessionWithOptions with +SharedConfigState set to SharedConfigEnable will create the session as if the +AWS_SDK_LOAD_CONFIG environment variable was set. + +Credential and config loading order + +The Session will attempt to load configuration and credentials from the +environment, configuration files, and other credential sources. The order +configuration is loaded in is: + + * Environment Variables + * Shared Credentials file + * Shared Configuration file (if SharedConfig is enabled) + * EC2 Instance Metadata (credentials only) + +The Environment variables for credentials will have precedence over shared +config even if SharedConfig is enabled. To override this behavior and use +shared config credentials instead, specify session.Options.Profile (e.g. +when using credential_source=Environment to assume a role). + + sess, err := session.NewSessionWithOptions(session.Options{ + Profile: "myProfile", + }) + +Creating Sessions + +Creating a Session without additional options will load the credentials, +region, and profile from the environment and shared config automatically. See +the "Environment Variables" section for information on environment variables used +by Session. + + // Create Session + sess, err := session.NewSession() + + +When creating Sessions optional aws.Config values can be passed in that will +override the default, or loaded, config values the Session is being created +with. This allows you to provide additional, or case-based, configuration +as needed. + + // Create a Session with a custom region + sess, err := session.NewSession(&aws.Config{ + Region: aws.String("us-west-2"), + }) + +Use NewSessionWithOptions to provide additional configuration driving how the +Session's configuration will be loaded.
For example, you can specify the shared config +profile to load, or override the shared config state (AWS_SDK_LOAD_CONFIG). + + // Equivalent to session.NewSession() + sess, err := session.NewSessionWithOptions(session.Options{ + // Options + }) + + sess, err := session.NewSessionWithOptions(session.Options{ + // Specify profile to load for the session's config + Profile: "profile_name", + + // Provide SDK Config options, such as Region. + Config: aws.Config{ + Region: aws.String("us-west-2"), + }, + + // Force enable Shared Config support + SharedConfigState: session.SharedConfigEnable, + }) + +Adding Handlers + +You can add handlers to a session to decorate API operations (e.g. adding HTTP +headers). All clients that use the Session receive a copy of the Session's +handlers. For example, the following request handler added to the Session logs +every request made. + + // Create a session, and add additional handlers for all service + // clients created with the Session to inherit. Adds logging handler. + sess := session.Must(session.NewSession()) + + sess.Handlers.Send.PushFront(func(r *request.Request) { + // Log every request made and its payload + logger.Printf("Request: %s/%s, Params: %s", + r.ClientInfo.ServiceName, r.Operation, r.Params) + }) + +Shared Config Fields + +By default the SDK will only load the shared credentials file's +(~/.aws/credentials) credentials values, and all other config is provided by +the environment variables, SDK defaults, and user provided aws.Config values. + +If the AWS_SDK_LOAD_CONFIG environment variable is set, or the SharedConfigEnable +option is used to create the Session, the full shared config values will be +loaded. This includes credentials, region, and support for assume role. In +addition the Session will load its configuration from both the shared config +file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both +files have the same format. + +If both config files are present the configuration from both files will be +read. The Session will be created preferring configuration values from the shared +credentials file (~/.aws/credentials) over those in the shared config file +(~/.aws/config). + +Credentials are the values the SDK uses to authenticate requests with AWS +Services. When specified in a file, both aws_access_key_id and +aws_secret_access_key must be provided together in the same file to be +considered valid. They will be ignored if both are not present. +aws_session_token is an optional field that can be provided in addition to the +other two fields. + + aws_access_key_id = AKID + aws_secret_access_key = SECRET + aws_session_token = TOKEN + + ; region only supported if SharedConfigEnabled. + region = us-east-1 + +Assume Role configuration + +The role_arn field allows you to configure the SDK to assume an IAM role using +a set of credentials from another source, such as static credentials paired +with "source_profile", or the "credential_process" or "credential_source" +fields. If "role_arn" is provided, a source of credentials must also be +specified, such as "source_profile", "credential_source", or +"credential_process". + + role_arn = arn:aws:iam:::role/ + source_profile = profile_with_creds + external_id = 1234 + mfa_serial = + role_session_name = session_name + + +The SDK supports assuming a role with an MFA token. If "mfa_serial" is set, you +must also set the Session Option.AssumeRoleTokenProvider. The Session will fail +to load if the AssumeRoleTokenProvider is not specified.
+ + sess := session.Must(session.NewSessionWithOptions(session.Options{ + AssumeRoleTokenProvider: stscreds.StdinTokenProvider, + })) + +To set up Assume Role outside of a session see the stscreds.AssumeRoleProvider +documentation. + +Environment Variables + +When a Session is created several environment variables can be set to adjust +how the SDK functions, and what configuration data it loads when creating +Sessions. All environment values are optional, but some values like credentials +require multiple of the values to be set or the partial values will be ignored. +All environment variable values are strings unless otherwise noted. + +Environment configuration values. If set both Access Key ID and Secret Access +Key must be provided. A Session Token can optionally also be provided, but it is +not required. + + # Access Key ID + AWS_ACCESS_KEY_ID=AKID + AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. + + # Secret Access Key + AWS_SECRET_ACCESS_KEY=SECRET + AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. + + # Session Token + AWS_SESSION_TOKEN=TOKEN + +Region value will instruct the SDK where to make service API requests to. If it is +not provided in the environment the region must be provided before a service +client request is made. + + AWS_REGION=us-east-1 + + # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, + # and AWS_REGION is not also set. + AWS_DEFAULT_REGION=us-east-1 + +Profile name the SDK should use when loading shared config from the +configuration files. If not provided "default" will be used as the profile name. + + AWS_PROFILE=my_profile + + # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, + # and AWS_PROFILE is not also set. + AWS_DEFAULT_PROFILE=my_profile + +SDK load config instructs the SDK to load the shared config in addition to +shared credentials. This also expands the configuration loaded so the shared +credentials will have parity with the shared config file. This also enables +Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE +env values as well. + + AWS_SDK_LOAD_CONFIG=1 + +Shared credentials file path can be set to instruct the SDK to use an alternative +file for the shared credentials. If not set the file will be loaded from +$HOME/.aws/credentials on Linux/Unix based systems, and +%USERPROFILE%\.aws\credentials on Windows. + + AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials + +Shared config file path can be set to instruct the SDK to use an alternative +file for the shared config. If not set the file will be loaded from +$HOME/.aws/config on Linux/Unix based systems, and +%USERPROFILE%\.aws\config on Windows. + + AWS_CONFIG_FILE=$HOME/my_shared_config + +Path to a custom Certificate Authority (CA) bundle PEM file that the SDK +will use instead of the default system's root CA bundle. Use this only +if you want to replace the CA bundle the SDK uses for TLS requests. + + AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle + +Enabling this option will attempt to merge the Transport into the SDK's HTTP +client. If the client's Transport is not a http.Transport an error will be +returned. If the Transport's TLS config is set this option will cause the SDK +to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file +contains multiple certificates all of them will be loaded. + +The Session option CustomCABundle is also available when creating sessions +to enable this feature.
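+ +For illustration, the same option can be enabled in code from an open file +(the path below is made up): + + f, err := os.Open("/etc/ssl/private/my_custom_ca.pem") + if err != nil { + panic(err) + } + defer f.Close() + + sess := session.Must(session.NewSessionWithOptions(session.Options{ + CustomCABundle: f, + }))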
The CustomCABundle session option has priority +over the AWS_CA_BUNDLE environment variable, and will be used if both are set. + +Setting a custom HTTPClient in the aws.Config options will override this setting. +To use this option and a custom HTTP client, the HTTP client needs to be provided +when creating the session, not the service client. +*/ +package session diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go new file mode 100644 index 000000000..c1e0e9c95 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -0,0 +1,345 @@ +package session + +import ( + "fmt" + "os" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/endpoints" +) + +// EnvProviderName provides a name of the provider when config is loaded from environment. +const EnvProviderName = "EnvConfigCredentials" + +// envConfig is a collection of environment values the SDK will read +// setup config from. All environment values are optional. But some values +// such as credentials require multiple values to be complete or the values +// will be ignored. +type envConfig struct { + // Environment configuration values. If set both Access Key ID and Secret Access + // Key must be provided. A Session Token can optionally also be provided, but it is + // not required. + // + // # Access Key ID + // AWS_ACCESS_KEY_ID=AKID + // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. + // + // # Secret Access Key + // AWS_SECRET_ACCESS_KEY=SECRET + // AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. + // + // # Session Token + // AWS_SESSION_TOKEN=TOKEN + Creds credentials.Value + + // Region value will instruct the SDK where to make service API requests to. If it is + // not provided in the environment the region must be provided before a service + // client request is made. + // + // AWS_REGION=us-east-1 + // + // # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, + // # and AWS_REGION is not also set. + // AWS_DEFAULT_REGION=us-east-1 + Region string + + // Profile name the SDK should use when loading shared configuration from the + // shared configuration files. If not provided "default" will be used as the + // profile name. + // + // AWS_PROFILE=my_profile + // + // # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, + // # and AWS_PROFILE is not also set. + // AWS_DEFAULT_PROFILE=my_profile + Profile string + + // SDK load config instructs the SDK to load the shared config in addition to + // shared credentials. This also expands the configuration loaded from the shared + // credentials to have parity with the shared config file. This also enables + // Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE + // env values as well. + // + // AWS_SDK_LOAD_CONFIG=1 + EnableSharedConfig bool + + // Shared credentials file path can be set to instruct the SDK to use an alternate + // file for the shared credentials. If not set the file will be loaded from + // $HOME/.aws/credentials on Linux/Unix based systems, and + // %USERPROFILE%\.aws\credentials on Windows.
+ // + // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials + SharedCredentialsFile string + + // Shared config file path can be set to instruct the SDK to use an alternate + // file for the shared config. If not set the file will be loaded from + // $HOME/.aws/config on Linux/Unix based systems, and + // %USERPROFILE%\.aws\config on Windows. + // + // AWS_CONFIG_FILE=$HOME/my_shared_config + SharedConfigFile string + + // Sets the path to a custom Certificate Authority (CA) Bundle PEM file + // that the SDK will use instead of the system's root CA bundle. + // Only use this if you want to configure the SDK to use a custom set + // of CAs. + // + // Enabling this option will attempt to merge the Transport + // into the SDK's HTTP client. If the client's Transport is + // not a http.Transport an error will be returned. If the + // Transport's TLS config is set this option will cause the + // SDK to overwrite the Transport's TLS config's RootCAs value. + // + // Setting a custom HTTPClient in the aws.Config options will override this setting. + // To use this option and a custom HTTP client, the HTTP client needs to be provided + // when creating the session, not the service client. + // + // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle + CustomCABundle string + + csmEnabled string + CSMEnabled *bool + CSMPort string + CSMHost string + CSMClientID string + + // Enables endpoint discovery via environment variables. + // + // AWS_ENABLE_ENDPOINT_DISCOVERY=true + EnableEndpointDiscovery *bool + enableEndpointDiscovery string + + // Specifies the WebIdentity token the SDK should use to assume a role + // with. + // + // AWS_WEB_IDENTITY_TOKEN_FILE=file_path + WebIdentityTokenFilePath string + + // Specifies the IAM role ARN to use when assuming a role. + // + // AWS_ROLE_ARN=role_arn + RoleARN string + + // Specifies the IAM role session name to use when assuming a role. + // + // AWS_ROLE_SESSION_NAME=session_name + RoleSessionName string + + // Specifies the STS Regional Endpoint flag for the SDK to resolve the endpoint + // for a service. + // + // AWS_STS_REGIONAL_ENDPOINTS=regional + // Valid values are `regional` or `legacy`. + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // Specifies the S3 Regional Endpoint flag for the SDK to resolve the + // endpoint for a service. + // + // AWS_S3_US_EAST_1_REGIONAL_ENDPOINT=regional + // Valid values are `regional` or `legacy`. + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint
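+ + // As an illustration of the two flags above (behavior summarized from the + // endpoints package; the region is made up): with + // AWS_STS_REGIONAL_ENDPOINTS=regional a client in us-west-2 resolves + // sts.us-west-2.amazonaws.com, while `legacy` resolves the global + // sts.amazonaws.com endpoint.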
+ + // Specifies if the S3 service should allow ARNs to direct the region + // the client's requests are sent to. + // + // AWS_S3_USE_ARN_REGION=true + S3UseARNRegion bool +} + +var ( + csmEnabledEnvKey = []string{ + "AWS_CSM_ENABLED", + } + csmHostEnvKey = []string{ + "AWS_CSM_HOST", + } + csmPortEnvKey = []string{ + "AWS_CSM_PORT", + } + csmClientIDEnvKey = []string{ + "AWS_CSM_CLIENT_ID", + } + credAccessEnvKey = []string{ + "AWS_ACCESS_KEY_ID", + "AWS_ACCESS_KEY", + } + credSecretEnvKey = []string{ + "AWS_SECRET_ACCESS_KEY", + "AWS_SECRET_KEY", + } + credSessionEnvKey = []string{ + "AWS_SESSION_TOKEN", + } + + enableEndpointDiscoveryEnvKey = []string{ + "AWS_ENABLE_ENDPOINT_DISCOVERY", + } + + regionEnvKeys = []string{ + "AWS_REGION", + "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set + } + profileEnvKeys = []string{ + "AWS_PROFILE", + "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set + } + sharedCredsFileEnvKey = []string{ + "AWS_SHARED_CREDENTIALS_FILE", + } + sharedConfigFileEnvKey = []string{ + "AWS_CONFIG_FILE", + } + webIdentityTokenFilePathEnvKey = []string{ + "AWS_WEB_IDENTITY_TOKEN_FILE", + } + roleARNEnvKey = []string{ + "AWS_ROLE_ARN", + } + roleSessionNameEnvKey = []string{ + "AWS_ROLE_SESSION_NAME", + } + stsRegionalEndpointKey = []string{ + "AWS_STS_REGIONAL_ENDPOINTS", + } + s3UsEast1RegionalEndpoint = []string{ + "AWS_S3_US_EAST_1_REGIONAL_ENDPOINT", + } + s3UseARNRegionEnvKey = []string{ + "AWS_S3_USE_ARN_REGION", + } +) + +// loadEnvConfig retrieves the SDK's environment configuration. +// See `envConfig` for the values that will be retrieved. +// +// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value +// the shared SDK config will be loaded in addition to the SDK's specific +// configuration values. +func loadEnvConfig() (envConfig, error) { + enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG")) + return envConfigLoad(enableSharedConfig) +} + +// loadSharedEnvConfig retrieves the SDK's environment configuration, and the +// SDK shared config. See `envConfig` for the values that will be retrieved. +// +// Loads the shared configuration in addition to the SDK's specific configuration. +// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG` +// environment variable is set.
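+// +// An illustrative contrast between the two loaders, assuming only +// AWS_DEFAULT_REGION is set in the environment: +// +//	os.Setenv("AWS_DEFAULT_REGION", "us-east-1") +//	e1, _ := loadEnvConfig()       // e1.Region == "" (AWS_SDK_LOAD_CONFIG unset) +//	e2, _ := loadSharedEnvConfig() // e2.Region == "us-east-1"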
+func loadSharedEnvConfig() (envConfig, error) { + return envConfigLoad(true) +} + +func envConfigLoad(enableSharedConfig bool) (envConfig, error) { + cfg := envConfig{} + + cfg.EnableSharedConfig = enableSharedConfig + + // Static environment credentials + var creds credentials.Value + setFromEnvVal(&creds.AccessKeyID, credAccessEnvKey) + setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKey) + setFromEnvVal(&creds.SessionToken, credSessionEnvKey) + if creds.HasKeys() { + // Require logical grouping of credentials + creds.ProviderName = EnvProviderName + cfg.Creds = creds + } + + // Role Metadata + setFromEnvVal(&cfg.RoleARN, roleARNEnvKey) + setFromEnvVal(&cfg.RoleSessionName, roleSessionNameEnvKey) + + // Web identity environment variables + setFromEnvVal(&cfg.WebIdentityTokenFilePath, webIdentityTokenFilePathEnvKey) + + // CSM environment variables + setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey) + setFromEnvVal(&cfg.CSMHost, csmHostEnvKey) + setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) + setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) + + if len(cfg.csmEnabled) != 0 { + v, _ := strconv.ParseBool(cfg.csmEnabled) + cfg.CSMEnabled = &v + } + + regionKeys := regionEnvKeys + profileKeys := profileEnvKeys + if !cfg.EnableSharedConfig { + regionKeys = regionKeys[:1] + profileKeys = profileKeys[:1] + } + + setFromEnvVal(&cfg.Region, regionKeys) + setFromEnvVal(&cfg.Profile, profileKeys) + + // Endpoint discovery is enabled for any value other than "false". + setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey) + if len(cfg.enableEndpointDiscovery) > 0 { + cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false") + } + + setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey) + setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey) + + if len(cfg.SharedCredentialsFile) == 0 { + cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() + } + if len(cfg.SharedConfigFile) == 0 { + cfg.SharedConfigFile = defaults.SharedConfigFilename() + } + + cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE") + + var err error + // STS Regional Endpoint variable + for _, k := range stsRegionalEndpointKey { + if v := os.Getenv(k); len(v) != 0 { + cfg.STSRegionalEndpoint, err = endpoints.GetSTSRegionalEndpoint(v) + if err != nil { + return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) + } + } + } + + // S3 Regional Endpoint variable + for _, k := range s3UsEast1RegionalEndpoint { + if v := os.Getenv(k); len(v) != 0 { + cfg.S3UsEast1RegionalEndpoint, err = endpoints.GetS3UsEast1RegionalEndpoint(v) + if err != nil { + return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) + } + } + } + + var s3UseARNRegion string + setFromEnvVal(&s3UseARNRegion, s3UseARNRegionEnvKey) + if len(s3UseARNRegion) != 0 { + switch { + case strings.EqualFold(s3UseARNRegion, "false"): + cfg.S3UseARNRegion = false + case strings.EqualFold(s3UseARNRegion, "true"): + cfg.S3UseARNRegion = true + default: + return envConfig{}, fmt.Errorf( + "invalid value for environment variable, %s=%s, need true or false", + s3UseARNRegionEnvKey[0], s3UseARNRegion) + } + } + + return cfg, nil +} + +func setFromEnvVal(dst *string, keys []string) { + for _, k := range keys { + if v := os.Getenv(k); len(v) != 0 { + *dst = v + break + } + } +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/session/session.go new file mode 100644 index 000000000..0ff499605 --- /dev/null +++
b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -0,0 +1,734 @@ +package session + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/csm" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" +) + +const ( + // ErrCodeSharedConfig represents an error that occurs in the shared + // configuration logic + ErrCodeSharedConfig = "SharedConfigErr" +) + +// ErrSharedConfigSourceCollision will be returned if a section contains both +// source_profile and credential_source +var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only source profile or credential source can be specified, not both", nil) + +// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment +// variables are empty and Environment was set as the credential source +var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil) + +// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided +var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil) + +// A Session provides a central location to create service clients from and +// store configurations and request handlers for those services. +// +// Sessions are safe to create service clients concurrently, but it is not safe +// to mutate the Session concurrently. +// +// The Session satisfies the service client's client.ConfigProvider. +type Session struct { + Config *aws.Config + Handlers request.Handlers +} + +// New creates a new instance of the Session, merging in the provided configs +// on top of the SDK's default configurations. Once the Session is created it +// can be mutated to modify the Config or Handlers. The Session is safe to be +// read concurrently, but it should not be written to concurrently. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value, the New +// method could now encounter an error when loading the configuration. When +// the environment variable is set, and an error occurs, New will return a +// session that will fail all requests reporting the error that occurred while +// loading the session. Use NewSession to get the error when creating the +// session. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded, in addition to +// the shared credentials file (~/.aws/credentials). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. +// +// Deprecated: Use NewSession functions to create sessions instead. NewSession +// has the same functionality as New except an error can be returned when the +// func is called instead of waiting to receive an error until a request is made. +func New(cfgs ...*aws.Config) *Session { + // load initial config from environment + envCfg, envErr := loadEnvConfig() + + if envCfg.EnableSharedConfig { + var cfg aws.Config + cfg.MergeIn(cfgs...)
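+ + // Delegate to NewSessionWithOptions so shared config errors can be + // captured; the legacy New API cannot return them directly, so they are + // replayed on each request below.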
+ s, err := NewSessionWithOptions(Options{ + Config: cfg, + SharedConfigState: SharedConfigEnable, + }) + if err != nil { + // Old session.New expected all errors to be discovered when + // a request is made, and would report the errors then. This + // needs to be replicated if an error occurs while creating + // the session. + msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " + + "Use session.NewSession to handle errors occurring during session creation." + + // Session creation failed, need to report the error and prevent + // any requests from succeeding. + s = &Session{Config: defaults.Config()} + s.logDeprecatedNewSessionError(msg, err, cfgs) + } + + return s + } + + s := deprecatedNewSession(cfgs...) + if envErr != nil { + msg := "failed to load env config" + s.logDeprecatedNewSessionError(msg, envErr, cfgs) + } + + if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil { + if l := s.Config.Logger; l != nil { + l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) + } + } else if csmCfg.Enabled { + err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger) + if err != nil { + msg := "failed to enable CSM" + s.logDeprecatedNewSessionError(msg, err, cfgs) + } + } + + return s +} + +// NewSession returns a new Session created from SDK defaults, config files, +// environment, and user provided config files. Once the Session is created +// it can be mutated to modify the Config or Handlers. The Session is safe to +// be read concurrently, but it should not be written to concurrently. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded in addition to +// the shared credentials file (~/.aws/credentials). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. Enabling the Shared Config will also allow the Session +// to be built with retrieving credentials with AssumeRole set in the config. +// +// See the NewSessionWithOptions func for information on how to override or +// control through code how the Session will be created, such as specifying the +// config profile, and controlling if shared config is enabled or not. +func NewSession(cfgs ...*aws.Config) (*Session, error) { + opts := Options{} + opts.Config.MergeIn(cfgs...) + + return NewSessionWithOptions(opts) +} + +// SharedConfigState provides the ability to optionally override the state +// of the session's creation based on the shared config being enabled or +// disabled. +type SharedConfigState int + +const ( + // SharedConfigStateFromEnv does not override any state of the + // AWS_SDK_LOAD_CONFIG env var. It is the default value of the + // SharedConfigState type. + SharedConfigStateFromEnv SharedConfigState = iota + + // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value + // and disables the shared config functionality. + SharedConfigDisable + + // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value + // and enables the shared config functionality. + SharedConfigEnable +) + +// Options provides the means to control how a Session is created and what +// configuration values will be loaded. +type Options struct { + // Provides config values for the SDK to use when creating service clients + // and making API requests to services. Any value set in this field + // will override the associated value provided by the SDK defaults, + // environment or config files where relevant.
+ // + // If not set, configuration values from SDK defaults, environment, + // config will be used. + Config aws.Config + + // Overrides the config profile the Session should be created from. If not + // set the value of the environment variable will be loaded (AWS_PROFILE, + // or AWS_DEFAULT_PROFILE if the Shared Config is enabled). + // + // If not set and environment variables are not set the "default" + // (DefaultSharedConfigProfile) will be used as the profile to load the + // session config from. + Profile string + + // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG + // environment variable. By default a Session will be created using the + // value provided by the AWS_SDK_LOAD_CONFIG environment variable. + // + // Setting this value to SharedConfigEnable or SharedConfigDisable + // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable + // and enable or disable the shared config functionality. + SharedConfigState SharedConfigState + + // Ordered list of files the session will load configuration from. + // It overrides the environment variables AWS_SHARED_CREDENTIALS_FILE and + // AWS_CONFIG_FILE. + SharedConfigFiles []string + + // When the SDK's shared config is configured to assume a role with MFA + // this option is required in order to provide the mechanism that will + // retrieve the MFA token. There is no default value for this field. If + // it is not set an error will be returned when creating the session. + // + // This token provider will be called whenever the assumed role's + // credentials need to be refreshed. Within the context of service clients + // all sharing the same session the SDK will ensure calls to the token + // provider are atomic. When sharing a token provider across multiple + // sessions additional synchronization logic is needed to ensure the + // token providers do not introduce race conditions. It is recommended to + // share the session where possible. + // + // stscreds.StdinTokenProvider is a basic implementation that will prompt + // from stdin for the MFA token code. + // + // This field is only used if the shared configuration is enabled, and + // the config enables assume role with MFA via the mfa_serial field. + AssumeRoleTokenProvider func() (string, error) + + // When the SDK's shared config is configured to assume a role this option + // may be provided to set the expiry duration of the STS credentials. + // Defaults to 15 minutes if not set as documented in the + // stscreds.AssumeRoleProvider. + AssumeRoleDuration time.Duration + + // Reader for a custom Certificate Authority (CA) bundle in PEM format that + // the SDK will use instead of the default system's root CA bundle. Use this + // only if you want to replace the CA bundle the SDK uses for TLS requests. + // + // Enabling this option will attempt to merge the Transport into the SDK's HTTP + // client. If the client's Transport is not a http.Transport an error will be + // returned. If the Transport's TLS config is set this option will cause the SDK + // to overwrite the Transport's TLS config's RootCAs value. If the CA + // bundle reader contains multiple certificates all of them will be loaded. + // + // This option has priority over the AWS_CA_BUNDLE environment variable, and + // will be used if both are set.
+ CustomCABundle io.Reader + + // The handlers that the session and all API clients will be created with. + // This must be a complete set of handlers. Use the defaults.Handlers() + // function to initialize this value before changing the handlers to be + // used by the SDK. + Handlers request.Handlers +} + +// NewSessionWithOptions returns a new Session created from SDK defaults, config files, +// environment, and user provided config files. This func uses the Options +// values to configure how the Session is created. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded in addition to +// the shared credentials file (~/.aws/credentials). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. Enabling the Shared Config will also allow the Session +// to be built with retrieving credentials with AssumeRole set in the config. +// +// // Equivalent to session.New +// sess := session.Must(session.NewSessionWithOptions(session.Options{})) +// +// // Specify profile to load for the session's config +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Profile: "profile_name", +// })) +// +// // Specify profile for config and region for requests +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Config: aws.Config{Region: aws.String("us-east-1")}, +// Profile: "profile_name", +// })) +// +// // Force enable Shared Config support +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// SharedConfigState: session.SharedConfigEnable, +// })) +func NewSessionWithOptions(opts Options) (*Session, error) { + var envCfg envConfig + var err error + if opts.SharedConfigState == SharedConfigEnable { + envCfg, err = loadSharedEnvConfig() + if err != nil { + return nil, fmt.Errorf("failed to load shared config, %v", err) + } + } else { + envCfg, err = loadEnvConfig() + if err != nil { + return nil, fmt.Errorf("failed to load environment config, %v", err) + } + } + + if len(opts.Profile) != 0 { + envCfg.Profile = opts.Profile + } + + switch opts.SharedConfigState { + case SharedConfigDisable: + envCfg.EnableSharedConfig = false + case SharedConfigEnable: + envCfg.EnableSharedConfig = true + } + + // Only use AWS_CA_BUNDLE if session option is not provided. + if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil { + f, err := os.Open(envCfg.CustomCABundle) + if err != nil { + return nil, awserr.New("LoadCustomCABundleError", + "failed to open custom CA bundle PEM file", err) + } + defer f.Close() + opts.CustomCABundle = f + } + + return newSession(opts, envCfg, &opts.Config) +} + +// Must is a helper function to ensure the Session is valid and there was no +// error when calling a NewSession function. +// +// This helper is intended to be used in variable initialization to load the +// Session and configuration at startup. Such as: +// +// var sess = session.Must(session.NewSession()) +func Must(sess *Session, err error) *Session { + if err != nil { + panic(err) + } + + return sess +} + +func deprecatedNewSession(cfgs ...*aws.Config) *Session { + cfg := defaults.Config() + handlers := defaults.Handlers() + + // Apply the passed in configs so the configuration can be applied to the + // default credential chain + cfg.MergeIn(cfgs...) 
+ if cfg.EndpointResolver == nil { + // An endpoint resolver is required for a session to be able to provide + // endpoints for service client configurations. + cfg.EndpointResolver = endpoints.DefaultResolver() + } + cfg.Credentials = defaults.CredChain(cfg, handlers) + + // Reapply any passed in configs to override credentials if set + cfg.MergeIn(cfgs...) + + s := &Session{ + Config: cfg, + Handlers: handlers, + } + + initHandlers(s) + return s +} + +func enableCSM(handlers *request.Handlers, cfg csmConfig, logger aws.Logger) error { + if logger != nil { + logger.Log("Enabling CSM") + } + + r, err := csm.Start(cfg.ClientID, csm.AddressWithDefaults(cfg.Host, cfg.Port)) + if err != nil { + return err + } + r.InjectHandlers(handlers) + + return nil +} + +func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { + cfg := defaults.Config() + + handlers := opts.Handlers + if handlers.IsEmpty() { + handlers = defaults.Handlers() + } + + // Get a merged version of the user provided config to determine if + // credentials were provided. + userCfg := &aws.Config{} + userCfg.MergeIn(cfgs...) + cfg.MergeIn(userCfg) + + // Ordered config files will be loaded in, with later files overwriting + // previous config file values. + var cfgFiles []string + if opts.SharedConfigFiles != nil { + cfgFiles = opts.SharedConfigFiles + } else { + cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile} + if !envCfg.EnableSharedConfig { + // The shared config file (~/.aws/config) is only loaded if instructed + // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG). + cfgFiles = cfgFiles[1:] + } + } + + // Load additional config from file(s) + sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig) + if err != nil { + if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) { + // Special case where the user has not explicitly specified an AWS_PROFILE, + // or session.Options.profile, shared config is not enabled, and the + // environment has credentials, allow the shared config file to fail to + // load since the user has already provided credentials, and nothing else + // is required to be read from the file.
// Github(aws/aws-sdk-go#2455) + } else if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + return nil, err + } + } + + if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil { + return nil, err + } + + s := &Session{ + Config: cfg, + Handlers: handlers, + } + + initHandlers(s) + + if csmCfg, err := loadCSMConfig(envCfg, cfgFiles); err != nil { + if l := s.Config.Logger; l != nil { + l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) + } + } else if csmCfg.Enabled { + err = enableCSM(&s.Handlers, csmCfg, s.Config.Logger) + if err != nil { + return nil, err + } + } + + // Set up HTTP client with custom cert bundle if enabled + if opts.CustomCABundle != nil { + if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil { + return nil, err + } + } + + return s, nil +} + +type csmConfig struct { + Enabled bool + Host string + Port string + ClientID string +} + +var csmProfileName = "aws_csm" + +func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) { + if envCfg.CSMEnabled != nil { + if *envCfg.CSMEnabled { + return csmConfig{ + Enabled: true, + ClientID: envCfg.CSMClientID, + Host: envCfg.CSMHost, + Port: envCfg.CSMPort, + }, nil + } + return csmConfig{}, nil + } + + sharedCfg, err := loadSharedConfig(csmProfileName, cfgFiles, false) + if err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + return csmConfig{}, err + } + } + if sharedCfg.CSMEnabled != nil && *sharedCfg.CSMEnabled { + return csmConfig{ + Enabled: true, + ClientID: sharedCfg.CSMClientID, + Host: sharedCfg.CSMHost, + Port: sharedCfg.CSMPort, + }, nil + } + + return csmConfig{}, nil +} + +func loadCustomCABundle(s *Session, bundle io.Reader) error { + var t *http.Transport + switch v := s.Config.HTTPClient.Transport.(type) { + case *http.Transport: + t = v + default: + if s.Config.HTTPClient.Transport != nil { + return awserr.New("LoadCustomCABundleError", + "unable to load custom CA bundle, HTTPClient's transport unsupported type", nil) + } + } + if t == nil { + // Nil transport implies `http.DefaultTransport` should be used. Since + // the SDK cannot modify nor copy the `DefaultTransport`, specifying + // the values is the next closest behavior.
+ t = getCABundleTransport() + } + + p, err := loadCertPool(bundle) + if err != nil { + return err + } + if t.TLSClientConfig == nil { + t.TLSClientConfig = &tls.Config{} + } + t.TLSClientConfig.RootCAs = p + + s.Config.HTTPClient.Transport = t + + return nil +} + +func loadCertPool(r io.Reader) (*x509.CertPool, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, awserr.New("LoadCustomCABundleError", + "failed to read custom CA bundle PEM file", err) + } + + p := x509.NewCertPool() + if !p.AppendCertsFromPEM(b) { + return nil, awserr.New("LoadCustomCABundleError", + "failed to load custom CA bundle PEM file", err) + } + + return p, nil +} + +func mergeConfigSrcs(cfg, userCfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) error { + + // Region if not already set by user + if len(aws.StringValue(cfg.Region)) == 0 { + if len(envCfg.Region) > 0 { + cfg.WithRegion(envCfg.Region) + } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 { + cfg.WithRegion(sharedCfg.Region) + } + } + + if cfg.EnableEndpointDiscovery == nil { + if envCfg.EnableEndpointDiscovery != nil { + cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery) + } else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil { + cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery) + } + } + + // Regional Endpoint flag for STS endpoint resolving + mergeSTSRegionalEndpointConfig(cfg, []endpoints.STSRegionalEndpoint{ + userCfg.STSRegionalEndpoint, + envCfg.STSRegionalEndpoint, + sharedCfg.STSRegionalEndpoint, + endpoints.LegacySTSEndpoint, + }) + + // Regional Endpoint flag for S3 endpoint resolving + mergeS3UsEast1RegionalEndpointConfig(cfg, []endpoints.S3UsEast1RegionalEndpoint{ + userCfg.S3UsEast1RegionalEndpoint, + envCfg.S3UsEast1RegionalEndpoint, + sharedCfg.S3UsEast1RegionalEndpoint, + endpoints.LegacyS3UsEast1Endpoint, + }) + + // Configure credentials if not already set by the user when creating the + // Session. + if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { + creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) + if err != nil { + return err + } + cfg.Credentials = creds + } + + cfg.S3UseARNRegion = userCfg.S3UseARNRegion + if cfg.S3UseARNRegion == nil { + cfg.S3UseARNRegion = &envCfg.S3UseARNRegion + } + if cfg.S3UseARNRegion == nil { + cfg.S3UseARNRegion = &sharedCfg.S3UseARNRegion + } + + return nil +} + +func mergeSTSRegionalEndpointConfig(cfg *aws.Config, values []endpoints.STSRegionalEndpoint) { + for _, v := range values { + if v != endpoints.UnsetSTSEndpoint { + cfg.STSRegionalEndpoint = v + break + } + } +} + +func mergeS3UsEast1RegionalEndpointConfig(cfg *aws.Config, values []endpoints.S3UsEast1RegionalEndpoint) { + for _, v := range values { + if v != endpoints.UnsetS3UsEast1Endpoint { + cfg.S3UsEast1RegionalEndpoint = v + break + } + } +} + +func initHandlers(s *Session) { + // Add the Validate parameter handler if it is not disabled. + s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) + if !aws.BoolValue(s.Config.DisableParamValidation) { + s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) + } +} + +// Copy creates and returns a copy of the current Session, copying the config +// and handlers. If any additional configs are provided they will be merged +// on top of the Session's copied config. +// +// // Create a copy of the current Session, configured for the us-west-2 region. 
+// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) +func (s *Session) Copy(cfgs ...*aws.Config) *Session { + newSession := &Session{ + Config: s.Config.Copy(cfgs...), + Handlers: s.Handlers.Copy(), + } + + initHandlers(newSession) + + return newSession +} + +// ClientConfig satisfies the client.ConfigProvider interface and is used to +// configure the service client instances. Passing the Session to the service +// client's constructor (New) will use this method to configure the client. +func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) + + region := aws.StringValue(s.Config.Region) + resolved, err := s.resolveEndpoint(service, region, s.Config) + if err != nil { + s.Handlers.Validate.PushBack(func(r *request.Request) { + if len(r.ClientInfo.Endpoint) != 0 { + // Error occurred while resolving endpoint, but the request + // being invoked has had an endpoint specified after the client + // was created. + return + } + r.Error = err + }) + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + PartitionID: resolved.PartitionID, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, + } +} + +func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) { + + if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 { + return endpoints.ResolvedEndpoint{ + URL: endpoints.AddScheme(ep, aws.BoolValue(cfg.DisableSSL)), + SigningRegion: region, + }, nil + } + + resolved, err := cfg.EndpointResolver.EndpointFor(service, region, + func(opt *endpoints.Options) { + opt.DisableSSL = aws.BoolValue(cfg.DisableSSL) + opt.UseDualStack = aws.BoolValue(cfg.UseDualStack) + // Support for STSRegionalEndpoint where the STSRegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. + opt.STSRegionalEndpoint = cfg.STSRegionalEndpoint + + // Support for S3UsEast1RegionalEndpoint where the S3UsEast1RegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. + opt.S3UsEast1RegionalEndpoint = cfg.S3UsEast1RegionalEndpoint + + // Support the condition where the service is modeled but its + // endpoint metadata is not available. + opt.ResolveUnknownService = true + }, + ) + if err != nil { + return endpoints.ResolvedEndpoint{}, err + } + + return resolved, nil +} + +// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception +// that the EndpointResolver will not be used to resolve the endpoint. The only +// endpoint set must come from the aws.Config.Endpoint field. +func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) 
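+ + // Unlike ClientConfig above, no endpoint resolver is consulted here; + // only an endpoint set explicitly in aws.Config is honored, and an + // unset endpoint is left empty.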
+ + var resolved endpoints.ResolvedEndpoint + if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { + resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) + resolved.SigningRegion = aws.StringValue(s.Config.Region) + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, + } +} + +// logDeprecatedNewSessionError function enables error handling for session +func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aws.Config) { + // Session creation failed, need to report the error and prevent + // any requests from succeeding. + s.Config.MergeIn(cfgs...) + s.Config.Logger.Log("ERROR:", msg, "Error:", err) + s.Handlers.Validate.PushBack(func(r *request.Request) { + r.Error = err + }) +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go new file mode 100644 index 000000000..680805a38 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -0,0 +1,555 @@ +package session + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/internal/ini" +) + +const ( + // Static Credentials group + accessKeyIDKey = `aws_access_key_id` // group required + secretAccessKey = `aws_secret_access_key` // group required + sessionTokenKey = `aws_session_token` // optional + + // Assume Role Credentials group + roleArnKey = `role_arn` // group required + sourceProfileKey = `source_profile` // group required (or credential_source) + credentialSourceKey = `credential_source` // group required (or source_profile) + externalIDKey = `external_id` // optional + mfaSerialKey = `mfa_serial` // optional + roleSessionNameKey = `role_session_name` // optional + roleDurationSecondsKey = "duration_seconds" // optional + + // CSM options + csmEnabledKey = `csm_enabled` + csmHostKey = `csm_host` + csmPortKey = `csm_port` + csmClientIDKey = `csm_client_id` + + // Additional Config fields + regionKey = `region` + + // endpoint discovery group + enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional + + // External Credential Process + credentialProcessKey = `credential_process` // optional + + // Web Identity Token File + webIdentityTokenFileKey = `web_identity_token_file` // optional + + // Additional config fields for regional or legacy endpoints + stsRegionalEndpointSharedKey = `sts_regional_endpoints` + + // Additional config fields for regional or legacy endpoints + s3UsEast1RegionalSharedKey = `s3_us_east_1_regional_endpoint` + + // DefaultSharedConfigProfile is the default profile to be used when + // loading configuration from the config files if another profile name + // is not provided. + DefaultSharedConfigProfile = `default` + + // S3 ARN Region Usage + s3UseARNRegionKey = "s3_use_arn_region" +) + +// sharedConfig represents the configuration fields of the SDK config files. +type sharedConfig struct { + // Credentials values from the config file. Both aws_access_key_id and + // aws_secret_access_key must be provided together in the same file to be + // considered valid. The values will be ignored if not a complete group. 
+ // aws_session_token is an optional field that can be provided if both of + // the other two fields are also provided. + // + // aws_access_key_id + // aws_secret_access_key + // aws_session_token + Creds credentials.Value + + CredentialSource string + CredentialProcess string + WebIdentityTokenFile string + + RoleARN string + RoleSessionName string + ExternalID string + MFASerial string + AssumeRoleDuration *time.Duration + + SourceProfileName string + SourceProfile *sharedConfig + + // Region is the region the SDK should use for looking up AWS service + // endpoints and signing requests. + // + // region + Region string + + // EnableEndpointDiscovery can be enabled in the shared config by setting + // endpoint_discovery_enabled to true + // + // endpoint_discovery_enabled = true + EnableEndpointDiscovery *bool + + // CSM Options + CSMEnabled *bool + CSMHost string + CSMPort string + CSMClientID string + + // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service + // + // sts_regional_endpoints = regional + // This can take value as `LegacySTSEndpoint` or `RegionalSTSEndpoint` + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service + // + // s3_us_east_1_regional_endpoint = regional + // This can take value as `LegacyS3UsEast1Endpoint` or `RegionalS3UsEast1Endpoint` + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint + + // Specifies if the S3 service should allow ARNs to direct the region + // the client's requests are sent to. + // + // s3_use_arn_region=true + S3UseARNRegion bool +} + +type sharedConfigFile struct { + Filename string + IniData ini.Sections +} + +// loadSharedConfig retrieves the configuration from the list of files using +// the profile provided. The order the files are listed will determine +// precedence. Values in subsequent files will overwrite values defined in +// earlier files. +// +// For example, given two files A and B. Both define credentials. If the order +// of the files are A then B, B's credential values will be used instead of +// A's. +// +// See sharedConfig.setFromFile for information how the config files +// will be loaded. +func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedConfig, error) { + if len(profile) == 0 { + profile = DefaultSharedConfigProfile + } + + files, err := loadSharedConfigIniFiles(filenames) + if err != nil { + return sharedConfig{}, err + } + + cfg := sharedConfig{} + profiles := map[string]struct{}{} + if err = cfg.setFromIniFiles(profiles, profile, files, exOpts); err != nil { + return sharedConfig{}, err + } + + return cfg, nil +} + +func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { + files := make([]sharedConfigFile, 0, len(filenames)) + + for _, filename := range filenames { + sections, err := ini.OpenFile(filename) + if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile { + // Skip files which can't be opened and read for whatever reason + continue + } else if err != nil { + return nil, SharedConfigLoadError{Filename: filename, Err: err} + } + + files = append(files, sharedConfigFile{ + Filename: filename, IniData: sections, + }) + } + + return files, nil +} + +func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error { + // Trim files from the list that don't exist. 
+	var skippedFiles int
+	var profileNotFoundErr error
+	for _, f := range files {
+		if err := cfg.setFromIniFile(profile, f, exOpts); err != nil {
+			if _, ok := err.(SharedConfigProfileNotExistsError); ok {
+				// Ignore profiles not defined in individual files.
+				profileNotFoundErr = err
+				skippedFiles++
+				continue
+			}
+			return err
+		}
+	}
+	if skippedFiles == len(files) {
+		// If all files were skipped because the profile is not found, return
+		// the original profile not found error.
+		return profileNotFoundErr
+	}
+
+	if _, ok := profiles[profile]; ok {
+		// If this is the second instance of the profile, the Assume Role
+		// options must be cleared because they are only valid for the
+		// first reference of a profile. The self-linked instance of the
+		// profile only has credential provider options.
+		cfg.clearAssumeRoleOptions()
+	} else {
+		// The first time a profile is seen it must either be an assume role
+		// or credentials. Assert that if the credential type requires a role
+		// ARN, the ARN is also set.
+		if err := cfg.validateCredentialsRequireARN(profile); err != nil {
+			return err
+		}
+	}
+	profiles[profile] = struct{}{}
+
+	if err := cfg.validateCredentialType(); err != nil {
+		return err
+	}
+
+	// Link source profiles for assume roles
+	if len(cfg.SourceProfileName) != 0 {
+		// A profile linked via source_profile ignores credential provider
+		// options; the source profile must provide the credentials.
+		cfg.clearCredentialOptions()
+
+		srcCfg := &sharedConfig{}
+		err := srcCfg.setFromIniFiles(profiles, cfg.SourceProfileName, files, exOpts)
+		if err != nil {
+			// A SourceProfile that doesn't exist is an error in configuration.
+			if _, ok := err.(SharedConfigProfileNotExistsError); ok {
+				err = SharedConfigAssumeRoleError{
+					RoleARN:       cfg.RoleARN,
+					SourceProfile: cfg.SourceProfileName,
+				}
+			}
+			return err
+		}
+
+		if !srcCfg.hasCredentials() {
+			return SharedConfigAssumeRoleError{
+				RoleARN:       cfg.RoleARN,
+				SourceProfile: cfg.SourceProfileName,
+			}
+		}
+
+		cfg.SourceProfile = srcCfg
+	}
+
+	return nil
+}
+
+// setFromIniFile loads the configuration from the file using the profile
+// provided. A sharedConfig pointer type value is used so that multiple config
+// file loadings can be chained.
+//
+// Only complete, logically grouped values are loaded; fields in cfg are not
+// set for incomplete groups such as credentials. For example, if a config
+// file includes aws_access_key_id but no aws_secret_access_key, the
+// aws_access_key_id will be ignored.
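+//
+// For illustration only (this example is not part of the upstream docs), a
+// hypothetical profile that would be treated as a complete credentials group:
+//
+//	[default]
+//	aws_access_key_id     = AKIDEXAMPLE
+//	aws_secret_access_key = EXAMPLESECRETKEY
+//	aws_session_token     = EXAMPLETOKEN
+//
+// Dropping aws_secret_access_key from this profile would cause the whole
+// group, including aws_access_key_id, to be ignored.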
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, exOpts bool) error { + section, ok := file.IniData.GetSection(profile) + if !ok { + // Fallback to to alternate profile name: profile + section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile)) + if !ok { + return SharedConfigProfileNotExistsError{Profile: profile, Err: nil} + } + } + + if exOpts { + // Assume Role Parameters + updateString(&cfg.RoleARN, section, roleArnKey) + updateString(&cfg.ExternalID, section, externalIDKey) + updateString(&cfg.MFASerial, section, mfaSerialKey) + updateString(&cfg.RoleSessionName, section, roleSessionNameKey) + updateString(&cfg.SourceProfileName, section, sourceProfileKey) + updateString(&cfg.CredentialSource, section, credentialSourceKey) + updateString(&cfg.Region, section, regionKey) + + if section.Has(roleDurationSecondsKey) { + d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second + cfg.AssumeRoleDuration = &d + } + + if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 { + sre, err := endpoints.GetSTSRegionalEndpoint(v) + if err != nil { + return fmt.Errorf("failed to load %s from shared config, %s, %v", + stsRegionalEndpointSharedKey, file.Filename, err) + } + cfg.STSRegionalEndpoint = sre + } + + if v := section.String(s3UsEast1RegionalSharedKey); len(v) != 0 { + sre, err := endpoints.GetS3UsEast1RegionalEndpoint(v) + if err != nil { + return fmt.Errorf("failed to load %s from shared config, %s, %v", + s3UsEast1RegionalSharedKey, file.Filename, err) + } + cfg.S3UsEast1RegionalEndpoint = sre + } + } + + updateString(&cfg.CredentialProcess, section, credentialProcessKey) + updateString(&cfg.WebIdentityTokenFile, section, webIdentityTokenFileKey) + + // Shared Credentials + creds := credentials.Value{ + AccessKeyID: section.String(accessKeyIDKey), + SecretAccessKey: section.String(secretAccessKey), + SessionToken: section.String(sessionTokenKey), + ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), + } + if creds.HasKeys() { + cfg.Creds = creds + } + + // Endpoint discovery + updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey) + + // CSM options + updateBoolPtr(&cfg.CSMEnabled, section, csmEnabledKey) + updateString(&cfg.CSMHost, section, csmHostKey) + updateString(&cfg.CSMPort, section, csmPortKey) + updateString(&cfg.CSMClientID, section, csmClientIDKey) + + updateBool(&cfg.S3UseARNRegion, section, s3UseARNRegionKey) + + return nil +} + +func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error { + var credSource string + + switch { + case len(cfg.SourceProfileName) != 0: + credSource = sourceProfileKey + case len(cfg.CredentialSource) != 0: + credSource = credentialSourceKey + case len(cfg.WebIdentityTokenFile) != 0: + credSource = webIdentityTokenFileKey + } + + if len(credSource) != 0 && len(cfg.RoleARN) == 0 { + return CredentialRequiresARNError{ + Type: credSource, + Profile: profile, + } + } + + return nil +} + +func (cfg *sharedConfig) validateCredentialType() error { + // Only one or no credential type can be defined. 
+ if !oneOrNone( + len(cfg.SourceProfileName) != 0, + len(cfg.CredentialSource) != 0, + len(cfg.CredentialProcess) != 0, + len(cfg.WebIdentityTokenFile) != 0, + ) { + return ErrSharedConfigSourceCollision + } + + return nil +} + +func (cfg *sharedConfig) hasCredentials() bool { + switch { + case len(cfg.SourceProfileName) != 0: + case len(cfg.CredentialSource) != 0: + case len(cfg.CredentialProcess) != 0: + case len(cfg.WebIdentityTokenFile) != 0: + case cfg.Creds.HasKeys(): + default: + return false + } + + return true +} + +func (cfg *sharedConfig) clearCredentialOptions() { + cfg.CredentialSource = "" + cfg.CredentialProcess = "" + cfg.WebIdentityTokenFile = "" + cfg.Creds = credentials.Value{} +} + +func (cfg *sharedConfig) clearAssumeRoleOptions() { + cfg.RoleARN = "" + cfg.ExternalID = "" + cfg.MFASerial = "" + cfg.RoleSessionName = "" + cfg.SourceProfileName = "" +} + +func oneOrNone(bs ...bool) bool { + var count int + + for _, b := range bs { + if b { + count++ + if count > 1 { + return false + } + } + } + + return true +} + +// updateString will only update the dst with the value in the section key, key +// is present in the section. +func updateString(dst *string, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = section.String(key) +} + +// updateBool will only update the dst with the value in the section key, key +// is present in the section. +func updateBool(dst *bool, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = section.Bool(key) +} + +// updateBoolPtr will only update the dst with the value in the section key, +// key is present in the section. +func updateBoolPtr(dst **bool, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = new(bool) + **dst = section.Bool(key) +} + +// SharedConfigLoadError is an error for the shared config file failed to load. +type SharedConfigLoadError struct { + Filename string + Err error +} + +// Code is the short id of the error. +func (e SharedConfigLoadError) Code() string { + return "SharedConfigLoadError" +} + +// Message is the description of the error +func (e SharedConfigLoadError) Message() string { + return fmt.Sprintf("failed to load config file, %s", e.Filename) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigLoadError) OrigErr() error { + return e.Err +} + +// Error satisfies the error interface. +func (e SharedConfigLoadError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", e.Err) +} + +// SharedConfigProfileNotExistsError is an error for the shared config when +// the profile was not find in the config file. +type SharedConfigProfileNotExistsError struct { + Profile string + Err error +} + +// Code is the short id of the error. +func (e SharedConfigProfileNotExistsError) Code() string { + return "SharedConfigProfileNotExistsError" +} + +// Message is the description of the error +func (e SharedConfigProfileNotExistsError) Message() string { + return fmt.Sprintf("failed to get profile, %s", e.Profile) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigProfileNotExistsError) OrigErr() error { + return e.Err +} + +// Error satisfies the error interface. 
+func (e SharedConfigProfileNotExistsError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", e.Err) +} + +// SharedConfigAssumeRoleError is an error for the shared config when the +// profile contains assume role information, but that information is invalid +// or not complete. +type SharedConfigAssumeRoleError struct { + RoleARN string + SourceProfile string +} + +// Code is the short id of the error. +func (e SharedConfigAssumeRoleError) Code() string { + return "SharedConfigAssumeRoleError" +} + +// Message is the description of the error +func (e SharedConfigAssumeRoleError) Message() string { + return fmt.Sprintf( + "failed to load assume role for %s, source profile %s has no shared credentials", + e.RoleARN, e.SourceProfile, + ) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigAssumeRoleError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e SharedConfigAssumeRoleError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} + +// CredentialRequiresARNError provides the error for shared config credentials +// that are incorrectly configured in the shared config or credentials file. +type CredentialRequiresARNError struct { + // type of credentials that were configured. + Type string + + // Profile name the credentials were in. + Profile string +} + +// Code is the short id of the error. +func (e CredentialRequiresARNError) Code() string { + return "CredentialRequiresARNError" +} + +// Message is the description of the error +func (e CredentialRequiresARNError) Message() string { + return fmt.Sprintf( + "credential type %s requires role_arn, profile %s", + e.Type, e.Profile, + ) +} + +// OrigErr is the underlying error that caused the failure. +func (e CredentialRequiresARNError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. 
+func (e CredentialRequiresARNError) Error() string {
+	return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
new file mode 100644
index 000000000..07ea799fb
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
@@ -0,0 +1,81 @@
+package v4
+
+import (
+	"github.com/aws/aws-sdk-go/internal/strings"
+)
+
+// rules houses a set of rules needed for validation of a
+// string value
+type rules []rule
+
+// rule interface allows for more flexible rules; it simply
+// checks whether a value adheres to the rule
+type rule interface {
+	IsValid(value string) bool
+}
+
+// IsValid iterates through all rules and reports whether any rule
+// applies to the value; nested rules are supported
+func (r rules) IsValid(value string) bool {
+	for _, rule := range r {
+		if rule.IsValid(value) {
+			return true
+		}
+	}
+	return false
+}
+
+// mapRule is a generic rule for maps
+type mapRule map[string]struct{}
+
+// IsValid for the map rule reports whether the value exists in the map
+func (m mapRule) IsValid(value string) bool {
+	_, ok := m[value]
+	return ok
+}
+
+// whitelist is a generic rule for whitelisting
+type whitelist struct {
+	rule
+}
+
+// IsValid for whitelist checks if the value is within the whitelist
+func (w whitelist) IsValid(value string) bool {
+	return w.rule.IsValid(value)
+}
+
+// blacklist is a generic rule for blacklisting
+type blacklist struct {
+	rule
+}
+
+// IsValid for blacklist checks if the value is not within the blacklist
+func (b blacklist) IsValid(value string) bool {
+	return !b.rule.IsValid(value)
+}
+
+type patterns []string
+
+// IsValid for patterns checks each pattern and reports whether a match
+// has been found
+func (p patterns) IsValid(value string) bool {
+	for _, pattern := range p {
+		if strings.HasPrefixFold(value, pattern) {
+			return true
+		}
+	}
+	return false
+}
+
+// inclusiveRules allows rules to depend on one another
+type inclusiveRules []rule
+
+// IsValid will return true if all rules are true
+func (r inclusiveRules) IsValid(value string) bool {
+	for _, rule := range r {
+		if !rule.IsValid(value) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
new file mode 100644
index 000000000..6aa2ed241
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
@@ -0,0 +1,7 @@
+package v4
+
+// WithUnsignedPayload enables unsigned payloads by setting the Signer's
+// UnsignedPayload field to true.
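+//
+// A minimal usage sketch (illustrative only, not from the upstream docs),
+// where creds is a *credentials.Credentials value:
+//
+//	signer := v4.NewSigner(creds, v4.WithUnsignedPayload)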
+func WithUnsignedPayload(v4 *Signer) { + v4.UnsignedPayload = true +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go new file mode 100644 index 000000000..f35fc860b --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go @@ -0,0 +1,13 @@ +// +build !go1.7 + +package v4 + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws" +) + +func requestContext(r *http.Request) aws.Context { + return aws.BackgroundContext() +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go new file mode 100644 index 000000000..fed5c859c --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go @@ -0,0 +1,13 @@ +// +build go1.7 + +package v4 + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws" +) + +func requestContext(r *http.Request) aws.Context { + return r.Context() +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go new file mode 100644 index 000000000..02cbd97e2 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go @@ -0,0 +1,63 @@ +package v4 + +import ( + "encoding/hex" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" +) + +type credentialValueProvider interface { + Get() (credentials.Value, error) +} + +// StreamSigner implements signing of event stream encoded payloads +type StreamSigner struct { + region string + service string + + credentials credentialValueProvider + + prevSig []byte +} + +// NewStreamSigner creates a SigV4 signer used to sign Event Stream encoded messages +func NewStreamSigner(region, service string, seedSignature []byte, credentials *credentials.Credentials) *StreamSigner { + return &StreamSigner{ + region: region, + service: service, + credentials: credentials, + prevSig: seedSignature, + } +} + +// GetSignature takes an event stream encoded headers and payload and returns a signature +func (s *StreamSigner) GetSignature(headers, payload []byte, date time.Time) ([]byte, error) { + credValue, err := s.credentials.Get() + if err != nil { + return nil, err + } + + sigKey := deriveSigningKey(s.region, s.service, credValue.SecretAccessKey, date) + + keyPath := buildSigningScope(s.region, s.service, date) + + stringToSign := buildEventStreamStringToSign(headers, payload, s.prevSig, keyPath, date) + + signature := hmacSHA256(sigKey, []byte(stringToSign)) + s.prevSig = signature + + return signature, nil +} + +func buildEventStreamStringToSign(headers, payload, prevSig []byte, scope string, date time.Time) string { + return strings.Join([]string{ + "AWS4-HMAC-SHA256-PAYLOAD", + formatTime(date), + scope, + hex.EncodeToString(prevSig), + hex.EncodeToString(hashSHA256(headers)), + hex.EncodeToString(hashSHA256(payload)), + }, "\n") +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go new file mode 100644 index 000000000..bd082e9d1 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go @@ -0,0 +1,24 @@ +// +build go1.5 + +package v4 + +import ( + "net/url" + "strings" +) + +func getURIPath(u *url.URL) 
string {
+	var uri string
+
+	if len(u.Opaque) > 0 {
+		uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
+	} else {
+		uri = u.EscapedPath()
+	}
+
+	if len(uri) == 0 {
+		uri = "/"
+	}
+
+	return uri
+}
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
new file mode 100644
index 000000000..d71f7b3f4
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
@@ -0,0 +1,846 @@
+// Package v4 implements request signing with AWS Signature Version 4.
+//
+// Provides request signing for requests that need to be signed with
+// AWS V4 Signatures.
+//
+// Standalone Signer
+//
+// Generally using the signer outside of the SDK should not require any additional
+// logic when using Go v1.5 or higher. The signer does this by taking advantage
+// of the URL.EscapedPath method. If your request URI requires additional escaping
+// you may need to use URL.Opaque to define what the raw URI should be sent
+// to the service as.
+//
+// The signer will first check the URL.Opaque field, and use its value if set.
+// The signer does require the URL.Opaque field to be set in the form of:
+//
+//     "//<hostname>/<path>"
+//
+//     // e.g.
+//     "//example.com/some/path"
+//
+// The leading "//" and hostname are required or the URL.Opaque escaping will
+// not work correctly.
+//
+// If URL.Opaque is not set the signer will fall back to the URL.EscapedPath()
+// method and use the returned value. If you're using Go v1.4 you must set
+// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with
+// Go v1.5 the signer will fall back to URL.Path.
+//
+// AWS v4 signature validation requires that the canonical string's URI path
+// element must be the URI escaped form of the HTTP request's path.
+// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+//
+// The Go HTTP client will perform escaping automatically on the request. Some
+// of this escaping may cause signature validation errors because the HTTP
+// request differs from the URI path or query the signature was generated for.
+// https://golang.org/pkg/net/url/#URL.EscapedPath
+//
+// Because of this, when using the signer outside of the SDK, explicitly
+// escape the request prior to signing; this will help prevent signature
+// validation errors. This can be done by setting
+// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
+// call URL.EscapedPath() if Opaque is not set.
+//
+// If signing a request intended for an HTTP/2 server, and you're using Go 1.6.2
+// through 1.7.4 you should use URL.RawPath as the pre-escaped form of the
+// request URL. https://github.com/golang/go/issues/16847 points to a bug in
+// Go pre 1.8 that fails to make HTTP2 requests using an absolute URL in the HTTP
+// message. URL.Opaque generally will force Go to make requests with absolute URLs.
+// URL.RawPath does not do this, but RawPath must be a valid escaping of Path
+// or url.EscapedPath will ignore the RawPath escaping.
+//
+// Test `TestStandaloneSign` provides a complete example of using the signer
+// outside of the SDK and pre-escaping the URI path.
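+//
+// A minimal standalone-signing sketch (illustrative only; the credential,
+// service, and region values below are assumptions, not upstream examples):
+//
+//	creds := credentials.NewStaticCredentials("AKIDEXAMPLE", "EXAMPLESECRET", "")
+//	signer := v4.NewSigner(creds)
+//	req, _ := http.NewRequest("GET", "https://sts.us-east-1.amazonaws.com/", nil)
+//	_, err := signer.Sign(req, nil, "sts", "us-east-1", time.Now())
+//	if err != nil {
+//		// handle the signing failure
+//	}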
+package v4 + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkio" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +const ( + authorizationHeader = "Authorization" + authHeaderSignatureElem = "Signature=" + signatureQueryKey = "X-Amz-Signature" + + authHeaderPrefix = "AWS4-HMAC-SHA256" + timeFormat = "20060102T150405Z" + shortTimeFormat = "20060102" + awsV4Request = "aws4_request" + + // emptyStringSHA256 is a SHA256 of an empty string + emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` +) + +var ignoredHeaders = rules{ + blacklist{ + mapRule{ + authorizationHeader: struct{}{}, + "User-Agent": struct{}{}, + "X-Amzn-Trace-Id": struct{}{}, + }, + }, +} + +// requiredSignedHeaders is a whitelist for build canonical headers. +var requiredSignedHeaders = rules{ + whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Grant-Full-control": struct{}{}, + "X-Amz-Grant-Read": struct{}{}, + "X-Amz-Grant-Read-Acp": struct{}{}, + "X-Amz-Grant-Write": struct{}{}, + "X-Amz-Grant-Write-Acp": struct{}{}, + "X-Amz-Metadata-Directive": struct{}{}, + "X-Amz-Mfa": struct{}{}, + "X-Amz-Request-Payer": struct{}{}, + "X-Amz-Server-Side-Encryption": struct{}{}, + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Tagging": struct{}{}, + "X-Amz-Website-Redirect-Location": struct{}{}, + "X-Amz-Content-Sha256": struct{}{}, + }, + }, + patterns{"X-Amz-Meta-"}, +} + +// allowedHoisting is a whitelist for build query headers. The boolean value +// represents whether or not it is a pattern. +var allowedQueryHoisting = inclusiveRules{ + blacklist{requiredSignedHeaders}, + patterns{"X-Amz-"}, +} + +// Signer applies AWS v4 signing to given request. Use this to sign requests +// that need to be signed with AWS V4 Signatures. +type Signer struct { + // The authentication credentials the request will be signed against. + // This value must be set to sign requests. + Credentials *credentials.Credentials + + // Sets the log level the signer should use when reporting information to + // the logger. If the logger is nil nothing will be logged. 
See
+	// aws.LogLevelType for more information on available logging levels
+	//
+	// By default nothing will be logged.
+	Debug aws.LogLevelType
+
+	// The logger logging information will be written to. If the logger
+	// is nil, nothing will be logged.
+	Logger aws.Logger
+
+	// Disables the Signer's moving HTTP header key/value pairs from the HTTP
+	// request header to the request's query string. This is most commonly used
+	// with pre-signed requests preventing headers from being added to the
+	// request's query string.
+	DisableHeaderHoisting bool
+
+	// Disables the automatic escaping of the URI path of the request for the
+	// signature's canonical string's path. For services that do not need
+	// additional escaping, use this to disable the signer escaping the path.
+	//
+	// S3 is an example of a service that does not need additional escaping.
+	//
+	// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+	DisableURIPathEscaping bool
+
+	// Disables the automatic setting of the HTTP request's Body field with the
+	// io.ReadSeeker passed in to the signer. This is useful if you're using a
+	// custom wrapper around the body for the io.ReadSeeker and want to preserve
+	// the Body value on the Request.Body.
+	//
+	// This does run the risk of signing a request with a body that will not be
+	// sent in the request. Ensure that the underlying data of the Body values
+	// are the same.
+	DisableRequestBodyOverwrite bool
+
+	// currentTimeFn returns the time value which represents the current time.
+	// This value should only be used for testing. If it is nil the default
+	// time.Now will be used.
+	currentTimeFn func() time.Time
+
+	// UnsignedPayload will prevent signing of the payload. This will only
+	// work for services that have support for this.
+	UnsignedPayload bool
+}
+
+// NewSigner returns a Signer pointer configured with the credentials and optional
+// option values provided. If no options are provided the Signer will use its
+// default configuration.
+func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
+	v4 := &Signer{
+		Credentials: credentials,
+	}
+
+	for _, option := range options {
+		option(v4)
+	}
+
+	return v4
+}
+
+type signingCtx struct {
+	ServiceName      string
+	Region           string
+	Request          *http.Request
+	Body             io.ReadSeeker
+	Query            url.Values
+	Time             time.Time
+	ExpireTime       time.Duration
+	SignedHeaderVals http.Header
+
+	DisableURIPathEscaping bool
+
+	credValues      credentials.Value
+	isPresign       bool
+	unsignedPayload bool
+
+	bodyDigest       string
+	signedHeaders    string
+	canonicalHeaders string
+	canonicalString  string
+	credentialString string
+	stringToSign     string
+	signature        string
+	authorization    string
+}
+
+// Sign signs AWS v4 requests with the provided body, service name, region the
+// request is made to, and time the request is signed at. The signTime allows
+// you to specify that a request is signed for the future, and cannot be
+// used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. Generally for signed requests this value
+// is not needed as the full request context will be captured by the http.Request
+// value. It is included for reference though.
+//
+// Sign will set the request's Body to be the `body` parameter passed in. If
+// the body is not already an io.ReadCloser, it will be wrapped within one. If
+// a `nil` body parameter is passed to Sign, the request's Body field will
+// also be set to nil. It's important to note that this functionality will not
+// change the request's ContentLength.
+//
+// Sign differs from Presign in that it will sign the request using HTTP
+// header values. This type of signing is intended for http.Request values that
+// will not be shared, or are shared in a way that the header values on the
+// request will not be lost.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
+	return v4.signWithBody(r, body, service, region, 0, false, signTime)
+}
+
+// Presign signs AWS v4 requests with the provided body, service name, region
+// the request is made to, and time the request is signed at. The signTime
+// allows you to specify that a request is signed for the future, and cannot
+// be used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. For presigned requests these headers
+// and their values must be included on the HTTP request when it is made. This
+// is helpful for knowing which header values need to be shared with the party
+// the presigned request will be distributed to.
+//
+// Presign differs from Sign in that it will sign the request using query string
+// values instead of header values. This allows you to share the Presigned Request's
+// URL with third parties, or distribute it throughout your system with minimal
+// dependencies.
+//
+// Presign also takes an exp value which is the duration the
+// signed request will be valid after the signing time. This allows you to
+// set when the request will expire.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+//
+// Presigning an S3 request will not compute the body's SHA256 hash by default.
+// This is because the general use case for S3 presigned URLs is to share
+// PUT/GET capabilities. If you would like to include the body's SHA256 in the
+// presigned request's signature you can set the "X-Amz-Content-Sha256"
+// HTTP header and that will be included in the request's signature.
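+//
+// A minimal presigning sketch (illustrative only; the bucket, key, region,
+// and expiry values are assumptions, not upstream examples):
+//
+//	signer := v4.NewSigner(creds)
+//	req, _ := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/examplekey", nil)
+//	_, err := signer.Presign(req, nil, "s3", "us-east-1", 15*time.Minute, time.Now())
+//	// on success, req.URL now carries the signed query string to distribute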
+func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
+	return v4.signWithBody(r, body, service, region, exp, true, signTime)
+}
+
+func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) {
+	currentTimeFn := v4.currentTimeFn
+	if currentTimeFn == nil {
+		currentTimeFn = time.Now
+	}
+
+	ctx := &signingCtx{
+		Request:                r,
+		Body:                   body,
+		Query:                  r.URL.Query(),
+		Time:                   signTime,
+		ExpireTime:             exp,
+		isPresign:              isPresign,
+		ServiceName:            service,
+		Region:                 region,
+		DisableURIPathEscaping: v4.DisableURIPathEscaping,
+		unsignedPayload:        v4.UnsignedPayload,
+	}
+
+	for key := range ctx.Query {
+		sort.Strings(ctx.Query[key])
+	}
+
+	if ctx.isRequestSigned() {
+		ctx.Time = currentTimeFn()
+		ctx.handlePresignRemoval()
+	}
+
+	var err error
+	ctx.credValues, err = v4.Credentials.GetWithContext(requestContext(r))
+	if err != nil {
+		return http.Header{}, err
+	}
+
+	ctx.sanitizeHostForHeader()
+	ctx.assignAmzQueryValues()
+	if err := ctx.build(v4.DisableHeaderHoisting); err != nil {
+		return nil, err
+	}
+
+	// If the request is not presigned the body should be attached to it. This
+	// prevents the confusion of wanting to send a signed request without
+	// the body the request was signed for attached.
+	if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) {
+		var reader io.ReadCloser
+		if body != nil {
+			var ok bool
+			if reader, ok = body.(io.ReadCloser); !ok {
+				reader = ioutil.NopCloser(body)
+			}
+		}
+		r.Body = reader
+	}
+
+	if v4.Debug.Matches(aws.LogDebugWithSigning) {
+		v4.logSigningInfo(ctx)
+	}
+
+	return ctx.SignedHeaderVals, nil
+}
+
+func (ctx *signingCtx) sanitizeHostForHeader() {
+	request.SanitizeHostForHeader(ctx.Request)
+}
+
+func (ctx *signingCtx) handlePresignRemoval() {
+	if !ctx.isPresign {
+		return
+	}
+
+	// The credentials have expired for this request. The current signing
+	// is invalid, and the request needs to be re-signed; otherwise it
+	// will fail.
+	ctx.removePresign()
+
+	// Update the request's query string to ensure the values stay in
+	// sync in case retrieving the new credentials fails.
+	ctx.Request.URL.RawQuery = ctx.Query.Encode()
+}
+
+func (ctx *signingCtx) assignAmzQueryValues() {
+	if ctx.isPresign {
+		ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
+		if ctx.credValues.SessionToken != "" {
+			ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+		} else {
+			ctx.Query.Del("X-Amz-Security-Token")
+		}
+
+		return
+	}
+
+	if ctx.credValues.SessionToken != "" {
+		ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+	}
+}
+
+// SignRequestHandler is a named request handler the SDK will use to sign
+// service client requests using the V4 signature.
+var SignRequestHandler = request.NamedHandler{
+	Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
+}
+
+// SignSDKRequest signs an AWS request with the V4 signature. This
+// request handler should only be used with the SDK's built-in service
+// clients' API operation requests.
+//
+// This function should not be used on its own, but in conjunction with
+// an AWS service client's API operation call. To sign a standalone request
+// not created by a service client's API operation method use the "Sign" or
+// "Presign" functions of the "Signer" type.
+//
+// If the credentials of the request's config are set to
+// credentials.AnonymousCredentials the request will not be signed.
+func SignSDKRequest(req *request.Request) {
+	SignSDKRequestWithCurrentTime(req, time.Now)
+}
+
+// BuildNamedHandler will build a generic handler for signing.
+func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler {
+	return request.NamedHandler{
+		Name: name,
+		Fn: func(req *request.Request) {
+			SignSDKRequestWithCurrentTime(req, time.Now, opts...)
+		},
+	}
+}
+
+// SignSDKRequestWithCurrentTime will sign the SDK's request using the time
+// function passed in. Behaves the same as SignSDKRequest with the exception
+// that the request is signed with the value returned by the current time
+// function.
+func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
+	// Skip signing when the AnonymousCredentials object is used; such
+	// requests do not need to be signed.
+	if req.Config.Credentials == credentials.AnonymousCredentials {
+		return
+	}
+
+	region := req.ClientInfo.SigningRegion
+	if region == "" {
+		region = aws.StringValue(req.Config.Region)
+	}
+
+	name := req.ClientInfo.SigningName
+	if name == "" {
+		name = req.ClientInfo.ServiceName
+	}
+
+	v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
+		v4.Debug = req.Config.LogLevel.Value()
+		v4.Logger = req.Config.Logger
+		v4.DisableHeaderHoisting = req.NotHoist
+		v4.currentTimeFn = curTimeFn
+		if name == "s3" {
+			// S3 service should not have any escaping applied
+			v4.DisableURIPathEscaping = true
+		}
+		// Prevents setting the HTTPRequest's Body. The Body could be
+		// wrapped in a custom io.Closer that we do not want the signer
+		// to stomp on.
+ v4.DisableRequestBodyOverwrite = true + }) + + for _, opt := range opts { + opt(v4) + } + + curTime := curTimeFn() + signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), + name, region, req.ExpireTime, req.ExpireTime > 0, curTime, + ) + if err != nil { + req.Error = err + req.SignedHeaderVals = nil + return + } + + req.SignedHeaderVals = signedHeaders + req.LastSignedAt = curTime +} + +const logSignInfoMsg = `DEBUG: Request Signature: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` + +func (v4 *Signer) logSigningInfo(ctx *signingCtx) { + signedURLMsg := "" + if ctx.isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String()) + } + msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg) + v4.Logger.Log(msg) +} + +func (ctx *signingCtx) build(disableHeaderHoisting bool) error { + ctx.buildTime() // no depends + ctx.buildCredentialString() // no depends + + if err := ctx.buildBodyDigest(); err != nil { + return err + } + + unsignedHeaders := ctx.Request.Header + if ctx.isPresign { + if !disableHeaderHoisting { + urlValues := url.Values{} + urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends + for k := range urlValues { + ctx.Query[k] = urlValues[k] + } + } + } + + ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) + ctx.buildCanonicalString() // depends on canon headers / signed headers + ctx.buildStringToSign() // depends on canon string + ctx.buildSignature() // depends on string to sign + + if ctx.isPresign { + ctx.Request.URL.RawQuery += "&" + signatureQueryKey + "=" + ctx.signature + } else { + parts := []string{ + authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString, + "SignedHeaders=" + ctx.signedHeaders, + authHeaderSignatureElem + ctx.signature, + } + ctx.Request.Header.Set(authorizationHeader, strings.Join(parts, ", ")) + } + + return nil +} + +// GetSignedRequestSignature attempts to extract the signature of the request. +// Returning an error if the request is unsigned, or unable to extract the +// signature. 
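+//
+// For example (an illustrative sketch, not from the upstream docs), after a
+// request has been signed:
+//
+//	sig, err := v4.GetSignedRequestSignature(req)
+//	if err == nil {
+//		fmt.Printf("signature: %x\n", sig)
+//	}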
+func GetSignedRequestSignature(r *http.Request) ([]byte, error) { + + if auth := r.Header.Get(authorizationHeader); len(auth) != 0 { + ps := strings.Split(auth, ", ") + for _, p := range ps { + if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 { + sig := p[len(authHeaderSignatureElem):] + if len(sig) == 0 { + return nil, fmt.Errorf("invalid request signature authorization header") + } + return hex.DecodeString(sig) + } + } + } + + if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 { + return hex.DecodeString(sig) + } + + return nil, fmt.Errorf("request not signed") +} + +func (ctx *signingCtx) buildTime() { + if ctx.isPresign { + duration := int64(ctx.ExpireTime / time.Second) + ctx.Query.Set("X-Amz-Date", formatTime(ctx.Time)) + ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + ctx.Request.Header.Set("X-Amz-Date", formatTime(ctx.Time)) + } +} + +func (ctx *signingCtx) buildCredentialString() { + ctx.credentialString = buildSigningScope(ctx.Region, ctx.ServiceName, ctx.Time) + + if ctx.isPresign { + ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) + } +} + +func buildQuery(r rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} +func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { + var headers []string + headers = append(headers, "host") + for k, v := range header { + if !r.IsValid(k) { + continue // ignored header + } + if ctx.SignedHeaderVals == nil { + ctx.SignedHeaderVals = make(http.Header) + } + + lowerCaseKey := strings.ToLower(k) + if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok { + // include additional values + ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...) 
+ continue + } + + headers = append(headers, lowerCaseKey) + ctx.SignedHeaderVals[lowerCaseKey] = v + } + sort.Strings(headers) + + ctx.signedHeaders = strings.Join(headers, ";") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) + } + + headerValues := make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + if ctx.Request.Host != "" { + headerValues[i] = "host:" + ctx.Request.Host + } else { + headerValues[i] = "host:" + ctx.Request.URL.Host + } + } else { + headerValues[i] = k + ":" + + strings.Join(ctx.SignedHeaderVals[k], ",") + } + } + stripExcessSpaces(headerValues) + ctx.canonicalHeaders = strings.Join(headerValues, "\n") +} + +func (ctx *signingCtx) buildCanonicalString() { + ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) + + uri := getURIPath(ctx.Request.URL) + + if !ctx.DisableURIPathEscaping { + uri = rest.EscapePath(uri, false) + } + + ctx.canonicalString = strings.Join([]string{ + ctx.Request.Method, + uri, + ctx.Request.URL.RawQuery, + ctx.canonicalHeaders + "\n", + ctx.signedHeaders, + ctx.bodyDigest, + }, "\n") +} + +func (ctx *signingCtx) buildStringToSign() { + ctx.stringToSign = strings.Join([]string{ + authHeaderPrefix, + formatTime(ctx.Time), + ctx.credentialString, + hex.EncodeToString(hashSHA256([]byte(ctx.canonicalString))), + }, "\n") +} + +func (ctx *signingCtx) buildSignature() { + creds := deriveSigningKey(ctx.Region, ctx.ServiceName, ctx.credValues.SecretAccessKey, ctx.Time) + signature := hmacSHA256(creds, []byte(ctx.stringToSign)) + ctx.signature = hex.EncodeToString(signature) +} + +func (ctx *signingCtx) buildBodyDigest() error { + hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + includeSHA256Header := ctx.unsignedPayload || + ctx.ServiceName == "s3" || + ctx.ServiceName == "glacier" + + s3Presign := ctx.isPresign && ctx.ServiceName == "s3" + + if ctx.unsignedPayload || s3Presign { + hash = "UNSIGNED-PAYLOAD" + includeSHA256Header = !s3Presign + } else if ctx.Body == nil { + hash = emptyStringSHA256 + } else { + if !aws.IsReaderSeekable(ctx.Body) { + return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) + } + hashBytes, err := makeSha256Reader(ctx.Body) + if err != nil { + return err + } + hash = hex.EncodeToString(hashBytes) + } + + if includeSHA256Header { + ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) + } + } + ctx.bodyDigest = hash + + return nil +} + +// isRequestSigned returns if the request is currently signed or presigned +func (ctx *signingCtx) isRequestSigned() bool { + if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" { + return true + } + if ctx.Request.Header.Get("Authorization") != "" { + return true + } + + return false +} + +// unsign removes signing flags for both signed and presigned requests. 
+func (ctx *signingCtx) removePresign() { + ctx.Query.Del("X-Amz-Algorithm") + ctx.Query.Del("X-Amz-Signature") + ctx.Query.Del("X-Amz-Security-Token") + ctx.Query.Del("X-Amz-Date") + ctx.Query.Del("X-Amz-Expires") + ctx.Query.Del("X-Amz-Credential") + ctx.Query.Del("X-Amz-SignedHeaders") +} + +func hmacSHA256(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func hashSHA256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) { + hash := sha256.New() + start, err := reader.Seek(0, sdkio.SeekCurrent) + if err != nil { + return nil, err + } + defer func() { + // ensure error is return if unable to seek back to start of payload. + _, err = reader.Seek(start, sdkio.SeekStart) + }() + + // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies + // smaller than 32KB. Fall back to io.Copy if we fail to determine the size. + size, err := aws.SeekerLen(reader) + if err != nil { + io.Copy(hash, reader) + } else { + io.CopyN(hash, reader, size) + } + + return hash.Sum(nil), nil +} + +const doubleSpace = " " + +// stripExcessSpaces will rewrite the passed in slice's string values to not +// contain multiple side-by-side spaces. +func stripExcessSpaces(vals []string) { + var j, k, l, m, spaces int + for i, str := range vals { + // Trim trailing spaces + for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- { + } + + // Trim leading spaces + for k = 0; k < j && str[k] == ' '; k++ { + } + str = str[k : j+1] + + // Strip multiple spaces. + j = strings.Index(str, doubleSpace) + if j < 0 { + vals[i] = str + continue + } + + buf := []byte(str) + for k, m, l = j, j, len(buf); k < l; k++ { + if buf[k] == ' ' { + if spaces == 0 { + // First space. + buf[m] = buf[k] + m++ + } + spaces++ + } else { + // End of multiple spaces. + spaces = 0 + buf[m] = buf[k] + m++ + } + } + + vals[i] = string(buf[:m]) + } +} + +func buildSigningScope(region, service string, dt time.Time) string { + return strings.Join([]string{ + formatShortTime(dt), + region, + service, + awsV4Request, + }, "/") +} + +func deriveSigningKey(region, service, secretKey string, dt time.Time) []byte { + kDate := hmacSHA256([]byte("AWS4"+secretKey), []byte(formatShortTime(dt))) + kRegion := hmacSHA256(kDate, []byte(region)) + kService := hmacSHA256(kRegion, []byte(service)) + signingKey := hmacSHA256(kService, []byte(awsV4Request)) + return signingKey +} + +func formatShortTime(dt time.Time) string { + return dt.UTC().Format(shortTimeFormat) +} + +func formatTime(dt time.Time) string { + return dt.UTC().Format(timeFormat) +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/types.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/types.go new file mode 100644 index 000000000..98751ee84 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -0,0 +1,264 @@ +package aws + +import ( + "io" + "strings" + "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Allows the +// SDK to accept an io.Reader that is not also an io.Seeker for unsigned +// streaming payload API operations. +// +// A ReadSeekCloser wrapping an nonseekable io.Reader used in an API +// operation's input will prevent that operation being retried in the case of +// network errors, and cause operation requests to fail if the operation +// requires payload signing. 
+//
+// Note: If using this with S3 PutObject to stream an object upload, the
+// SDK's S3 Upload manager (s3manager.Uploader) provides support for
+// streaming with the ability to retry network errors.
+func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
+	return ReaderSeekerCloser{r}
+}
+
+// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
+// io.Closer interfaces to the underlying object if they are available.
+type ReaderSeekerCloser struct {
+	r io.Reader
+}
+
+// IsReaderSeekable reports whether the underlying reader can be seeked. An
+// io.Reader might not actually be seekable if it is wrapped in the
+// ReaderSeekerCloser type.
+func IsReaderSeekable(r io.Reader) bool {
+	switch v := r.(type) {
+	case ReaderSeekerCloser:
+		return v.IsSeeker()
+	case *ReaderSeekerCloser:
+		return v.IsSeeker()
+	case io.ReadSeeker:
+		return true
+	default:
+		return false
+	}
+}
+
+// Read reads from the reader up to size of p. The number of bytes read, and
+// error if it occurred will be returned.
+//
+// If the underlying reader is not an io.Reader, zero bytes read and a nil
+// error will be returned.
+//
+// Performs the same functionality as io.Reader Read
+func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
+	switch t := r.r.(type) {
+	case io.Reader:
+		return t.Read(p)
+	}
+	return 0, nil
+}
+
+// Seek sets the offset for the next Read to offset, interpreted according to
+// whence: 0 means relative to the origin of the file, 1 means relative to the
+// current offset, and 2 means relative to the end. Seek returns the new offset
+// and an error, if any.
+//
+// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
+func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
+	switch t := r.r.(type) {
+	case io.Seeker:
+		return t.Seek(offset, whence)
+	}
+	return int64(0), nil
+}
+
+// IsSeeker returns if the underlying reader is also a seeker.
+func (r ReaderSeekerCloser) IsSeeker() bool {
+	_, ok := r.r.(io.Seeker)
+	return ok
+}
+
+// HasLen returns the length of the underlying reader if the value implements
+// the Len() int method.
+func (r ReaderSeekerCloser) HasLen() (int, bool) {
+	type lenner interface {
+		Len() int
+	}
+
+	if lr, ok := r.r.(lenner); ok {
+		return lr.Len(), true
+	}
+
+	return 0, false
+}
+
+// GetLen returns the length of the bytes remaining in the underlying reader.
+// Checks first for Len(), then io.Seeker to determine the size of the
+// underlying reader.
+//
+// Will return -1 if the length cannot be determined.
+func (r ReaderSeekerCloser) GetLen() (int64, error) {
+	if l, ok := r.HasLen(); ok {
+		return int64(l), nil
+	}
+
+	if s, ok := r.r.(io.Seeker); ok {
+		return seekerLen(s)
+	}
+
+	return -1, nil
+}
+
+// SeekerLen attempts to get the number of bytes remaining at the seeker's
+// current position. Returns the number of bytes remaining or error.
+func SeekerLen(s io.Seeker) (int64, error) {
+	// Determine if the seeker is actually seekable. ReaderSeekerCloser
+	// hides the fact that an io.Reader might not actually be seekable.
+ switch v := s.(type) { + case ReaderSeekerCloser: + return v.GetLen() + case *ReaderSeekerCloser: + return v.GetLen() + } + + return seekerLen(s) +} + +func seekerLen(s io.Seeker) (int64, error) { + curOffset, err := s.Seek(0, sdkio.SeekCurrent) + if err != nil { + return 0, err + } + + endOffset, err := s.Seek(0, sdkio.SeekEnd) + if err != nil { + return 0, err + } + + _, err = s.Seek(curOffset, sdkio.SeekStart) + if err != nil { + return 0, err + } + + return endOffset - curOffset, nil +} + +// Close closes the ReaderSeekerCloser. +// +// If the ReaderSeekerCloser is not an io.Closer nothing will be done. +func (r ReaderSeekerCloser) Close() error { + switch t := r.r.(type) { + case io.Closer: + return t.Close() + } + return nil +} + +// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface +// Can be used with the s3manager.Downloader to download content to a buffer +// in memory. Safe to use concurrently. +type WriteAtBuffer struct { + buf []byte + m sync.Mutex + + // GrowthCoeff defines the growth rate of the internal buffer. By + // default, the growth rate is 1, where expanding the internal + // buffer will allocate only enough capacity to fit the new expected + // length. + GrowthCoeff float64 +} + +// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer +// provided by buf. +func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { + return &WriteAtBuffer{buf: buf} +} + +// WriteAt writes a slice of bytes to a buffer starting at the position provided +// The number of bytes written will be returned, or error. Can overwrite previous +// written slices if the write ats overlap. +func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { + pLen := len(p) + expLen := pos + int64(pLen) + b.m.Lock() + defer b.m.Unlock() + if int64(len(b.buf)) < expLen { + if int64(cap(b.buf)) < expLen { + if b.GrowthCoeff < 1 { + b.GrowthCoeff = 1 + } + newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) + copy(newBuf, b.buf) + b.buf = newBuf + } + b.buf = b.buf[:expLen] + } + copy(b.buf[pos:], p) + return pLen, nil +} + +// Bytes returns a slice of bytes written to the buffer. +func (b *WriteAtBuffer) Bytes() []byte { + b.m.Lock() + defer b.m.Unlock() + return b.buf +} + +// MultiCloser is a utility to close multiple io.Closers within a single +// statement. +type MultiCloser []io.Closer + +// Close closes all of the io.Closers making up the MultiClosers. Any +// errors that occur while closing will be returned in the order they +// occur. +func (m MultiCloser) Close() error { + var errs errors + for _, c := range m { + err := c.Close() + if err != nil { + errs = append(errs, err) + } + } + if len(errs) != 0 { + return errs + } + + return nil +} + +type errors []error + +func (es errors) Error() string { + var parts []string + for _, e := range es { + parts = append(parts, e.Error()) + } + + return strings.Join(parts, "\n") +} + +// CopySeekableBody copies the seekable body to an io.Writer +func CopySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) { + curPos, err := src.Seek(0, sdkio.SeekCurrent) + if err != nil { + return 0, err + } + + // copy errors may be assumed to be from the body. + n, err := io.Copy(dst, src) + if err != nil { + return n, err + } + + // seek back to the first position after reading to reset + // the body for transmission. 
+ _, err = src.Seek(curPos, sdkio.SeekStart) + if err != nil { + return n, err + } + + return n, nil +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/url.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/url.go new file mode 100644 index 000000000..6192b2455 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/url.go @@ -0,0 +1,12 @@ +// +build go1.8 + +package aws + +import "net/url" + +// URLHostname will extract the Hostname without port from the URL value. +// +// Wrapper of net/url#URL.Hostname for backwards Go version compatibility. +func URLHostname(url *url.URL) string { + return url.Hostname() +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go new file mode 100644 index 000000000..0210d2720 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go @@ -0,0 +1,29 @@ +// +build !go1.8 + +package aws + +import ( + "net/url" + "strings" +) + +// URLHostname will extract the Hostname without port from the URL value. +// +// Copy of Go 1.8's net/url#URL.Hostname functionality. +func URLHostname(url *url.URL) string { + return stripPort(url.Host) + +} + +// stripPort is copy of Go 1.8 url#URL.Hostname functionality. +// https://golang.org/src/net/url/url.go +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/aws/version.go b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/version.go new file mode 100644 index 000000000..55a35a03e --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -0,0 +1,8 @@ +// Package aws provides core functionality for making requests to AWS services. +package aws + +// SDKName is the name of this AWS SDK +const SDKName = "aws-sdk-go" + +// SDKVersion is the version of this SDK +const SDKVersion = "1.31.6" diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go new file mode 100644 index 000000000..876dcb3fd --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go @@ -0,0 +1,40 @@ +// +build !go1.7 + +package context + +import "time" + +// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to +// provide a 1.6 and 1.5 safe version of context that is compatible with Go +// 1.7's Context. +// +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case BackgroundCtx: + return "aws.BackgroundContext" + } + return "unknown empty Context" +} + +// BackgroundCtx is the common base context. 
+var BackgroundCtx = new(emptyCtx) diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go new file mode 100644 index 000000000..e83a99886 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go @@ -0,0 +1,120 @@ +package ini + +// ASTKind represents different states in the parse table +// and the type of AST that is being constructed +type ASTKind int + +// ASTKind* is used in the parse table to transition between +// the different states +const ( + ASTKindNone = ASTKind(iota) + ASTKindStart + ASTKindExpr + ASTKindEqualExpr + ASTKindStatement + ASTKindSkipStatement + ASTKindExprStatement + ASTKindSectionStatement + ASTKindNestedSectionStatement + ASTKindCompletedNestedSectionStatement + ASTKindCommentStatement + ASTKindCompletedSectionStatement +) + +func (k ASTKind) String() string { + switch k { + case ASTKindNone: + return "none" + case ASTKindStart: + return "start" + case ASTKindExpr: + return "expr" + case ASTKindStatement: + return "stmt" + case ASTKindSectionStatement: + return "section_stmt" + case ASTKindExprStatement: + return "expr_stmt" + case ASTKindCommentStatement: + return "comment" + case ASTKindNestedSectionStatement: + return "nested_section_stmt" + case ASTKindCompletedSectionStatement: + return "completed_stmt" + case ASTKindSkipStatement: + return "skip" + default: + return "" + } +} + +// AST interface allows us to determine what kind of node we +// are on and casting may not need to be necessary. +// +// The root is always the first node in Children +type AST struct { + Kind ASTKind + Root Token + RootToken bool + Children []AST +} + +func newAST(kind ASTKind, root AST, children ...AST) AST { + return AST{ + Kind: kind, + Children: append([]AST{root}, children...), + } +} + +func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST { + return AST{ + Kind: kind, + Root: root, + RootToken: true, + Children: children, + } +} + +// AppendChild will append to the list of children an AST has. +func (a *AST) AppendChild(child AST) { + a.Children = append(a.Children, child) +} + +// GetRoot will return the root AST which can be the first entry +// in the children list or a token. +func (a *AST) GetRoot() AST { + if a.RootToken { + return *a + } + + if len(a.Children) == 0 { + return AST{} + } + + return a.Children[0] +} + +// GetChildren will return the current AST's list of children +func (a *AST) GetChildren() []AST { + if len(a.Children) == 0 { + return []AST{} + } + + if a.RootToken { + return a.Children + } + + return a.Children[1:] +} + +// SetChildren will set and override all children of the AST. +func (a *AST) SetChildren(children []AST) { + if a.RootToken { + a.Children = children + } else { + a.Children = append(a.Children[:1], children...) + } +} + +// Start is used to indicate the starting state of the parse table. 
+var Start = newAST(ASTKindStart, AST{}) diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go new file mode 100644 index 000000000..0895d53cb --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go @@ -0,0 +1,11 @@ +package ini + +var commaRunes = []rune(",") + +func isComma(b rune) bool { + return b == ',' +} + +func newCommaToken() Token { + return newToken(TokenComma, commaRunes, NoneType) +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go new file mode 100644 index 000000000..0b76999ba --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go @@ -0,0 +1,35 @@ +package ini + +// isComment will return whether or not the next byte(s) is a +// comment. +func isComment(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case ';': + return true + case '#': + return true + } + + return false +} + +// newCommentToken will create a comment token and +// return how many bytes were read. +func newCommentToken(b []rune) (Token, int, error) { + i := 0 + for ; i < len(b); i++ { + if b[i] == '\n' { + break + } + + if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' { + break + } + } + + return newToken(TokenComment, b[:i], NoneType), i, nil +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go new file mode 100644 index 000000000..25ce0fe13 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go @@ -0,0 +1,29 @@ +// Package ini is an LL(1) parser for configuration files. +// +// Example: +// sections, err := ini.OpenFile("/path/to/file") +// if err != nil { +// panic(err) +// } +// +// profile := "foo" +// section, ok := sections.GetSection(profile) +// if !ok { +// fmt.Printf("section %q could not be found", profile) +// } +// +// Below is the BNF that describes this parser +// Grammar: +// stmt -> value stmt' +// stmt' -> epsilon | op stmt +// value -> number | string | boolean | quoted_string +// +// section -> [ section' +// section' -> value section_close +// section_close -> ] +// +// SkipState will skip (NL WS)+ +// +// comment -> # comment' | ; comment' +// comment' -> epsilon | value +package ini diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go new file mode 100644 index 000000000..04345a54c --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go @@ -0,0 +1,4 @@ +package ini + +// emptyToken is used to satisfy the Token interface +var emptyToken = newToken(TokenNone, []rune{}, NoneType) diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go new file mode 100644 index 000000000..91ba2a59d --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go @@ -0,0 +1,24 @@ +package ini + +// newExpression will return an expression AST. 
+// Expr represents an expression +// +// grammar: +// expr -> string | number +func newExpression(tok Token) AST { + return newASTWithRootToken(ASTKindExpr, tok) +} + +func newEqualExpr(left AST, tok Token) AST { + return newASTWithRootToken(ASTKindEqualExpr, tok, left) +} + +// EqualExprKey will return a LHS value in the equal expr +func EqualExprKey(ast AST) string { + children := ast.GetChildren() + if len(children) == 0 || ast.Kind != ASTKindEqualExpr { + return "" + } + + return string(children[0].Root.Raw()) +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go new file mode 100644 index 000000000..8d462f77e --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go @@ -0,0 +1,17 @@ +// +build gofuzz + +package ini + +import ( + "bytes" +) + +func Fuzz(data []byte) int { + b := bytes.NewReader(data) + + if _, err := Parse(b); err != nil { + return 0 + } + + return 1 +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go new file mode 100644 index 000000000..3b0ca7afe --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go @@ -0,0 +1,51 @@ +package ini + +import ( + "io" + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// OpenFile takes a path to a given file, and will open and parse +// that file. +func OpenFile(path string) (Sections, error) { + f, err := os.Open(path) + if err != nil { + return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err) + } + defer f.Close() + + return Parse(f) +} + +// Parse will parse the given file using the shared config +// visitor. +func Parse(f io.Reader) (Sections, error) { + tree, err := ParseAST(f) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor() + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} + +// ParseBytes will parse the given bytes and return the parsed sections. +func ParseBytes(b []byte) (Sections, error) { + tree, err := ParseASTBytes(b) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor() + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go new file mode 100644 index 000000000..582c024ad --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go @@ -0,0 +1,165 @@ +package ini + +import ( + "bytes" + "io" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +const ( + // ErrCodeUnableToReadFile is used when a file is failed to be + // opened or read from. 
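+ //
+ // Illustrative check against this code (not part of the SDK):
+ //
+ //	if _, err := OpenFile(path); err != nil {
+ //		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ErrCodeUnableToReadFile {
+ //			// the file could not be opened or read; fall back to defaults
+ //		}
+ //	}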
+ ErrCodeUnableToReadFile = "FailedRead" +) + +// TokenType represents the various different tokens types +type TokenType int + +func (t TokenType) String() string { + switch t { + case TokenNone: + return "none" + case TokenLit: + return "literal" + case TokenSep: + return "sep" + case TokenOp: + return "op" + case TokenWS: + return "ws" + case TokenNL: + return "newline" + case TokenComment: + return "comment" + case TokenComma: + return "comma" + default: + return "" + } +} + +// TokenType enums +const ( + TokenNone = TokenType(iota) + TokenLit + TokenSep + TokenComma + TokenOp + TokenWS + TokenNL + TokenComment +) + +type iniLexer struct{} + +// Tokenize will return a list of tokens during lexical analysis of the +// io.Reader. +func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err) + } + + return l.tokenize(b) +} + +func (l *iniLexer) tokenize(b []byte) ([]Token, error) { + runes := bytes.Runes(b) + var err error + n := 0 + tokenAmount := countTokens(runes) + tokens := make([]Token, tokenAmount) + count := 0 + + for len(runes) > 0 && count < tokenAmount { + switch { + case isWhitespace(runes[0]): + tokens[count], n, err = newWSToken(runes) + case isComma(runes[0]): + tokens[count], n = newCommaToken(), 1 + case isComment(runes): + tokens[count], n, err = newCommentToken(runes) + case isNewline(runes): + tokens[count], n, err = newNewlineToken(runes) + case isSep(runes): + tokens[count], n, err = newSepToken(runes) + case isOp(runes): + tokens[count], n, err = newOpToken(runes) + default: + tokens[count], n, err = newLitToken(runes) + } + + if err != nil { + return nil, err + } + + count++ + + runes = runes[n:] + } + + return tokens[:count], nil +} + +func countTokens(runes []rune) int { + count, n := 0, 0 + var err error + + for len(runes) > 0 { + switch { + case isWhitespace(runes[0]): + _, n, err = newWSToken(runes) + case isComma(runes[0]): + _, n = newCommaToken(), 1 + case isComment(runes): + _, n, err = newCommentToken(runes) + case isNewline(runes): + _, n, err = newNewlineToken(runes) + case isSep(runes): + _, n, err = newSepToken(runes) + case isOp(runes): + _, n, err = newOpToken(runes) + default: + _, n, err = newLitToken(runes) + } + + if err != nil { + return 0 + } + + count++ + runes = runes[n:] + } + + return count + 1 +} + +// Token indicates a metadata about a given value. 
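+//
+// For example (illustrative), the input "name = value" lexes to
+// TokenLit("name"), TokenWS, TokenOp("="), TokenWS, TokenLit("value"),
+// which the parser later folds into a single ASTKindEqualExpr node.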
+type Token struct { + t TokenType + ValueType ValueType + base int + raw []rune +} + +var emptyValue = Value{} + +func newToken(t TokenType, raw []rune, v ValueType) Token { + return Token{ + t: t, + raw: raw, + ValueType: v, + } +} + +// Raw return the raw runes that were consumed +func (tok Token) Raw() []rune { + return tok.raw +} + +// Type returns the token type +func (tok Token) Type() TokenType { + return tok.t +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go new file mode 100644 index 000000000..cf9fad81e --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go @@ -0,0 +1,356 @@ +package ini + +import ( + "fmt" + "io" +) + +// State enums for the parse table +const ( + InvalidState = iota + // stmt -> value stmt' + StatementState + // stmt' -> MarkComplete | op stmt + StatementPrimeState + // value -> number | string | boolean | quoted_string + ValueState + // section -> [ section' + OpenScopeState + // section' -> value section_close + SectionState + // section_close -> ] + CloseScopeState + // SkipState will skip (NL WS)+ + SkipState + // SkipTokenState will skip any token and push the previous + // state onto the stack. + SkipTokenState + // comment -> # comment' | ; comment' + // comment' -> MarkComplete | value + CommentState + // MarkComplete state will complete statements and move that + // to the completed AST list + MarkCompleteState + // TerminalState signifies that the tokens have been fully parsed + TerminalState +) + +// parseTable is a state machine to dictate the grammar above. +var parseTable = map[ASTKind]map[TokenType]int{ + ASTKindStart: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: TerminalState, + }, + ASTKindCommentStatement: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExpr: map[TokenType]int{ + TokenOp: StatementPrimeState, + TokenLit: ValueState, + TokenSep: OpenScopeState, + TokenWS: ValueState, + TokenNL: SkipState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindEqualExpr: map[TokenType]int{ + TokenLit: ValueState, + TokenWS: SkipTokenState, + TokenNL: SkipState, + }, + ASTKindStatement: map[TokenType]int{ + TokenLit: SectionState, + TokenSep: CloseScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExprStatement: map[TokenType]int{ + TokenLit: ValueState, + TokenSep: OpenScopeState, + TokenOp: ValueState, + TokenWS: ValueState, + TokenNL: MarkCompleteState, + TokenComment: CommentState, + TokenNone: TerminalState, + TokenComma: SkipState, + }, + ASTKindSectionStatement: map[TokenType]int{ + TokenLit: SectionState, + TokenOp: SectionState, + TokenSep: CloseScopeState, + TokenWS: SectionState, + TokenNL: SkipTokenState, + }, + ASTKindCompletedSectionStatement: map[TokenType]int{ + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindSkipStatement: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + 
TokenComment: CommentState, + TokenNone: TerminalState, + }, +} + +// ParseAST will parse input from an io.Reader using +// an LL(1) parser. +func ParseAST(r io.Reader) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.Tokenize(r) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +// ParseASTBytes will parse input from a byte slice using +// an LL(1) parser. +func ParseASTBytes(b []byte) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.tokenize(b) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +func parse(tokens []Token) ([]AST, error) { + start := Start + stack := newParseStack(3, len(tokens)) + + stack.Push(start) + s := newSkipper() + +loop: + for stack.Len() > 0 { + k := stack.Pop() + + var tok Token + if len(tokens) == 0 { + // this occurs when all the tokens have been processed + // but reduction of what's left on the stack needs to + // occur. + tok = emptyToken + } else { + tok = tokens[0] + } + + step := parseTable[k.Kind][tok.Type()] + if s.ShouldSkip(tok) { + // being in a skip state with no tokens will break out of + // the parse loop since there is nothing left to process. + if len(tokens) == 0 { + break loop + } + // if should skip is true, we skip the tokens until should skip is set to false. + step = SkipTokenState + } + + switch step { + case TerminalState: + // Finished parsing. Push what should be the last + // statement to the stack. If there is anything left + // on the stack, an error in parsing has occurred. + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + break loop + case SkipTokenState: + // When skipping a token, the previous state was popped off the stack. + // To maintain the correct state, the previous state will be pushed + // onto the stack. + stack.Push(k) + case StatementState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + expr := newExpression(tok) + stack.Push(expr) + case StatementPrimeState: + if tok.Type() != TokenOp { + stack.MarkComplete(k) + continue + } + + if k.Kind != ASTKindExpr { + return nil, NewParseError( + fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k), + ) + } + + k = trimSpaces(k) + expr := newEqualExpr(k, tok) + stack.Push(expr) + case ValueState: + // ValueState requires the previous state to either be an equal expression + // or an expression statement. + // + // This grammar occurs when the RHS is a number, word, or quoted string. + // equal_expr -> lit op equal_expr' + // equal_expr' -> number | string | quoted_string + // quoted_string -> " quoted_string' + // quoted_string' -> string quoted_string_end + // quoted_string_end -> " + // + // otherwise + // expr_stmt -> equal_expr (expr_stmt')* + // expr_stmt' -> ws S | op S | MarkComplete + // S -> equal_expr' expr_stmt' + switch k.Kind { + case ASTKindEqualExpr: + // assigning a value to some key + k.AppendChild(newExpression(tok)) + stack.Push(newExprStatement(k)) + case ASTKindExpr: + k.Root.raw = append(k.Root.raw, tok.Raw()...) + stack.Push(k) + case ASTKindExprStatement: + root := k.GetRoot() + children := root.GetChildren() + if len(children) == 0 { + return nil, NewParseError( + fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind), + ) + } + + rhs := children[len(children)-1] + + if rhs.Root.ValueType != QuotedStringType { + rhs.Root.ValueType = StringType + rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...) 
+ + } + + children[len(children)-1] = rhs + k.SetChildren(children) + + stack.Push(k) + } + case OpenScopeState: + if !runeCompare(tok.Raw(), openBrace) { + return nil, NewParseError("expected '['") + } + // If OpenScopeState is not at the start, we must mark the previous ast as complete + // + // for example: if previous ast was a skip statement; + // we should mark it as complete before we create a new statement + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + + stmt := newStatement() + stack.Push(stmt) + case CloseScopeState: + if !runeCompare(tok.Raw(), closeBrace) { + return nil, NewParseError("expected ']'") + } + + k = trimSpaces(k) + stack.Push(newCompletedSectionStatement(k)) + case SectionState: + var stmt AST + + switch k.Kind { + case ASTKindStatement: + // If there are multiple literals inside of a scope declaration, + // then the current token's raw value will be appended to the Name. + // + // This handles cases like [ profile default ] + // + // k will represent a SectionStatement with the children representing + // the label of the section + stmt = newSectionStatement(tok) + case ASTKindSectionStatement: + k.Root.raw = append(k.Root.raw, tok.Raw()...) + stmt = k + default: + return nil, NewParseError( + fmt.Sprintf("invalid statement: expected statement: %v", k.Kind), + ) + } + + stack.Push(stmt) + case MarkCompleteState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + + if stack.Len() == 0 { + stack.Push(start) + } + case SkipState: + stack.Push(newSkipStatement(k)) + s.Skip() + case CommentState: + if k.Kind == ASTKindStart { + stack.Push(k) + } else { + stack.MarkComplete(k) + } + + stmt := newCommentStatement(tok) + stack.Push(stmt) + default: + return nil, NewParseError( + fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", + k, tok.Type())) + } + + if len(tokens) > 0 { + tokens = tokens[1:] + } + } + + // this occurs when a statement has not been completed + if stack.top > 1 { + return nil, NewParseError(fmt.Sprintf("incomplete ini expression")) + } + + // returns a sublist which excludes the start symbol + return stack.List(), nil +} + +// trimSpaces will trim spaces on the left and right hand side of +// the literal. 
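+//
+// For example (illustrative), a section label collected as
+// " profile default " is trimmed here to "profile default" before the
+// completed section statement is pushed in CloseScopeState above.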
+func trimSpaces(k AST) AST { + // trim left hand side of spaces + for i := 0; i < len(k.Root.raw); i++ { + if !isWhitespace(k.Root.raw[i]) { + break + } + + k.Root.raw = k.Root.raw[1:] + i-- + } + + // trim right hand side of spaces + for i := len(k.Root.raw) - 1; i >= 0; i-- { + if !isWhitespace(k.Root.raw[i]) { + break + } + + k.Root.raw = k.Root.raw[:len(k.Root.raw)-1] + } + + return k +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go new file mode 100644 index 000000000..24df543d3 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go @@ -0,0 +1,324 @@ +package ini + +import ( + "fmt" + "strconv" + "strings" +) + +var ( + runesTrue = []rune("true") + runesFalse = []rune("false") +) + +var literalValues = [][]rune{ + runesTrue, + runesFalse, +} + +func isBoolValue(b []rune) bool { + for _, lv := range literalValues { + if isLitValue(lv, b) { + return true + } + } + return false +} + +func isLitValue(want, have []rune) bool { + if len(have) < len(want) { + return false + } + + for i := 0; i < len(want); i++ { + if want[i] != have[i] { + return false + } + } + + return true +} + +// isNumberValue will return whether not the leading characters in +// a byte slice is a number. A number is delimited by whitespace or +// the newline token. +// +// A number is defined to be in a binary, octal, decimal (int | float), hex format, +// or in scientific notation. +func isNumberValue(b []rune) bool { + negativeIndex := 0 + helper := numberHelper{} + needDigit := false + + for i := 0; i < len(b); i++ { + negativeIndex++ + + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return false + } + helper.Determine(b[i]) + needDigit = true + continue + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return false + } + negativeIndex = 0 + needDigit = true + continue + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + needDigit = true + if i == 0 { + return false + } + + fallthrough + case '.': + if err := helper.Determine(b[i]); err != nil { + return false + } + needDigit = true + continue + } + + if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) { + return !needDigit + } + + if !helper.CorrectByte(b[i]) { + return false + } + needDigit = false + } + + return !needDigit +} + +func isValid(b []rune) (bool, int, error) { + if len(b) == 0 { + // TODO: should probably return an error + return false, 0, nil + } + + return isValidRune(b[0]), 1, nil +} + +func isValidRune(r rune) bool { + return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n' +} + +// ValueType is an enum that will signify what type +// the Value is +type ValueType int + +func (v ValueType) String() string { + switch v { + case NoneType: + return "NONE" + case DecimalType: + return "FLOAT" + case IntegerType: + return "INT" + case StringType: + return "STRING" + case BoolType: + return "BOOL" + } + + return "" +} + +// ValueType enums +const ( + NoneType = ValueType(iota) + DecimalType + IntegerType + StringType + QuotedStringType + BoolType +) + +// Value is a union container +type Value struct { + Type ValueType + raw []rune + + integer int64 + decimal float64 + boolean bool + str string +} + +func newValue(t ValueType, base int, raw []rune) (Value, error) { + v := Value{ + Type: t, + raw: raw, + } + var err error + + switch t { + case DecimalType: + v.decimal, err = 
strconv.ParseFloat(string(raw), 64) + case IntegerType: + if base != 10 { + raw = raw[2:] + } + + v.integer, err = strconv.ParseInt(string(raw), base, 64) + case StringType: + v.str = string(raw) + case QuotedStringType: + v.str = string(raw[1 : len(raw)-1]) + case BoolType: + v.boolean = runeCompare(v.raw, runesTrue) + } + + // issue 2253 + // + // if the value trying to be parsed is too large, then we will use + // the 'StringType' and raw value instead. + if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange { + v.Type = StringType + v.str = string(raw) + err = nil + } + + return v, err +} + +// Append will append values and change the type to a string +// type. +func (v *Value) Append(tok Token) { + r := tok.Raw() + if v.Type != QuotedStringType { + v.Type = StringType + r = tok.raw[1 : len(tok.raw)-1] + } + if tok.Type() != TokenLit { + v.raw = append(v.raw, tok.Raw()...) + } else { + v.raw = append(v.raw, r...) + } +} + +func (v Value) String() string { + switch v.Type { + case DecimalType: + return fmt.Sprintf("decimal: %f", v.decimal) + case IntegerType: + return fmt.Sprintf("integer: %d", v.integer) + case StringType: + return fmt.Sprintf("string: %s", string(v.raw)) + case QuotedStringType: + return fmt.Sprintf("quoted string: %s", string(v.raw)) + case BoolType: + return fmt.Sprintf("bool: %t", v.boolean) + default: + return "union not set" + } +} + +func newLitToken(b []rune) (Token, int, error) { + n := 0 + var err error + + token := Token{} + if b[0] == '"' { + n, err = getStringValue(b) + if err != nil { + return token, n, err + } + + token = newToken(TokenLit, b[:n], QuotedStringType) + } else if isNumberValue(b) { + var base int + base, n, err = getNumericalValue(b) + if err != nil { + return token, 0, err + } + + value := b[:n] + vType := IntegerType + if contains(value, '.') || hasExponent(value) { + vType = DecimalType + } + token = newToken(TokenLit, value, vType) + token.base = base + } else if isBoolValue(b) { + n, err = getBoolValue(b) + + token = newToken(TokenLit, b[:n], BoolType) + } else { + n, err = getValue(b) + token = newToken(TokenLit, b[:n], StringType) + } + + return token, n, err +} + +// IntValue returns an integer value +func (v Value) IntValue() int64 { + return v.integer +} + +// FloatValue returns a float value +func (v Value) FloatValue() float64 { + return v.decimal +} + +// BoolValue returns a bool value +func (v Value) BoolValue() bool { + return v.boolean +} + +func isTrimmable(r rune) bool { + switch r { + case '\n', ' ': + return true + } + return false +} + +// StringValue returns the string value +func (v Value) StringValue() string { + switch v.Type { + case StringType: + return strings.TrimFunc(string(v.raw), isTrimmable) + case QuotedStringType: + // preserve all characters in the quotes + return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1])) + default: + return strings.TrimFunc(string(v.raw), isTrimmable) + } +} + +func contains(runes []rune, c rune) bool { + for i := 0; i < len(runes); i++ { + if runes[i] == c { + return true + } + } + + return false +} + +func runeCompare(v1 []rune, v2 []rune) bool { + if len(v1) != len(v2) { + return false + } + + for i := 0; i < len(v1); i++ { + if v1[i] != v2[i] { + return false + } + } + + return true +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go new file mode 100644 index 000000000..e52ac399f --- /dev/null +++ 
b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go @@ -0,0 +1,30 @@ +package ini + +func isNewline(b []rune) bool { + if len(b) == 0 { + return false + } + + if b[0] == '\n' { + return true + } + + if len(b) < 2 { + return false + } + + return b[0] == '\r' && b[1] == '\n' +} + +func newNewlineToken(b []rune) (Token, int, error) { + i := 1 + if b[0] == '\r' && isNewline(b[1:]) { + i++ + } + + if !isNewline([]rune(b[:i])) { + return emptyToken, 0, NewParseError("invalid new line token") + } + + return newToken(TokenNL, b[:i], NoneType), i, nil +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go new file mode 100644 index 000000000..a45c0bc56 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go @@ -0,0 +1,152 @@ +package ini + +import ( + "bytes" + "fmt" + "strconv" +) + +const ( + none = numberFormat(iota) + binary + octal + decimal + hex + exponent +) + +type numberFormat int + +// numberHelper is used to dictate what format a number is in +// and what to do for negative values. Since -1e-4 is a valid +// number, we cannot just simply check for duplicate negatives. +type numberHelper struct { + numberFormat numberFormat + + negative bool + negativeExponent bool +} + +func (b numberHelper) Exists() bool { + return b.numberFormat != none +} + +func (b numberHelper) IsNegative() bool { + return b.negative || b.negativeExponent +} + +func (b *numberHelper) Determine(c rune) error { + if b.Exists() { + return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c))) + } + + switch c { + case 'b': + b.numberFormat = binary + case 'o': + b.numberFormat = octal + case 'x': + b.numberFormat = hex + case 'e', 'E': + b.numberFormat = exponent + case '-': + if b.numberFormat != exponent { + b.negative = true + } else { + b.negativeExponent = true + } + case '.': + b.numberFormat = decimal + default: + return NewParseError(fmt.Sprintf("invalid number character: %v", string(c))) + } + + return nil +} + +func (b numberHelper) CorrectByte(c rune) bool { + switch { + case b.numberFormat == binary: + if !isBinaryByte(c) { + return false + } + case b.numberFormat == octal: + if !isOctalByte(c) { + return false + } + case b.numberFormat == hex: + if !isHexByte(c) { + return false + } + case b.numberFormat == decimal: + if !isDigit(c) { + return false + } + case b.numberFormat == exponent: + if !isDigit(c) { + return false + } + case b.negativeExponent: + if !isDigit(c) { + return false + } + case b.negative: + if !isDigit(c) { + return false + } + default: + if !isDigit(c) { + return false + } + } + + return true +} + +func (b numberHelper) Base() int { + switch b.numberFormat { + case binary: + return 2 + case octal: + return 8 + case hex: + return 16 + default: + return 10 + } +} + +func (b numberHelper) String() string { + buf := bytes.Buffer{} + i := 0 + + switch b.numberFormat { + case binary: + i++ + buf.WriteString(strconv.Itoa(i) + ": binary format\n") + case octal: + i++ + buf.WriteString(strconv.Itoa(i) + ": octal format\n") + case hex: + i++ + buf.WriteString(strconv.Itoa(i) + ": hex format\n") + case exponent: + i++ + buf.WriteString(strconv.Itoa(i) + ": exponent format\n") + default: + i++ + buf.WriteString(strconv.Itoa(i) + ": integer format\n") + } + + if b.negative { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative format\n") + } + + if b.negativeExponent { + i++ + 
buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n") + } + + return buf.String() +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go new file mode 100644 index 000000000..8a84c7cbe --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go @@ -0,0 +1,39 @@ +package ini + +import ( + "fmt" +) + +var ( + equalOp = []rune("=") + equalColonOp = []rune(":") +) + +func isOp(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '=': + return true + case ':': + return true + default: + return false + } +} + +func newOpToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '=': + tok = newToken(TokenOp, equalOp, NoneType) + case ':': + tok = newToken(TokenOp, equalColonOp, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0])) + } + return tok, 1, nil +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go new file mode 100644 index 000000000..457287019 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go @@ -0,0 +1,43 @@ +package ini + +import "fmt" + +const ( + // ErrCodeParseError is returned when a parsing error + // has occurred. + ErrCodeParseError = "INIParseError" +) + +// ParseError is an error which is returned during any part of +// the parsing process. +type ParseError struct { + msg string +} + +// NewParseError will return a new ParseError where message +// is the description of the error. +func NewParseError(message string) *ParseError { + return &ParseError{ + msg: message, + } +} + +// Code will return the ErrCodeParseError +func (err *ParseError) Code() string { + return ErrCodeParseError +} + +// Message returns the error's message +func (err *ParseError) Message() string { + return err.msg +} + +// OrigError return nothing since there will never be any +// original error. +func (err *ParseError) OrigError() error { + return nil +} + +func (err *ParseError) Error() string { + return fmt.Sprintf("%s: %s", err.Code(), err.Message()) +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go new file mode 100644 index 000000000..7f01cf7c7 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go @@ -0,0 +1,60 @@ +package ini + +import ( + "bytes" + "fmt" +) + +// ParseStack is a stack that contains a container, the stack portion, +// and the list which is the list of ASTs that have been successfully +// parsed. +type ParseStack struct { + top int + container []AST + list []AST + index int +} + +func newParseStack(sizeContainer, sizeList int) ParseStack { + return ParseStack{ + container: make([]AST, sizeContainer), + list: make([]AST, sizeList), + } +} + +// Pop will return and truncate the last container element. 
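+//
+// Push and Pop form a plain LIFO over the container slice, e.g.
+// (illustrative; a and b are arbitrary ASTs):
+//
+//	s := newParseStack(3, 8)
+//	s.Push(a)
+//	s.Push(b)
+//	top := s.Pop() // b; s.Len() is now 1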
+func (s *ParseStack) Pop() AST { + s.top-- + return s.container[s.top] +} + +// Push will add the new AST to the container +func (s *ParseStack) Push(ast AST) { + s.container[s.top] = ast + s.top++ +} + +// MarkComplete will append the AST to the list of completed statements +func (s *ParseStack) MarkComplete(ast AST) { + s.list[s.index] = ast + s.index++ +} + +// List will return the completed statements +func (s ParseStack) List() []AST { + return s.list[:s.index] +} + +// Len will return the length of the container +func (s *ParseStack) Len() int { + return s.top +} + +func (s ParseStack) String() string { + buf := bytes.Buffer{} + for i, node := range s.list { + buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node)) + } + + return buf.String() +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go new file mode 100644 index 000000000..f82095ba2 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go @@ -0,0 +1,41 @@ +package ini + +import ( + "fmt" +) + +var ( + emptyRunes = []rune{} +) + +func isSep(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '[', ']': + return true + default: + return false + } +} + +var ( + openBrace = []rune("[") + closeBrace = []rune("]") +) + +func newSepToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '[': + tok = newToken(TokenSep, openBrace, NoneType) + case ']': + tok = newToken(TokenSep, closeBrace, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0])) + } + return tok, 1, nil +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go new file mode 100644 index 000000000..da7a4049c --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go @@ -0,0 +1,45 @@ +package ini + +// skipper is used to skip certain blocks of an ini file. +// Currently skipper is used to skip nested blocks of ini +// files. See example below +// +// [ foo ] +// nested = ; this section will be skipped +// a=b +// c=d +// bar=baz ; this will be included +type skipper struct { + shouldSkip bool + TokenSet bool + prevTok Token +} + +func newSkipper() skipper { + return skipper{ + prevTok: emptyToken, + } +} + +func (s *skipper) ShouldSkip(tok Token) bool { + // should skip state will be modified only if previous token was new line (NL); + // and the current token is not WhiteSpace (WS). + if s.shouldSkip && + s.prevTok.Type() == TokenNL && + tok.Type() != TokenWS { + s.Continue() + return false + } + s.prevTok = tok + return s.shouldSkip +} + +func (s *skipper) Skip() { + s.shouldSkip = true +} + +func (s *skipper) Continue() { + s.shouldSkip = false + // empty token is assigned as we return to default state, when should skip is false + s.prevTok = emptyToken +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go new file mode 100644 index 000000000..18f3fe893 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go @@ -0,0 +1,35 @@ +package ini + +// Statement is an empty AST mostly used for transitioning states. 
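+//
+// For example (illustrative), parsing "[default]" pushes newStatement()
+// in OpenScopeState, replaces it with newSectionStatement in SectionState,
+// and finally wraps it via newCompletedSectionStatement in CloseScopeState.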
+func newStatement() AST { + return newAST(ASTKindStatement, AST{}) +} + +// SectionStatement represents a section AST +func newSectionStatement(tok Token) AST { + return newASTWithRootToken(ASTKindSectionStatement, tok) +} + +// ExprStatement represents a completed expression AST +func newExprStatement(ast AST) AST { + return newAST(ASTKindExprStatement, ast) +} + +// CommentStatement represents a comment in the ini definition. +// +// grammar: +// comment -> #comment' | ;comment' +// comment' -> epsilon | value +func newCommentStatement(tok Token) AST { + return newAST(ASTKindCommentStatement, newExpression(tok)) +} + +// CompletedSectionStatement represents a completed section +func newCompletedSectionStatement(ast AST) AST { + return newAST(ASTKindCompletedSectionStatement, ast) +} + +// SkipStatement is used to skip whole statements +func newSkipStatement(ast AST) AST { + return newAST(ASTKindSkipStatement, ast) +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go new file mode 100644 index 000000000..305999d29 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go @@ -0,0 +1,284 @@ +package ini + +import ( + "fmt" +) + +// getStringValue will return a quoted string and the amount +// of bytes read +// +// an error will be returned if the string is not properly formatted +func getStringValue(b []rune) (int, error) { + if b[0] != '"' { + return 0, NewParseError("strings must start with '\"'") + } + + endQuote := false + i := 1 + + for ; i < len(b) && !endQuote; i++ { + if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped { + endQuote = true + break + } else if escaped { + /*c, err := getEscapedByte(b[i]) + if err != nil { + return 0, err + } + + b[i-1] = c + b = append(b[:i], b[i+1:]...) 
+ i--*/ + + continue + } + } + + if !endQuote { + return 0, NewParseError("missing '\"' in string value") + } + + return i + 1, nil +} + +// getBoolValue will return a boolean and the amount +// of bytes read +// +// an error will be returned if the boolean is not of a correct +// value +func getBoolValue(b []rune) (int, error) { + if len(b) < 4 { + return 0, NewParseError("invalid boolean value") + } + + n := 0 + for _, lv := range literalValues { + if len(lv) > len(b) { + continue + } + + if isLitValue(lv, b) { + n = len(lv) + } + } + + if n == 0 { + return 0, NewParseError("invalid boolean value") + } + + return n, nil +} + +// getNumericalValue will return a numerical string, the amount +// of bytes read, and the base of the number +// +// an error will be returned if the number is not of a correct +// value +func getNumericalValue(b []rune) (int, int, error) { + if !isDigit(b[0]) { + return 0, 0, NewParseError("invalid digit value") + } + + i := 0 + helper := numberHelper{} + +loop: + for negativeIndex := 0; i < len(b); i++ { + negativeIndex++ + + if !isDigit(b[i]) { + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return 0, 0, NewParseError("parse error '-'") + } + + n := getNegativeNumber(b[i:]) + i += (n - 1) + helper.Determine(b[i]) + continue + case '.': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + + negativeIndex = 0 + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + if i == 0 && b[i] != '0' { + return 0, 0, NewParseError("incorrect base format, expected leading '0'") + } + + if i != 1 { + return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i)) + } + + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + default: + if isWhitespace(b[i]) { + break loop + } + + if isNewline(b[i:]) { + break loop + } + + if !(helper.numberFormat == hex && isHexByte(b[i])) { + if i+2 < len(b) && !isNewline(b[i:i+2]) { + return 0, 0, NewParseError("invalid numerical character") + } else if !isNewline([]rune{b[i]}) { + return 0, 0, NewParseError("invalid numerical character") + } + + break loop + } + } + } + } + + return helper.Base(), i, nil +} + +// isDigit will return whether or not something is an integer +func isDigit(b rune) bool { + return b >= '0' && b <= '9' +} + +func hasExponent(v []rune) bool { + return contains(v, 'e') || contains(v, 'E') +} + +func isBinaryByte(b rune) bool { + switch b { + case '0', '1': + return true + default: + return false + } +} + +func isOctalByte(b rune) bool { + switch b { + case '0', '1', '2', '3', '4', '5', '6', '7': + return true + default: + return false + } +} + +func isHexByte(b rune) bool { + if isDigit(b) { + return true + } + return (b >= 'A' && b <= 'F') || + (b >= 'a' && b <= 'f') +} + +func getValue(b []rune) (int, error) { + i := 0 + + for i < len(b) { + if isNewline(b[i:]) { + break + } + + if isOp(b[i:]) { + break + } + + valid, n, err := isValid(b[i:]) + if err != nil { + return 0, err + } + + if !valid { + break + } + + i += n + } + + return i, nil +} + +// getNegativeNumber will return a negative number from a +// byte slice. This will iterate through all characters until +// a non-digit has been found. 
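+//
+// For example (illustrative), getNegativeNumber([]rune("-12ab")) returns 3,
+// the count of runes consumed ('-', '1', '2'); input not starting with '-'
+// returns 0.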
+func getNegativeNumber(b []rune) int { + if b[0] != '-' { + return 0 + } + + i := 1 + for ; i < len(b); i++ { + if !isDigit(b[i]) { + return i + } + } + + return i +} + +// isEscaped will return whether or not the character is an escaped +// character. +func isEscaped(value []rune, b rune) bool { + if len(value) == 0 { + return false + } + + switch b { + case '\'': // single quote + case '"': // quote + case 'n': // newline + case 't': // tab + case '\\': // backslash + default: + return false + } + + return value[len(value)-1] == '\\' +} + +func getEscapedByte(b rune) (rune, error) { + switch b { + case '\'': // single quote + return '\'', nil + case '"': // quote + return '"', nil + case 'n': // newline + return '\n', nil + case 't': // table + return '\t', nil + case '\\': // backslash + return '\\', nil + default: + return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b)) + } +} + +func removeEscapedCharacters(b []rune) []rune { + for i := 0; i < len(b); i++ { + if isEscaped(b[:i], b[i]) { + c, err := getEscapedByte(b[i]) + if err != nil { + return b + } + + b[i-1] = c + b = append(b[:i], b[i+1:]...) + i-- + } + } + + return b +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go new file mode 100644 index 000000000..94841c324 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go @@ -0,0 +1,166 @@ +package ini + +import ( + "fmt" + "sort" +) + +// Visitor is an interface used by walkers that will +// traverse an array of ASTs. +type Visitor interface { + VisitExpr(AST) error + VisitStatement(AST) error +} + +// DefaultVisitor is used to visit statements and expressions +// and ensure that they are both of the correct format. +// In addition, upon visiting this will build sections and populate +// the Sections field which can be used to retrieve profile +// configuration. +type DefaultVisitor struct { + scope string + Sections Sections +} + +// NewDefaultVisitor return a DefaultVisitor +func NewDefaultVisitor() *DefaultVisitor { + return &DefaultVisitor{ + Sections: Sections{ + container: map[string]Section{}, + }, + } +} + +// VisitExpr visits expressions... +func (v *DefaultVisitor) VisitExpr(expr AST) error { + t := v.Sections.container[v.scope] + if t.values == nil { + t.values = values{} + } + + switch expr.Kind { + case ASTKindExprStatement: + opExpr := expr.GetRoot() + switch opExpr.Kind { + case ASTKindEqualExpr: + children := opExpr.GetChildren() + if len(children) <= 1 { + return NewParseError("unexpected token type") + } + + rhs := children[1] + + if rhs.Root.Type() != TokenLit { + return NewParseError("unexpected token type") + } + + key := EqualExprKey(opExpr) + v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw()) + if err != nil { + return err + } + + t.values[key] = v + default: + return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) + } + default: + return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) + } + + v.Sections.container[v.scope] = t + return nil +} + +// VisitStatement visits statements... 
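+//
+// On a completed section statement this opens a new scope, so that later
+// VisitExpr calls populate that section. E.g. (illustrative), walking the
+// parsed input "[default]" then "region = us-west-2" makes
+// Sections.GetSection("default") return a Section whose String("region")
+// is "us-west-2".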
+func (v *DefaultVisitor) VisitStatement(stmt AST) error { + switch stmt.Kind { + case ASTKindCompletedSectionStatement: + child := stmt.GetRoot() + if child.Kind != ASTKindSectionStatement { + return NewParseError(fmt.Sprintf("unsupported child statement: %T", child)) + } + + name := string(child.Root.Raw()) + v.Sections.container[name] = Section{} + v.scope = name + default: + return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind)) + } + + return nil +} + +// Sections is a map of Section structures that represent +// a configuration. +type Sections struct { + container map[string]Section +} + +// GetSection will return section p. If section p does not exist, +// false will be returned in the second parameter. +func (t Sections) GetSection(p string) (Section, bool) { + v, ok := t.container[p] + return v, ok +} + +// values represents a map of union values. +type values map[string]Value + +// List will return a list of all sections that were successfully +// parsed. +func (t Sections) List() []string { + keys := make([]string, len(t.container)) + i := 0 + for k := range t.container { + keys[i] = k + i++ + } + + sort.Strings(keys) + return keys +} + +// Section contains a name and values. This represent +// a sectioned entry in a configuration file. +type Section struct { + Name string + values values +} + +// Has will return whether or not an entry exists in a given section +func (t Section) Has(k string) bool { + _, ok := t.values[k] + return ok +} + +// ValueType will returned what type the union is set to. If +// k was not found, the NoneType will be returned. +func (t Section) ValueType(k string) (ValueType, bool) { + v, ok := t.values[k] + return v.Type, ok +} + +// Bool returns a bool value at k +func (t Section) Bool(k string) bool { + return t.values[k].BoolValue() +} + +// Int returns an integer value at k +func (t Section) Int(k string) int64 { + return t.values[k].IntValue() +} + +// Float64 returns a float value at k +func (t Section) Float64(k string) float64 { + return t.values[k].FloatValue() +} + +// String returns the string value at k +func (t Section) String(k string) string { + _, ok := t.values[k] + if !ok { + return "" + } + return t.values[k].StringValue() +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go new file mode 100644 index 000000000..99915f7f7 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go @@ -0,0 +1,25 @@ +package ini + +// Walk will traverse the AST using the v, the Visitor. +func Walk(tree []AST, v Visitor) error { + for _, node := range tree { + switch node.Kind { + case ASTKindExpr, + ASTKindExprStatement: + + if err := v.VisitExpr(node); err != nil { + return err + } + case ASTKindStatement, + ASTKindCompletedSectionStatement, + ASTKindNestedSectionStatement, + ASTKindCompletedNestedSectionStatement: + + if err := v.VisitStatement(node); err != nil { + return err + } + } + } + + return nil +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go new file mode 100644 index 000000000..7ffb4ae06 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go @@ -0,0 +1,24 @@ +package ini + +import ( + "unicode" +) + +// isWhitespace will return whether or not the character is +// a whitespace character. +// +// Whitespace is defined as a space or tab. 
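+//
+// Note: the implementation below is slightly broader than "space or tab";
+// it accepts any rune for which unicode.IsSpace is true except '\n' and
+// '\r', which are consumed by the newline token instead.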
+func isWhitespace(c rune) bool { + return unicode.IsSpace(c) && c != '\n' && c != '\r' +} + +func newWSToken(b []rune) (Token, int, error) { + i := 0 + for ; i < len(b); i++ { + if !isWhitespace(b[i]) { + break + } + } + + return newToken(TokenWS, b[:i], NoneType), i, nil +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go new file mode 100644 index 000000000..6c443988b --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go @@ -0,0 +1,12 @@ +package sdkio + +const ( + // Byte is 8 bits + Byte int64 = 1 + // KibiByte (KiB) is 1024 Bytes + KibiByte = Byte * 1024 + // MebiByte (MiB) is 1024 KiB + MebiByte = KibiByte * 1024 + // GibiByte (GiB) is 1024 MiB + GibiByte = MebiByte * 1024 +) diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go new file mode 100644 index 000000000..5aa9137e0 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go @@ -0,0 +1,10 @@ +// +build !go1.7 + +package sdkio + +// Copy of Go 1.7 io package's Seeker constants. +const ( + SeekStart = 0 // seek relative to the origin of the file + SeekCurrent = 1 // seek relative to the current offset + SeekEnd = 2 // seek relative to the end +) diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go new file mode 100644 index 000000000..e5f005613 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go @@ -0,0 +1,12 @@ +// +build go1.7 + +package sdkio + +import "io" + +// Alias for Go 1.7 io package Seeker constants +const ( + SeekStart = io.SeekStart // seek relative to the origin of the file + SeekCurrent = io.SeekCurrent // seek relative to the current offset + SeekEnd = io.SeekEnd // seek relative to the end +) diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go new file mode 100644 index 000000000..44898eed0 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go @@ -0,0 +1,15 @@ +// +build go1.10 + +package sdkmath + +import "math" + +// Round returns the nearest integer, rounding half away from zero. +// +// Special cases are: +// Round(±0) = ±0 +// Round(±Inf) = ±Inf +// Round(NaN) = NaN +func Round(x float64) float64 { + return math.Round(x) +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go new file mode 100644 index 000000000..810ec7f08 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go @@ -0,0 +1,56 @@ +// +build !go1.10 + +package sdkmath + +import "math" + +// Copied from the Go standard library's (Go 1.12) math/floor.go for use in +// Go version prior to Go 1.10. +const ( + uvone = 0x3FF0000000000000 + mask = 0x7FF + shift = 64 - 11 - 1 + bias = 1023 + signMask = 1 << 63 + fracMask = 1<= 0.5 { + // return t + Copysign(1, x) + // } + // return t + // } + bits := math.Float64bits(x) + e := uint(bits>>shift) & mask + if e < bias { + // Round abs(x) < 1 including denormals. 
+ bits &= signMask // +-0 + if e == bias-1 { + bits |= uvone // +-1 + } + } else if e < bias+shift { + // Round any abs(x) >= 1 containing a fractional component [0,1). + // + // Numbers with larger exponents are returned unchanged since they + // must be either an integer, infinity, or NaN. + const half = 1 << (shift - 1) + e -= bias + bits += half >> e + bits &^= fracMask >> e + } + return math.Float64frombits(bits) +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go new file mode 100644 index 000000000..0c9802d87 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go @@ -0,0 +1,29 @@ +package sdkrand + +import ( + "math/rand" + "sync" + "time" +) + +// lockedSource is a thread-safe implementation of rand.Source +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} + +// SeededRand is a new RNG using a thread safe implementation of rand.Source +var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go new file mode 100644 index 000000000..f4651da2d --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go @@ -0,0 +1,11 @@ +// +build go1.6 + +package sdkrand + +import "math/rand" + +// Read provides the stub for math.Rand.Read method support for go version's +// 1.6 and greater. +func Read(r *rand.Rand, p []byte) (int, error) { + return r.Read(p) +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go new file mode 100644 index 000000000..b1d93a33d --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go @@ -0,0 +1,24 @@ +// +build !go1.6 + +package sdkrand + +import "math/rand" + +// Read backfills Go 1.6's math.Rand.Reader for Go 1.5 +func Read(r *rand.Rand, p []byte) (n int, err error) { + // Copy of Go standard libraries math package's read function not added to + // standard library until Go 1.6. + var pos int8 + var val int64 + for n = 0; n < len(p); n++ { + if pos == 0 { + val = r.Int63() + pos = 7 + } + p[n] = byte(val) + val >>= 8 + pos-- + } + + return n, err +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go new file mode 100644 index 000000000..38ea61afe --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go @@ -0,0 +1,23 @@ +package sdkuri + +import ( + "path" + "strings" +) + +// PathJoin will join the elements of the path delimited by the "/" +// character. Similar to path.Join with the exception the trailing "/" +// character is preserved if present. +func PathJoin(elems ...string) string { + if len(elems) == 0 { + return "" + } + + hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/") + str := path.Join(elems...) 
+ if hasTrailing && str != "/" { + str += "/" + } + + return str +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go new file mode 100644 index 000000000..7da8a49ce --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go @@ -0,0 +1,12 @@ +package shareddefaults + +const ( + // ECSCredsProviderEnvVar is an environmental variable key used to + // determine which path needs to be hit. + ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" +) + +// ECSContainerCredentialsURI is the endpoint to retrieve container +// credentials. This can be overridden to test to ensure the credential process +// is behaving correctly. +var ECSContainerCredentialsURI = "http://169.254.170.2" diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go new file mode 100644 index 000000000..ebcbc2b40 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go @@ -0,0 +1,40 @@ +package shareddefaults + +import ( + "os" + "path/filepath" + "runtime" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "credentials") +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/config +// - Windows: %USERPROFILE%\.aws\config +func SharedConfigFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "config") +} + +// UserHomeDir returns the home directory for the user the process is +// running under. +func UserHomeDir() string { + if runtime.GOOS == "windows" { // Windows + return os.Getenv("USERPROFILE") + } + + // *nix + return os.Getenv("HOME") +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go new file mode 100644 index 000000000..d008ae27c --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go @@ -0,0 +1,11 @@ +package strings + +import ( + "strings" +) + +// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings, +// under Unicode case-folding. +func HasPrefixFold(s, prefix string) bool { + return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix) +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go new file mode 100644 index 000000000..14ad0c589 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go @@ -0,0 +1,120 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package singleflight provides a duplicate function call suppression +// mechanism. +package singleflight + +import "sync" + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // forgotten indicates whether Forget was called with this call's key + // while the call was still in flight. + forgotten bool + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. 
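+//
+// A usage sketch (the key and fetchUser are hypothetical):
+//
+//	var g singleflight.Group
+//	v, err, shared := g.Do("user:42", func() (interface{}, error) {
+//		return fetchUser(42) // runs once; concurrent callers share the result
+//	})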
+func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. +func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + c.val, c.err = fn() + c.wg.Done() + + g.mu.Lock() + if !c.forgotten { + delete(g.m, key) + } + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + g.mu.Unlock() +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. +func (g *Group) Forget(key string) { + g.mu.Lock() + if c, ok := g.m[key]; ok { + c.forgotten = true + } + delete(g.m, key) + g.mu.Unlock() +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go new file mode 100644 index 000000000..d7d42db0a --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go @@ -0,0 +1,68 @@ +package protocol + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// ValidateEndpointHostHandler is a request handler that will validate the +// request endpoint's hosts is a valid RFC 3986 host. +var ValidateEndpointHostHandler = request.NamedHandler{ + Name: "awssdk.protocol.ValidateEndpointHostHandler", + Fn: func(r *request.Request) { + err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host) + if err != nil { + r.Error = err + } + }, +} + +// ValidateEndpointHost validates that the host string passed in is a valid RFC +// 3986 host. Returns error if the host is not valid. +func ValidateEndpointHost(opName, host string) error { + paramErrs := request.ErrInvalidParams{Context: opName} + labels := strings.Split(host, ".") + + for i, label := range labels { + if i == len(labels)-1 && len(label) == 0 { + // Allow trailing dot for FQDN hosts. + continue + } + + if !ValidHostLabel(label) { + paramErrs.Add(request.NewErrParamFormat( + "endpoint host label", "[a-zA-Z0-9-]{1,63}", label)) + } + } + + if len(host) > 255 { + paramErrs.Add(request.NewErrParamMaxLen( + "endpoint host", 255, host, + )) + } + + if paramErrs.Len() > 0 { + return paramErrs + } + return nil +} + +// ValidHostLabel returns if the label is a valid RFC 3986 host label. 
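+//
+// For example "ec2", "us-west-2" and "A1" are valid labels, while the
+// empty string, a label longer than 63 characters, or one containing "_"
+// or "." is not.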
+func ValidHostLabel(label string) bool { + if l := len(label); l == 0 || l > 63 { + return false + } + for _, r := range label { + switch { + case r >= '0' && r <= '9': + case r >= 'A' && r <= 'Z': + case r >= 'a' && r <= 'z': + case r == '-': + default: + return false + } + } + + return true +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go new file mode 100644 index 000000000..915b0fcaf --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go @@ -0,0 +1,54 @@ +package protocol + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// HostPrefixHandlerName is the handler name for the host prefix request +// handler. +const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler" + +// NewHostPrefixHandler constructs a build handler +func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler { + builder := HostPrefixBuilder{ + Prefix: prefix, + LabelsFn: labelsFn, + } + + return request.NamedHandler{ + Name: HostPrefixHandlerName, + Fn: builder.Build, + } +} + +// HostPrefixBuilder provides the request handler to expand and prepend +// the host prefix into the operation's request endpoint host. +type HostPrefixBuilder struct { + Prefix string + LabelsFn func() map[string]string +} + +// Build updates the passed in Request with the HostPrefix template expanded. +func (h HostPrefixBuilder) Build(r *request.Request) { + if aws.BoolValue(r.Config.DisableEndpointHostPrefix) { + return + } + + var labels map[string]string + if h.LabelsFn != nil { + labels = h.LabelsFn() + } + + prefix := h.Prefix + for name, value := range labels { + prefix = strings.Replace(prefix, "{"+name+"}", value, -1) + } + + r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host + if len(r.HTTPRequest.Host) > 0 { + r.HTTPRequest.Host = prefix + r.HTTPRequest.Host + } +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go new file mode 100644 index 000000000..53831dff9 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go @@ -0,0 +1,75 @@ +package protocol + +import ( + "crypto/rand" + "fmt" + "reflect" +) + +// RandReader is the random reader the protocol package will use to read +// random bytes from. This is exported for testing, and should not be used. +var RandReader = rand.Reader + +const idempotencyTokenFillTag = `idempotencyToken` + +// CanSetIdempotencyToken returns true if the struct field should be +// automatically populated with a Idempotency token. +// +// Only *string and string type fields that are tagged with idempotencyToken +// which are not already set can be auto filled. +func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool { + switch u := v.Interface().(type) { + // To auto fill an Idempotency token the field must be a string, + // tagged for auto fill, and have a zero value. + case *string: + return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + case string: + return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + } + + return false +} + +// GetIdempotencyToken returns a randomly generated idempotency token. 
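+//
+// The token is 16 bytes read from RandReader and formatted as a version-4
+// UUID, e.g. "9566C74D-1003-4C4D-BBBB-0407D1E2C649" (illustrative value).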
+func GetIdempotencyToken() string { + b := make([]byte, 16) + RandReader.Read(b) + + return UUIDVersion4(b) +} + +// SetIdempotencyToken will set the value provided with a Idempotency Token. +// Given that the value can be set. Will panic if value is not setable. +func SetIdempotencyToken(v reflect.Value) { + if v.Kind() == reflect.Ptr { + if v.IsNil() && v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + v = reflect.Indirect(v) + + if !v.CanSet() { + panic(fmt.Sprintf("unable to set idempotnecy token %v", v)) + } + + b := make([]byte, 16) + _, err := rand.Read(b) + if err != nil { + // TODO handle error + return + } + + v.Set(reflect.ValueOf(UUIDVersion4(b))) +} + +// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided +func UUIDVersion4(u []byte) string { + // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 + // 13th character is "4" + u[6] = (u[6] | 0x40) & 0x4F + // 17th character is "8", "9", "a", or "b" + u[8] = (u[8] | 0x80) & 0xBF + + return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go new file mode 100644 index 000000000..864fb6704 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go @@ -0,0 +1,296 @@ +// Package jsonutil provides JSON serialization of AWS requests and responses. +package jsonutil + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/private/protocol" +) + +var timeType = reflect.ValueOf(time.Time{}).Type() +var byteSliceType = reflect.ValueOf([]byte{}).Type() + +// BuildJSON builds a JSON string for a given object v. 
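+//
+// A marshaling sketch (the Input shape is hypothetical):
+//
+//	type Input struct {
+//		_    struct{} `type:"structure"`
+//		Name *string  `locationName:"name" type:"string"`
+//	}
+//
+// BuildJSON(&Input{Name: aws.String("x")}) produces `{"name":"x"}`,
+// honoring the locationName tag.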
+func BuildJSON(v interface{}) ([]byte, error) { + var buf bytes.Buffer + + err := buildAny(reflect.ValueOf(v), &buf, "") + return buf.Bytes(), err +} + +func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + origVal := value + value = reflect.Indirect(value) + if !value.IsValid() { + return nil + } + + vtype := value.Type() + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if value.Type() != timeType { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return buildStruct(value, buf, tag) + case "list": + return buildList(value, buf, tag) + case "map": + return buildMap(value, buf, tag) + default: + return buildScalar(origVal, buf, tag) + } +} + +func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + buf.WriteByte('{') + + t := value.Type() + first := true + for i := 0; i < t.NumField(); i++ { + member := value.Field(i) + + // This allocates the most memory. + // Additionally, we cannot skip nil fields due to + // idempotency auto filling. + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("json") == "-" { + continue + } + if field.Tag.Get("location") != "" { + continue // ignore non-body elements + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(member, field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(&token) + } + + if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() { + continue // ignore unset fields + } + + if first { + first = false + } else { + buf.WriteByte(',') + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + writeString(name, buf) + buf.WriteString(`:`) + + err := buildAny(member, buf, field.Tag) + if err != nil { + return err + } + + } + + buf.WriteString("}") + + return nil +} + +func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("[") + + for i := 0; i < value.Len(); i++ { + buildAny(value.Index(i), buf, "") + + if i < value.Len()-1 { + buf.WriteString(",") + } + } + + buf.WriteString("]") + + return nil +} + +type sortedValues []reflect.Value + +func (sv sortedValues) Len() int { return len(sv) } +func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } + +func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("{") + + sv := sortedValues(value.MapKeys()) + sort.Sort(sv) + + for i, k := range sv { + if i > 0 { + buf.WriteByte(',') + } + + writeString(k.String(), buf) + buf.WriteString(`:`) + + buildAny(value.MapIndex(k), buf, "") + } + + 
buf.WriteString("}") + + return nil +} + +func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + // prevents allocation on the heap. + scratch := [64]byte{} + switch value := reflect.Indirect(v); value.Kind() { + case reflect.String: + writeString(value.String(), buf) + case reflect.Bool: + if value.Bool() { + buf.WriteString("true") + } else { + buf.WriteString("false") + } + case reflect.Int64: + buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10)) + case reflect.Float64: + f := value.Float() + if math.IsInf(f, 0) || math.IsNaN(f) { + return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)} + } + buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) + default: + switch converted := value.Interface().(type) { + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.UnixTimeFormatName + } + + ts := protocol.FormatTime(format, converted) + if format != protocol.UnixTimeFormatName { + ts = `"` + ts + `"` + } + + buf.WriteString(ts) + case []byte: + if !value.IsNil() { + buf.WriteByte('"') + if len(converted) < 1024 { + // for small buffers, using Encode directly is much faster. + dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) + base64.StdEncoding.Encode(dst, converted) + buf.Write(dst) + } else { + // for large buffers, avoid unnecessary extra temporary + // buffer space. + enc := base64.NewEncoder(base64.StdEncoding, buf) + enc.Write(converted) + enc.Close() + } + buf.WriteByte('"') + } + case aws.JSONValue: + str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape) + if err != nil { + return fmt.Errorf("unable to encode JSONValue, %v", err) + } + buf.WriteString(str) + default: + return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type()) + } + } + return nil +} + +var hex = "0123456789abcdef" + +func writeString(s string, buf *bytes.Buffer) { + buf.WriteByte('"') + for i := 0; i < len(s); i++ { + if s[i] == '"' { + buf.WriteString(`\"`) + } else if s[i] == '\\' { + buf.WriteString(`\\`) + } else if s[i] == '\b' { + buf.WriteString(`\b`) + } else if s[i] == '\f' { + buf.WriteString(`\f`) + } else if s[i] == '\r' { + buf.WriteString(`\r`) + } else if s[i] == '\t' { + buf.WriteString(`\t`) + } else if s[i] == '\n' { + buf.WriteString(`\n`) + } else if s[i] < 32 { + buf.WriteString("\\u00") + buf.WriteByte(hex[s[i]>>4]) + buf.WriteByte(hex[s[i]&0xF]) + } else { + buf.WriteByte(s[i]) + } + } + buf.WriteByte('"') +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go new file mode 100644 index 000000000..5e9499699 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go @@ -0,0 +1,282 @@ +package jsonutil + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalJSONError unmarshal's the reader's JSON document into the passed in +// type. The value to unmarshal the json document into must be a pointer to the +// type. 
+func UnmarshalJSONError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := json.NewDecoder(body).Decode(v) + if err != nil { + msg := "failed decoding error message" + if err == io.EOF { + msg = "error message missing" + err = nil + } + return awserr.NewUnmarshalError(err, msg, errBuf.Bytes()) + } + + return nil +} + +// UnmarshalJSON reads a stream and unmarshals the results in object v. +func UnmarshalJSON(v interface{}, stream io.Reader) error { + var out interface{} + + err := json.NewDecoder(stream).Decode(&out) + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + return unmarshaler{}.unmarshalAny(reflect.ValueOf(v), out, "") +} + +// UnmarshalJSONCaseInsensitive reads a stream and unmarshals the result into the +// object v. Ignores casing for structure members. +func UnmarshalJSONCaseInsensitive(v interface{}, stream io.Reader) error { + var out interface{} + + err := json.NewDecoder(stream).Decode(&out) + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + return unmarshaler{ + caseInsensitive: true, + }.unmarshalAny(reflect.ValueOf(v), out, "") +} + +type unmarshaler struct { + caseInsensitive bool +} + +func (u unmarshaler) unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { + vtype := value.Type() + if vtype.Kind() == reflect.Ptr { + vtype = vtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := value.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return u.unmarshalStruct(value, data, tag) + case "list": + return u.unmarshalList(value, data, tag) + case "map": + return u.unmarshalMap(value, data, tag) + default: + return u.unmarshalScalar(value, data, tag) + } +} + +func (u unmarshaler) unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a structure (%#v)", data) + } + + t := value.Type() + if value.Kind() == reflect.Ptr { + if value.IsNil() { // create the structure if it's nil + s := reflect.New(value.Type().Elem()) + value.Set(s) + value = s + } + + value = value.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return u.unmarshalAny(value.FieldByName(payload), data, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.PkgPath != "" { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if u.caseInsensitive { + if _, ok := mapData[name]; !ok { + // Fallback to uncased name search if the exact name didn't match. 
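+				// Any folded match is copied under the exact name so the
+				// mapData[name] lookup below succeeds; if several keys fold
+				// to the same name, the last one scanned wins.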
+ for kn, v := range mapData { + if strings.EqualFold(kn, name) { + mapData[name] = v + } + } + } + } + + member := value.FieldByIndex(field.Index) + err := u.unmarshalAny(member, mapData[name], field.Tag) + if err != nil { + return err + } + } + return nil +} + +func (u unmarshaler) unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + listData, ok := data.([]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a list (%#v)", data) + } + + if value.IsNil() { + l := len(listData) + value.Set(reflect.MakeSlice(value.Type(), l, l)) + } + + for i, c := range listData { + err := u.unmarshalAny(value.Index(i), c, "") + if err != nil { + return err + } + } + + return nil +} + +func (u unmarshaler) unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a map (%#v)", data) + } + + if value.IsNil() { + value.Set(reflect.MakeMap(value.Type())) + } + + for k, v := range mapData { + kvalue := reflect.ValueOf(k) + vvalue := reflect.New(value.Type().Elem()).Elem() + + u.unmarshalAny(vvalue, v, "") + value.SetMapIndex(kvalue, vvalue) + } + + return nil +} + +func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { + + switch d := data.(type) { + case nil: + return nil // nothing to do here + case string: + switch value.Interface().(type) { + case *string: + value.Set(reflect.ValueOf(&d)) + case []byte: + b, err := base64.StdEncoding.DecodeString(d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(b)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + // No need to use escaping as the value is a non-quoted string. + v, err := protocol.DecodeJSONValue(d, protocol.NoEscape) + if err != nil { + return err + } + value.Set(reflect.ValueOf(v)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + case float64: + switch value.Interface().(type) { + case *int64: + di := int64(d) + value.Set(reflect.ValueOf(&di)) + case *float64: + value.Set(reflect.ValueOf(&d)) + case *time.Time: + // Time unmarshaled from a float64 can only be epoch seconds + t := time.Unix(int64(d), 0).UTC() + value.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + case bool: + switch value.Interface().(type) { + case *bool: + value.Set(reflect.ValueOf(&d)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + default: + return fmt.Errorf("unsupported JSON value (%v)", data) + } + return nil +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go new file mode 100644 index 000000000..a029217e4 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go @@ -0,0 +1,88 @@ +// Package jsonrpc provides JSON RPC utilities for serialization of AWS +// requests and responses. 
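+//
+// Requests carry an "X-Amz-Target" header of the form
+// "<TargetPrefix>.<OperationName>" and a Content-Type of
+// "application/x-amz-json-<JSONVersion>".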
+package jsonrpc + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/json.json build_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/json.json unmarshal_test.go + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +var emptyJSON = []byte("{}") + +// BuildHandler is a named request handler for building jsonrpc protocol +// requests +var BuildHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.Build", + Fn: Build, +} + +// UnmarshalHandler is a named request handler for unmarshaling jsonrpc +// protocol requests +var UnmarshalHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.Unmarshal", + Fn: Unmarshal, +} + +// UnmarshalMetaHandler is a named request handler for unmarshaling jsonrpc +// protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.UnmarshalMeta", + Fn: UnmarshalMeta, +} + +// Build builds a JSON payload for a JSON RPC request. +func Build(req *request.Request) { + var buf []byte + var err error + if req.ParamsFilled() { + buf, err = jsonutil.BuildJSON(req.Params) + if err != nil { + req.Error = awserr.New(request.ErrCodeSerialization, "failed encoding JSON RPC request", err) + return + } + } else { + buf = emptyJSON + } + + if req.ClientInfo.TargetPrefix != "" || string(buf) != "{}" { + req.SetBufferBody(buf) + } + + if req.ClientInfo.TargetPrefix != "" { + target := req.ClientInfo.TargetPrefix + "." + req.Operation.Name + req.HTTPRequest.Header.Add("X-Amz-Target", target) + } + + // Only set the content type if one is not already specified and an + // JSONVersion is specified. + if ct, v := req.HTTPRequest.Header.Get("Content-Type"), req.ClientInfo.JSONVersion; len(ct) == 0 && len(v) != 0 { + jsonVersion := req.ClientInfo.JSONVersion + req.HTTPRequest.Header.Set("Content-Type", "application/x-amz-json-"+jsonVersion) + } +} + +// Unmarshal unmarshals a response for a JSON RPC service. +func Unmarshal(req *request.Request) { + defer req.HTTPResponse.Body.Close() + if req.DataFilled() { + err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body) + if err != nil { + req.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "failed decoding JSON RPC response", err), + req.HTTPResponse.StatusCode, + req.RequestID, + ) + } + } + return +} + +// UnmarshalMeta unmarshals headers from a response for a JSON RPC service. +func UnmarshalMeta(req *request.Request) { + rest.UnmarshalMeta(req) +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go new file mode 100644 index 000000000..c0c52e2db --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go @@ -0,0 +1,107 @@ +package jsonrpc + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" +) + +// UnmarshalTypedError provides unmarshaling errors API response errors +// for both typed and untyped errors. 
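+//
+// A construction sketch (the exception name and constructor are
+// hypothetical):
+//
+//	u := NewUnmarshalTypedError(map[string]func(protocol.ResponseMetadata) error{
+//		"ResourceNotFoundException": newResourceNotFoundError,
+//	})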
+type UnmarshalTypedError struct { + exceptions map[string]func(protocol.ResponseMetadata) error +} + +// NewUnmarshalTypedError returns an UnmarshalTypedError initialized for the +// set of exception names to the error unmarshalers +func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError { + return &UnmarshalTypedError{ + exceptions: exceptions, + } +} + +// UnmarshalError attempts to unmarshal the HTTP response error as a known +// error type. If unable to unmarshal the error type, the generic SDK error +// type will be used. +func (u *UnmarshalTypedError) UnmarshalError( + resp *http.Response, + respMeta protocol.ResponseMetadata, +) (error, error) { + + var buf bytes.Buffer + var jsonErr jsonErrorResponse + teeReader := io.TeeReader(resp.Body, &buf) + err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader) + if err != nil { + return nil, err + } + body := ioutil.NopCloser(&buf) + + // Code may be separated by hash(#), with the last element being the code + // used by the SDK. + codeParts := strings.SplitN(jsonErr.Code, "#", 2) + code := codeParts[len(codeParts)-1] + msg := jsonErr.Message + + if fn, ok := u.exceptions[code]; ok { + // If exception code is know, use associated constructor to get a value + // for the exception that the JSON body can be unmarshaled into. + v := fn(respMeta) + err := jsonutil.UnmarshalJSONCaseInsensitive(v, body) + if err != nil { + return nil, err + } + + return v, nil + } + + // fallback to unmodeled generic exceptions + return awserr.NewRequestFailure( + awserr.New(code, msg, nil), + respMeta.StatusCode, + respMeta.RequestID, + ), nil +} + +// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc +// protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.UnmarshalError", + Fn: UnmarshalError, +} + +// UnmarshalError unmarshals an error response for a JSON RPC service. +func UnmarshalError(req *request.Request) { + defer req.HTTPResponse.Body.Close() + + var jsonErr jsonErrorResponse + err := jsonutil.UnmarshalJSONError(&jsonErr, req.HTTPResponse.Body) + if err != nil { + req.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal error message", err), + req.HTTPResponse.StatusCode, + req.RequestID, + ) + return + } + + codes := strings.SplitN(jsonErr.Code, "#", 2) + req.Error = awserr.NewRequestFailure( + awserr.New(codes[len(codes)-1], jsonErr.Message, nil), + req.HTTPResponse.StatusCode, + req.RequestID, + ) +} + +type jsonErrorResponse struct { + Code string `json:"__type"` + Message string `json:"message"` +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go new file mode 100644 index 000000000..776d11018 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go @@ -0,0 +1,76 @@ +package protocol + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "strconv" + + "github.com/aws/aws-sdk-go/aws" +) + +// EscapeMode is the mode that should be use for escaping a value +type EscapeMode uint + +// The modes for escaping a value before it is marshaled, and unmarshaled. +const ( + NoEscape EscapeMode = iota + Base64Escape + QuotedEscape +) + +// EncodeJSONValue marshals the value into a JSON string, and optionally base64 +// encodes the string before returning it. +// +// Will panic if the escape mode is unknown. 
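+//
+// For example, EncodeJSONValue(aws.JSONValue{"k": "v"}, QuotedEscape)
+// returns the quoted string `"{\"k\":\"v\"}"`, while NoEscape returns
+// `{"k":"v"}` unchanged.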
+func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) { + b, err := json.Marshal(v) + if err != nil { + return "", err + } + + switch escape { + case NoEscape: + return string(b), nil + case Base64Escape: + return base64.StdEncoding.EncodeToString(b), nil + case QuotedEscape: + return strconv.Quote(string(b)), nil + } + + panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape)) +} + +// DecodeJSONValue will attempt to decode the string input as a JSONValue. +// Optionally decoding base64 the value first before JSON unmarshaling. +// +// Will panic if the escape mode is unknown. +func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) { + var b []byte + var err error + + switch escape { + case NoEscape: + b = []byte(v) + case Base64Escape: + b, err = base64.StdEncoding.DecodeString(v) + case QuotedEscape: + var u string + u, err = strconv.Unquote(v) + b = []byte(u) + default: + panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape)) + } + + if err != nil { + return nil, err + } + + m := aws.JSONValue{} + err = json.Unmarshal(b, &m) + if err != nil { + return nil, err + } + + return m, nil +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go new file mode 100644 index 000000000..0ea0647a5 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go @@ -0,0 +1,81 @@ +package protocol + +import ( + "io" + "io/ioutil" + "net/http" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// PayloadUnmarshaler provides the interface for unmarshaling a payload's +// reader into a SDK shape. +type PayloadUnmarshaler interface { + UnmarshalPayload(io.Reader, interface{}) error +} + +// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a +// HandlerList. This provides the support for unmarshaling a payload reader to +// a shape without needing a SDK request first. +type HandlerPayloadUnmarshal struct { + Unmarshalers request.HandlerList +} + +// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using +// the Unmarshalers HandlerList provided. Returns an error if unable +// unmarshaling fails. +func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error { + req := &request.Request{ + HTTPRequest: &http.Request{}, + HTTPResponse: &http.Response{ + StatusCode: 200, + Header: http.Header{}, + Body: ioutil.NopCloser(r), + }, + Data: v, + } + + h.Unmarshalers.Run(req) + + return req.Error +} + +// PayloadMarshaler provides the interface for marshaling a SDK shape into and +// io.Writer. +type PayloadMarshaler interface { + MarshalPayload(io.Writer, interface{}) error +} + +// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList. +// This provides support for marshaling a SDK shape into an io.Writer without +// needing a SDK request first. +type HandlerPayloadMarshal struct { + Marshalers request.HandlerList +} + +// MarshalPayload marshals the SDK shape into the io.Writer using the +// Marshalers HandlerList provided. Returns an error if unable if marshal +// fails. 
+func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error { + req := request.New( + aws.Config{}, + metadata.ClientInfo{}, + request.Handlers{}, + nil, + &request.Operation{HTTPMethod: "PUT"}, + v, + nil, + ) + + h.Marshalers.Run(req) + + if req.Error != nil { + return req.Error + } + + io.Copy(w, req.GetBody()) + + return nil +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go new file mode 100644 index 000000000..9d521dcb9 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go @@ -0,0 +1,49 @@ +package protocol + +import ( + "fmt" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// RequireHTTPMinProtocol request handler is used to enforce that +// the target endpoint supports the given major and minor HTTP protocol version. +type RequireHTTPMinProtocol struct { + Major, Minor int +} + +// Handler will mark the request.Request with an error if the +// target endpoint did not connect with the required HTTP protocol +// major and minor version. +func (p RequireHTTPMinProtocol) Handler(r *request.Request) { + if r.Error != nil || r.HTTPResponse == nil { + return + } + + if !strings.HasPrefix(r.HTTPResponse.Proto, "HTTP") { + r.Error = newMinHTTPProtoError(p.Major, p.Minor, r) + } + + if r.HTTPResponse.ProtoMajor < p.Major || r.HTTPResponse.ProtoMinor < p.Minor { + r.Error = newMinHTTPProtoError(p.Major, p.Minor, r) + } +} + +// ErrCodeMinimumHTTPProtocolError error code is returned when the target endpoint +// did not match the required HTTP major and minor protocol version. +const ErrCodeMinimumHTTPProtocolError = "MinimumHTTPProtocolError" + +func newMinHTTPProtoError(major, minor int, r *request.Request) error { + return awserr.NewRequestFailure( + awserr.New("MinimumHTTPProtocolError", + fmt.Sprintf( + "operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s", + major, minor, r.HTTPResponse.Proto, + ), + nil, + ), + r.HTTPResponse.StatusCode, r.RequestID, + ) +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go new file mode 100644 index 000000000..d40346a77 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -0,0 +1,36 @@ +// Package query provides serialization of AWS query requests, and responses. +package query + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/query.json build_test.go + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" +) + +// BuildHandler is a named request handler for building query protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} + +// Build builds a request for an AWS Query service. 
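+//
+// For an operation named "DescribeThings" with API version "2016-11-15"
+// (illustrative values), the encoded form body contains
+// "Action=DescribeThings&Version=2016-11-15" alongside the serialized
+// request parameters.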
+func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, false); err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed encoding Query request", err) + return + } + + if !r.IsPresigned() { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go new file mode 100644 index 000000000..75866d012 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -0,0 +1,246 @@ +package queryutil + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// Parse parses an object i and fills a url.Values object. The isEC2 flag +// indicates if this is the EC2 Query sub-protocol. +func Parse(body url.Values, i interface{}, isEC2 bool) error { + q := queryParser{isEC2: isEC2} + return q.parseValue(body, reflect.ValueOf(i), "", "") +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type queryParser struct { + isEC2 bool +} + +func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + value = elemOf(value) + + // no need to handle zero values + if !value.IsValid() { + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + return q.parseStruct(v, value, prefix) + case "list": + return q.parseList(v, value, prefix, tag) + case "map": + return q.parseMap(v, value, prefix, tag) + default: + return q.parseScalar(v, value, prefix, tag) + } +} + +func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { + if !value.IsValid() { + return nil + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + elemValue := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + elemValue = reflect.ValueOf(token) + } + + var name string + if q.isEC2 { + name = field.Tag.Get("queryName") + } + if name == "" { + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if name != "" && q.isEC2 { + name = strings.ToUpper(name[0:1]) + name[1:] + } + } + if name == "" { + name = field.Name + } + + if prefix != "" { + name = prefix + "." 
+ name + } + + if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + if _, ok := value.Interface().([]byte); ok { + return q.parseScalar(v, value, prefix, tag) + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + if listName := tag.Get("locationNameList"); listName == "" { + prefix += ".member" + } else { + prefix += "." + listName + } + } + + for i := 0; i < value.Len(); i++ { + slicePrefix := prefix + if slicePrefix == "" { + slicePrefix = strconv.Itoa(i + 1) + } else { + slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) + } + if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".entry" + } + + // sort keys for improved serialization consistency. + // this is not strictly necessary for protocol support. + mapKeyValues := value.MapKeys() + mapKeys := map[string]reflect.Value{} + mapKeyNames := make([]string, len(mapKeyValues)) + for i, mapKey := range mapKeyValues { + name := mapKey.String() + mapKeys[name] = mapKey + mapKeyNames[i] = name + } + sort.Strings(mapKeyNames) + + for i, mapKeyName := range mapKeyNames { + mapKey := mapKeys[mapKeyName] + mapValue := value.MapIndex(mapKey) + + kname := tag.Get("locationNameKey") + if kname == "" { + kname = "key" + } + vname := tag.Get("locationNameValue") + if vname == "" { + vname = "value" + } + + // serialize key + var keyName string + if prefix == "" { + keyName = strconv.Itoa(i+1) + "." + kname + } else { + keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname + } + + if err := q.parseValue(v, mapKey, keyName, ""); err != nil { + return err + } + + // serialize value + var valueName string + if prefix == "" { + valueName = strconv.Itoa(i+1) + "." + vname + } else { + valueName = prefix + "." + strconv.Itoa(i+1) + "." 
+ vname + } + + if err := q.parseValue(v, mapValue, valueName, ""); err != nil { + return err + } + } + + return nil +} + +func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { + switch value := r.Interface().(type) { + case string: + v.Set(name, value) + case []byte: + if !r.IsNil() { + v.Set(name, base64.StdEncoding.EncodeToString(value)) + } + case bool: + v.Set(name, strconv.FormatBool(value)) + case int64: + v.Set(name, strconv.FormatInt(value, 10)) + case int: + v.Set(name, strconv.Itoa(value)) + case float64: + v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + case float32: + v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + v.Set(name, protocol.FormatTime(format, value)) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) + } + return nil +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go new file mode 100644 index 000000000..9231e95d1 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -0,0 +1,39 @@ +package query + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/query.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalHandler is a named request handler for unmarshaling query protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals a response for an AWS Query service. +func Unmarshal(r *request.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "failed decoding Query response", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + } +} + +// UnmarshalMeta unmarshals header response values for an AWS Query service. 
+func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go new file mode 100644 index 000000000..831b0110c --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go @@ -0,0 +1,69 @@ +package query + +import ( + "encoding/xml" + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalErrorHandler is a name request handler to unmarshal request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} + +type xmlErrorResponse struct { + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` +} + +type xmlResponseError struct { + xmlErrorResponse +} + +func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + const svcUnavailableTagName = "ServiceUnavailableException" + const errorResponseTagName = "ErrorResponse" + + switch start.Name.Local { + case svcUnavailableTagName: + e.Code = svcUnavailableTagName + e.Message = "service is unavailable" + return d.Skip() + + case errorResponseTagName: + return d.DecodeElement(&e.xmlErrorResponse, &start) + + default: + return fmt.Errorf("unknown error response tag, %v", start) + } +} + +// UnmarshalError unmarshals an error response for an AWS Query service. +func UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + var respErr xmlResponseError + err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal error message", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + reqID := respErr.RequestID + if len(reqID) == 0 { + reqID = r.RequestID + } + + r.Error = awserr.NewRequestFailure( + awserr.New(respErr.Code, respErr.Message, nil), + r.HTTPResponse.StatusCode, + reqID, + ) +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go new file mode 100644 index 000000000..1301b149d --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -0,0 +1,310 @@ +// Package rest provides RESTful serialization of AWS requests and responses. +package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "net/http" + "net/url" + "path" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// Whether the byte value can be sent without escaping in AWS URLs +var noEscape [256]bool + +var errValueNotSet = fmt.Errorf("value not set") + +var byteSliceType = reflect.TypeOf([]byte{}) + +func init() { + for i := 0; i < len(noEscape); i++ { + // AWS expects every character except these to be escaped + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' 
|| + i == '_' || + i == '~' + } +} + +// BuildHandler is a named request handler for building rest protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} + +// Build builds the REST component of a service request. +func Build(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v, false) + buildBody(r, v) + } +} + +// BuildAsGET builds the REST component of a service request with the ability to hoist +// data from the body. +func BuildAsGET(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v, true) + buildBody(r, v) + } +} + +func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) { + query := r.HTTPRequest.URL.Query() + + // Setup the raw path to match the base path pattern. This is needed + // so that when the path is mutated a custom escaped version can be + // stored in RawPath that will be used by the Go client. + r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path + + for i := 0; i < v.NumField(); i++ { + m := v.Field(i) + if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + field := v.Type().Field(i) + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + if kind := m.Kind(); kind == reflect.Ptr { + m = m.Elem() + } else if kind == reflect.Interface { + if !m.Elem().IsValid() { + continue + } + } + if !m.IsValid() { + continue + } + if field.Tag.Get("ignore") != "" { + continue + } + + // Support the ability to customize values to be marshaled as a + // blob even though they were modeled as a string. Required for S3 + // API operations like SSECustomerKey is modeled as stirng but + // required to be base64 encoded in request. 
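+			// The "marshal-as" conversion below reuses convertType's []byte
+			// path, which base64-encodes the value when serialized.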
+ if field.Tag.Get("marshal-as") == "blob" { + m = m.Convert(byteSliceType) + } + + var err error + switch field.Tag.Get("location") { + case "headers": // header maps + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag) + case "header": + err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag) + case "uri": + err = buildURI(r.HTTPRequest.URL, m, name, field.Tag) + case "querystring": + err = buildQueryString(query, m, name, field.Tag) + default: + if buildGETQuery { + err = buildQueryString(query, m, name, field.Tag) + } + } + r.Error = err + } + if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) { + cleanPath(r.HTTPRequest.URL) + } +} + +func buildBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetStringBody(reader) + default: + r.Error = awserr.New(request.ErrCodeSerialization, + "failed to encode REST request", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } +} + +func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error { + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + + name = strings.TrimSpace(name) + str = strings.TrimSpace(str) + + header.Add(name, str) + + return nil +} + +func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error { + prefix := tag.Get("locationName") + for _, key := range v.MapKeys() { + str, err := convertType(v.MapIndex(key), tag) + if err == errValueNotSet { + continue + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + + } + keyStr := strings.TrimSpace(key.String()) + str = strings.TrimSpace(str) + + header.Add(prefix+keyStr, str) + } + return nil +} + +func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error { + value, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + + u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) + u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1) + + u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1) + u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1) + + return nil +} + +func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error { + switch value := v.Interface().(type) { + case []*string: + for _, item := range value { + query.Add(name, *item) + } + case map[string]*string: + for key, item := range value { + query.Add(key, *item) + } + case map[string][]*string: + for key, items := range value { + for _, item := range items { + query.Add(key, *item) + } + } + default: + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else 
if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + query.Set(name, str) + } + + return nil +} + +func cleanPath(u *url.URL) { + hasSlash := strings.HasSuffix(u.Path, "/") + + // clean up path, removing duplicate `/` + u.Path = path.Clean(u.Path) + u.RawPath = path.Clean(u.RawPath) + + if hasSlash && !strings.HasSuffix(u.Path, "/") { + u.Path += "/" + u.RawPath += "/" + } +} + +// EscapePath escapes part of a URL path in Amazon style +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) { + v = reflect.Indirect(v) + if !v.IsValid() { + return "", errValueNotSet + } + + switch value := v.Interface().(type) { + case string: + str = value + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + str = strconv.FormatFloat(value, 'f', -1, 64) + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + if tag.Get("location") == "querystring" { + format = protocol.ISO8601TimeFormatName + } + } + str = protocol.FormatTime(format, value) + case aws.JSONValue: + if len(value) == 0 { + return "", errValueNotSet + } + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + str, err = protocol.EncodeJSONValue(value, escaping) + if err != nil { + return "", fmt.Errorf("unable to encode JSONValue, %v", err) + } + default: + err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) + return "", err + } + return str, nil +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go new file mode 100644 index 000000000..4366de2e1 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go @@ -0,0 +1,45 @@ +package rest + +import "reflect" + +// PayloadMember returns the payload field member of i if there is one, or nil. +func PayloadMember(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i).Elem() + if !v.IsValid() { + return nil + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + field, _ := v.Type().FieldByName(payloadName) + if field.Tag.Get("type") != "structure" { + return nil + } + + payload := v.FieldByName(payloadName) + if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { + return payload.Interface() + } + } + } + return nil +} + +// PayloadType returns the type of a payload field member of i if there is one, or "". 
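+//
+// For example, given a shape whose "_" field is tagged `payload:"Body"` and
+// whose Body member is tagged `type:"blob"`, PayloadType returns "blob".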
+func PayloadType(i interface{}) string { + v := reflect.Indirect(reflect.ValueOf(i)) + if !v.IsValid() { + return "" + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + if member, ok := v.Type().FieldByName(payloadName); ok { + return member.Tag.Get("type") + } + } + } + return "" +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go new file mode 100644 index 000000000..92f8b4d9a --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -0,0 +1,257 @@ +package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + awsStrings "github.com/aws/aws-sdk-go/internal/strings" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals the REST component of a response in a REST service. +func Unmarshal(r *request.Request) { + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + if err := unmarshalBody(r, v); err != nil { + r.Error = err + } + } +} + +// UnmarshalMeta unmarshals the REST metadata of a response in a REST service +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } + if r.DataFilled() { + if err := UnmarshalResponse(r.HTTPResponse, r.Data, aws.BoolValue(r.Config.LowerCaseHeaderMaps)); err != nil { + r.Error = err + } + } +} + +// UnmarshalResponse attempts to unmarshal the REST response headers to +// the data type passed in. The type must be a pointer. An error is returned +// with any error unmarshaling the response into the target datatype. 
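+//
+// Illustrative direct use against a plain *http.Response (the output shape
+// and header name are hypothetical):
+//
+//    out := &struct {
+//        _         struct{} `type:"structure"`
+//        RequestID *string  `location:"header" locationName:"X-Request-Id" type:"string"`
+//    }{}
+//    err := rest.UnmarshalResponse(resp, out, false)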
+func UnmarshalResponse(resp *http.Response, data interface{}, lowerCaseHeaderMaps bool) error { + v := reflect.Indirect(reflect.ValueOf(data)) + return unmarshalLocationElements(resp, v, lowerCaseHeaderMaps) +} + +func unmarshalBody(r *request.Request, v reflect.Value) error { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := v.FieldByName(payloadName) + if payload.IsValid() { + switch payload.Interface().(type) { + case []byte: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + } + + payload.Set(reflect.ValueOf(b)) + + case *string: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + } + + str := string(b) + payload.Set(reflect.ValueOf(&str)) + + default: + switch payload.Type().String() { + case "io.ReadCloser": + payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + + case "io.ReadSeeker": + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + return awserr.New(request.ErrCodeSerialization, + "failed to read response body", err) + } + payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b)))) + + default: + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + r.HTTPResponse.Body.Close() + return awserr.New(request.ErrCodeSerialization, + "failed to decode REST response", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } + } + + return nil +} + +func unmarshalLocationElements(resp *http.Response, v reflect.Value, lowerCaseHeaderMaps bool) error { + for i := 0; i < v.NumField(); i++ { + m, field := v.Field(i), v.Type().Field(i) + if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + + switch field.Tag.Get("location") { + case "statusCode": + unmarshalStatusCode(m, resp.StatusCode) + + case "header": + err := unmarshalHeader(m, resp.Header.Get(name), field.Tag) + if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + } + + case "headers": + prefix := field.Tag.Get("locationName") + err := unmarshalHeaderMap(m, resp.Header, prefix, lowerCaseHeaderMaps) + if err != nil { + awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + } + } + } + } + + return nil +} + +func unmarshalStatusCode(v reflect.Value, statusCode int) { + if !v.IsValid() { + return + } + + switch v.Interface().(type) { + case *int64: + s := int64(statusCode) + v.Set(reflect.ValueOf(&s)) + } +} + +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string, normalize bool) error { + if len(headers) == 0 { + return nil + } + switch r.Interface().(type) { + case map[string]*string: // we only support string map value types + out := map[string]*string{} + for k, v := range headers { + if awsStrings.HasPrefixFold(k, prefix) { + if normalize == true { + k = strings.ToLower(k) + } else { + k = http.CanonicalHeaderKey(k) + } + out[k[len(prefix):]] = &v[0] + } + } + if len(out) != 0 { + r.Set(reflect.ValueOf(out)) + } + + } + return nil +} + +func unmarshalHeader(v reflect.Value, header string, tag 
reflect.StructTag) error { + switch tag.Get("type") { + case "jsonvalue": + if len(header) == 0 { + return nil + } + case "blob": + if len(header) == 0 { + return nil + } + default: + if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } + } + + switch v.Interface().(type) { + case *string: + v.Set(reflect.ValueOf(&header)) + case []byte: + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(b)) + case *bool: + b, err := strconv.ParseBool(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *int64: + i, err := strconv.ParseInt(header, 10, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&i)) + case *float64: + f, err := strconv.ParseFloat(header, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&f)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + } + t, err := protocol.ParseTime(format, header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + m, err := protocol.DecodeJSONValue(header, escaping) + if err != nil { + return err + } + v.Set(reflect.ValueOf(m)) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return err + } + return nil +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go new file mode 100644 index 000000000..05d4ff519 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go @@ -0,0 +1,84 @@ +package protocol + +import ( + "math" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/internal/sdkmath" +) + +// Names of time formats supported by the SDK +const ( + RFC822TimeFormatName = "rfc822" + ISO8601TimeFormatName = "iso8601" + UnixTimeFormatName = "unixTimestamp" +) + +// Time formats supported by the SDK +// Output time is intended to not contain decimals +const ( + // RFC 7231#section-7.1.1.1 timetamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT + RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" + + // This format is used for output time without seconds precision + RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" + + // RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z + ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z" + + // This format is used for output time without seconds precision + ISO8601OutputTimeFormat = "2006-01-02T15:04:05Z" +) + +// IsKnownTimestampFormat returns if the timestamp format name +// is know to the SDK's protocols. +func IsKnownTimestampFormat(name string) bool { + switch name { + case RFC822TimeFormatName: + fallthrough + case ISO8601TimeFormatName: + fallthrough + case UnixTimeFormatName: + return true + default: + return false + } +} + +// FormatTime returns a string value of the time. +func FormatTime(name string, t time.Time) string { + t = t.UTC() + + switch name { + case RFC822TimeFormatName: + return t.Format(RFC822OutputTimeFormat) + case ISO8601TimeFormatName: + return t.Format(ISO8601OutputTimeFormat) + case UnixTimeFormatName: + return strconv.FormatInt(t.Unix(), 10) + default: + panic("unknown timestamp format name, " + name) + } +} + +// ParseTime attempts to parse the time given the format. 
Returns +// the time if it was able to be parsed, and fails otherwise. +func ParseTime(formatName, value string) (time.Time, error) { + switch formatName { + case RFC822TimeFormatName: + return time.Parse(RFC822TimeFormat, value) + case ISO8601TimeFormatName: + return time.Parse(ISO8601TimeFormat, value) + case UnixTimeFormatName: + v, err := strconv.ParseFloat(value, 64) + _, dec := math.Modf(v) + dec = sdkmath.Round(dec*1e3) / 1e3 //Rounds 0.1229999 to 0.123 + if err != nil { + return time.Time{}, err + } + return time.Unix(int64(v), int64(dec*(1e9))), nil + default: + panic("unknown timestamp format name, " + formatName) + } +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go new file mode 100644 index 000000000..f614ef898 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go @@ -0,0 +1,27 @@ +package protocol + +import ( + "io" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body +var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody} + +// UnmarshalDiscardBody is a request handler to empty a response's body and closing it. +func UnmarshalDiscardBody(r *request.Request) { + if r.HTTPResponse == nil || r.HTTPResponse.Body == nil { + return + } + + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + r.HTTPResponse.Body.Close() +} + +// ResponseMetadata provides the SDK response metadata attributes. +type ResponseMetadata struct { + StatusCode int + RequestID string +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go new file mode 100644 index 000000000..cc857f136 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go @@ -0,0 +1,65 @@ +package protocol + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalErrorHandler provides unmarshaling errors API response errors for +// both typed and untyped errors. +type UnmarshalErrorHandler struct { + unmarshaler ErrorUnmarshaler +} + +// ErrorUnmarshaler is an abstract interface for concrete implementations to +// unmarshal protocol specific response errors. +type ErrorUnmarshaler interface { + UnmarshalError(*http.Response, ResponseMetadata) (error, error) +} + +// NewUnmarshalErrorHandler returns an UnmarshalErrorHandler +// initialized for the set of exception names to the error unmarshalers +func NewUnmarshalErrorHandler(unmarshaler ErrorUnmarshaler) *UnmarshalErrorHandler { + return &UnmarshalErrorHandler{ + unmarshaler: unmarshaler, + } +} + +// UnmarshalErrorHandlerName is the name of the named handler. +const UnmarshalErrorHandlerName = "awssdk.protocol.UnmarshalError" + +// NamedHandler returns a NamedHandler for the unmarshaler using the set of +// errors the unmarshaler was initialized for. +func (u *UnmarshalErrorHandler) NamedHandler() request.NamedHandler { + return request.NamedHandler{ + Name: UnmarshalErrorHandlerName, + Fn: u.UnmarshalError, + } +} + +// UnmarshalError will attempt to unmarshal the API response's error message +// into either a generic SDK error type, or a typed error corresponding to the +// errors exception name. 
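+//
+// Sketch of how a protocol package would typically wire this handler in
+// (xmlErrorUnmarshaler stands for a hypothetical ErrorUnmarshaler
+// implementation, not one defined here):
+//
+//    h := protocol.NewUnmarshalErrorHandler(xmlErrorUnmarshaler{})
+//    svc.Handlers.UnmarshalError.PushBackNamed(h.NamedHandler())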
+func (u *UnmarshalErrorHandler) UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + respMeta := ResponseMetadata{ + StatusCode: r.HTTPResponse.StatusCode, + RequestID: r.RequestID, + } + + v, err := u.unmarshaler.UnmarshalError(r.HTTPResponse, respMeta) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal response error", err), + respMeta.StatusCode, + respMeta.RequestID, + ) + return + } + + r.Error = v +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go new file mode 100644 index 000000000..09ad95159 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -0,0 +1,315 @@ +// Package xmlutil provides XML serialization of AWS requests and responses. +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// BuildXML will serialize params into an xml.Encoder. Error will be returned +// if the serialization of any of the params or nested values fails. +func BuildXML(params interface{}, e *xml.Encoder) error { + return buildXML(params, e, false) +} + +func buildXML(params interface{}, e *xml.Encoder, sorted bool) error { + b := xmlBuilder{encoder: e, namespaces: map[string]string{}} + root := NewXMLElement(xml.Name{}) + if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { + return err + } + for _, c := range root.Children { + for _, v := range c { + return StructToXML(e, v, sorted) + } + } + return nil +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +// A xmlBuilder serializes values from Go code to XML +type xmlBuilder struct { + encoder *xml.Encoder + namespaces map[string]string +} + +// buildValue generic XMLNode builder for any type. Will build value for their specific type +// struct, list, map, scalar. +// +// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If +// type is not provided reflect will be used to determine the value's type. +func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + value = elemOf(value) + if !value.IsValid() { // no need to handle zero values + return nil + } else if tag.Get("location") != "" { // don't handle non-body location values + return nil + } + + xml := tag.Get("xml") + if len(xml) != 0 { + name := strings.SplitAfterN(xml, ",", 2)[0] + if name == "-" { + return nil + } + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := value.Type().FieldByName("_"); ok { + tag = tag + reflect.StructTag(" ") + field.Tag + } + return b.buildStruct(value, current, tag) + case "list": + return b.buildList(value, current, tag) + case "map": + return b.buildMap(value, current, tag) + default: + return b.buildScalar(value, current, tag) + } +} + +// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested +// types are converted to XMLNodes also. 
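+//
+// For instance, a shape along these lines (tags illustrative):
+//
+//    type person struct {
+//        _    struct{} `type:"structure"`
+//        Name *string  `type:"string" locationName:"Name"`
+//    }
+//
+// yields a child element per field, e.g. <Name>...</Name>, under the
+// element named by the struct's own locationName tag.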
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + child := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + + // there is an xmlNamespace associated with this struct + if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" { + ns := xml.Attr{ + Name: xml.Name{Local: "xmlns"}, + Value: uri, + } + if prefix != "" { + b.namespaces[prefix] = uri // register the namespace + ns.Name.Local = "xmlns:" + prefix + } + + child.Attr = append(child.Attr, ns) + } + + var payloadFields, nonPayloadFields int + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + member := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("ignore") != "" { + continue + } + + mTag := field.Tag + if mTag.Get("location") != "" { // skip non-body members + nonPayloadFields++ + continue + } + payloadFields++ + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(token) + } + + memberName := mTag.Get("locationName") + if memberName == "" { + memberName = field.Name + mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`) + } + if err := b.buildValue(member, child, mTag); err != nil { + return err + } + } + + // Only case where the child shape is not added is if the shape only contains + // non-payload fields, e.g headers/query. + if !(payloadFields == 0 && nonPayloadFields > 0) { + current.AddChild(child) + } + + return nil +} + +// buildList adds the value's list items to the current XMLNode as children nodes. All +// nested values in the list are converted to XMLNodes also. +func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted lists + return nil + } + + // check for unflattened list member + flattened := tag.Get("flattened") != "" + + xname := xml.Name{Local: tag.Get("locationName")} + if flattened { + for i := 0; i < value.Len(); i++ { + child := NewXMLElement(xname) + current.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } else { + list := NewXMLElement(xname) + current.AddChild(list) + + for i := 0; i < value.Len(); i++ { + iname := tag.Get("locationNameList") + if iname == "" { + iname = "member" + } + + child := NewXMLElement(xml.Name{Local: iname}) + list.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } + + return nil +} + +// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All +// nested values in the map are converted to XMLNodes also. 
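+//
+// With the default (non-flattened) encoding, a map entry {"k": "v"} becomes,
+// roughly:
+//
+//    <entry><key>k</key><value>v</value></entry>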
+// +// Error will be returned if it is unable to build the map's values into XMLNodes +func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted maps + return nil + } + + maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + current.AddChild(maproot) + current = maproot + + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + // sorting is not required for compliance, but it makes testing easier + keys := make([]string, value.Len()) + for i, k := range value.MapKeys() { + keys[i] = k.String() + } + sort.Strings(keys) + + for _, k := range keys { + v := value.MapIndex(reflect.ValueOf(k)) + + mapcur := current + if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps + child := NewXMLElement(xml.Name{Local: "entry"}) + mapcur.AddChild(child) + mapcur = child + } + + kchild := NewXMLElement(xml.Name{Local: kname}) + kchild.Text = k + vchild := NewXMLElement(xml.Name{Local: vname}) + mapcur.AddChild(kchild) + mapcur.AddChild(vchild) + + if err := b.buildValue(v, vchild, ""); err != nil { + return err + } + } + + return nil +} + +// buildScalar will convert the value into a string and append it as a attribute or child +// of the current XMLNode. +// +// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value. +// +// Error will be returned if the value type is unsupported. +func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + var str string + switch converted := value.Interface().(type) { + case string: + str = converted + case []byte: + if !value.IsNil() { + str = base64.StdEncoding.EncodeToString(converted) + } + case bool: + str = strconv.FormatBool(converted) + case int64: + str = strconv.FormatInt(converted, 10) + case int: + str = strconv.Itoa(converted) + case float64: + str = strconv.FormatFloat(converted, 'f', -1, 64) + case float32: + str = strconv.FormatFloat(float64(converted), 'f', -1, 32) + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + str = protocol.FormatTime(format, converted) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", + tag.Get("locationName"), value.Interface(), value.Type().Name()) + } + + xname := xml.Name{Local: tag.Get("locationName")} + if tag.Get("xmlAttribute") != "" { // put into current node's attribute list + attr := xml.Attr{Name: xname, Value: str} + current.Attr = append(current.Attr, attr) + } else { // regular text node + current.AddChild(&XMLNode{Name: xname, Text: str}) + } + return nil +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go new file mode 100644 index 000000000..c1a511851 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go @@ -0,0 +1,32 @@ +package xmlutil + +import ( + "encoding/xml" + "strings" +) + +type xmlAttrSlice []xml.Attr + +func (x xmlAttrSlice) Len() int { + return len(x) +} + +func (x xmlAttrSlice) Less(i, j int) bool { + spaceI, spaceJ := x[i].Name.Space, x[j].Name.Space + localI, localJ := x[i].Name.Local, x[j].Name.Local + valueI, valueJ := x[i].Value, x[j].Value + + spaceCmp := strings.Compare(spaceI, spaceJ) + localCmp := 
strings.Compare(localI, localJ) + valueCmp := strings.Compare(valueI, valueJ) + + if spaceCmp == -1 || (spaceCmp == 0 && (localCmp == -1 || (localCmp == 0 && valueCmp == -1))) { + return true + } + + return false +} + +func (x xmlAttrSlice) Swap(i, j int) { + x[i], x[j] = x[j], x[i] +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go new file mode 100644 index 000000000..107c053f8 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -0,0 +1,299 @@ +package xmlutil + +import ( + "bytes" + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalXMLError unmarshals the XML error from the stream into the value +// type specified. The value must be a pointer. If the message fails to +// unmarshal, the message content will be included in the returned error as a +// awserr.UnmarshalError. +func UnmarshalXMLError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := xml.NewDecoder(body).Decode(v) + if err != nil && err != io.EOF { + return awserr.NewUnmarshalError(err, + "failed to unmarshal error message", errBuf.Bytes()) + } + + return nil +} + +// UnmarshalXML deserializes an xml.Decoder into the container v. V +// needs to match the shape of the XML expected to be decoded. +// If the shape doesn't match unmarshaling will fail. +func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { + n, err := XMLToStruct(d, nil) + if err != nil { + return err + } + if n.Children != nil { + for _, root := range n.Children { + for _, c := range root { + if wrappedChild, ok := c.Children[wrapper]; ok { + c = wrappedChild[0] // pull out wrapped element + } + + err = parse(reflect.ValueOf(v), c, "") + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } + } + return nil + } + return nil +} + +// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect +// will be used to determine the type from r. +func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + xml := tag.Get("xml") + if len(xml) != 0 { + name := strings.SplitAfterN(xml, ",", 2)[0] + if name == "-" { + return nil + } + } + + rtype := r.Type() + if rtype.Kind() == reflect.Ptr { + rtype = rtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch rtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := r.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := r.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := rtype.FieldByName("_"); ok { + tag = field.Tag + } + return parseStruct(r, node, tag) + case "list": + return parseList(r, node, tag) + case "map": + return parseMap(r, node, tag) + default: + return parseScalar(r, node, tag) + } +} + +// parseStruct deserializes a structure and its fields from an XMLNode. Any nested +// types in the structure will also be deserialized. 
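+//
+// Field matching is driven by the locationName tag, falling back to the Go
+// field name; e.g. (illustrative shape):
+//
+//    type item struct {
+//        _  struct{} `type:"structure"`
+//        ID *string  `type:"string" locationName:"Id"`
+//    }
+//
+// picks up the node's Children["Id"] entries.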
+func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + if r.Kind() == reflect.Ptr { + if r.IsNil() { // create the structure if it's nil + s := reflect.New(r.Type().Elem()) + r.Set(s) + r = s + } + + r = r.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return parseStruct(r.FieldByName(payload), node, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if c := field.Name[0:1]; strings.ToLower(c) == c { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + // try to find the field by name in elements + elems := node.Children[name] + + if elems == nil { // try to find the field in attributes + if val, ok := node.findElem(name); ok { + elems = []*XMLNode{{Text: val}} + } + } + + member := r.FieldByName(field.Name) + for _, elem := range elems { + err := parse(member, elem, field.Tag) + if err != nil { + return err + } + } + } + return nil +} + +// parseList deserializes a list of values from an XML node. Each list entry +// will also be deserialized. +func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + + if tag.Get("flattened") == "" { // look at all item entries + mname := "member" + if name := tag.Get("locationNameList"); name != "" { + mname = name + } + + if Children, ok := node.Children[mname]; ok { + if r.IsNil() { + r.Set(reflect.MakeSlice(t, len(Children), len(Children))) + } + + for i, c := range Children { + err := parse(r.Index(i), c, "") + if err != nil { + return err + } + } + } + } else { // flattened list means this is a single element + if r.IsNil() { + r.Set(reflect.MakeSlice(t, 0, 0)) + } + + childR := reflect.Zero(t.Elem()) + r.Set(reflect.Append(r, childR)) + err := parse(r.Index(r.Len()-1), node, "") + if err != nil { + return err + } + } + + return nil +} + +// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode +// will also be deserialized as map entries. +func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + if r.IsNil() { + r.Set(reflect.MakeMap(r.Type())) + } + + if tag.Get("flattened") == "" { // look at all child entries + for _, entry := range node.Children["entry"] { + parseMapEntry(r, entry, tag) + } + } else { // this element is itself an entry + parseMapEntry(r, node, tag) + } + + return nil +} + +// parseMapEntry deserializes a map entry from a XML node. +func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + keys, ok := node.Children[kname] + values := node.Children[vname] + if ok { + for i, key := range keys { + keyR := reflect.ValueOf(key.Text) + value := values[i] + valueR := reflect.New(r.Type().Elem()).Elem() + + parse(valueR, value, "") + r.SetMapIndex(keyR, valueR) + } + } + return nil +} + +// parseScaller deserializes an XMLNode value into a concrete type based on the +// interface type of r. +// +// Error is returned if the deserialization fails due to invalid type conversion, +// or unsupported interface type. 
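+//
+// For example, a node whose Text is "42" fills a *int64 target through
+// strconv.ParseInt, and a *time.Time target defaults to the ISO 8601
+// format unless a timestampFormat tag says otherwise:
+//
+//    // Text "2014-04-29T18:30:38Z" -> time.Time via protocol.ParseTime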
+func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + switch r.Interface().(type) { + case *string: + r.Set(reflect.ValueOf(&node.Text)) + return nil + case []byte: + b, err := base64.StdEncoding.DecodeString(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(b)) + case *bool: + v, err := strconv.ParseBool(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *int64: + v, err := strconv.ParseInt(node.Text, 10, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *float64: + v, err := strconv.ParseFloat(node.Text, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) + } + return nil +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go new file mode 100644 index 000000000..42f71648e --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -0,0 +1,159 @@ +package xmlutil + +import ( + "encoding/xml" + "fmt" + "io" + "sort" +) + +// A XMLNode contains the values to be encoded or decoded. +type XMLNode struct { + Name xml.Name `json:",omitempty"` + Children map[string][]*XMLNode `json:",omitempty"` + Text string `json:",omitempty"` + Attr []xml.Attr `json:",omitempty"` + + namespaces map[string]string + parent *XMLNode +} + +// NewXMLElement returns a pointer to a new XMLNode initialized to default values. +func NewXMLElement(name xml.Name) *XMLNode { + return &XMLNode{ + Name: name, + Children: map[string][]*XMLNode{}, + Attr: []xml.Attr{}, + } +} + +// AddChild adds child to the XMLNode. +func (n *XMLNode) AddChild(child *XMLNode) { + child.parent = n + if _, ok := n.Children[child.Name.Local]; !ok { + n.Children[child.Name.Local] = []*XMLNode{} + } + n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) +} + +// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values. 
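+//
+// Sketch of direct use (UnmarshalXML is the usual entry point):
+//
+//    d := xml.NewDecoder(strings.NewReader("<Person><Name>Jo</Name></Person>"))
+//    root, err := XMLToStruct(d, nil)
+//    // root.Children["Person"][0].Children["Name"][0].Text == "Jo"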
+func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { + out := &XMLNode{} + for { + tok, err := d.Token() + if err != nil { + if err == io.EOF { + break + } else { + return out, err + } + } + + if tok == nil { + break + } + + switch typed := tok.(type) { + case xml.CharData: + out.Text = string(typed.Copy()) + case xml.StartElement: + el := typed.Copy() + out.Attr = el.Attr + if out.Children == nil { + out.Children = map[string][]*XMLNode{} + } + + name := typed.Name.Local + slice := out.Children[name] + if slice == nil { + slice = []*XMLNode{} + } + node, e := XMLToStruct(d, &el) + out.findNamespaces() + if e != nil { + return out, e + } + node.Name = typed.Name + node.findNamespaces() + tempOut := *out + // Save into a temp variable, simply because out gets squashed during + // loop iterations + node.parent = &tempOut + slice = append(slice, node) + out.Children[name] = slice + case xml.EndElement: + if s != nil && s.Name.Local == typed.Name.Local { // matching end token + return out, nil + } + out = &XMLNode{} + } + } + return out, nil +} + +func (n *XMLNode) findNamespaces() { + ns := map[string]string{} + for _, a := range n.Attr { + if a.Name.Space == "xmlns" { + ns[a.Value] = a.Name.Local + } + } + + n.namespaces = ns +} + +func (n *XMLNode) findElem(name string) (string, bool) { + for node := n; node != nil; node = node.parent { + for _, a := range node.Attr { + namespace := a.Name.Space + if v, ok := node.namespaces[namespace]; ok { + namespace = v + } + if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) { + return a.Value, true + } + } + } + return "", false +} + +// StructToXML writes an XMLNode to a xml.Encoder as tokens. +func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { + // Sort Attributes + attrs := node.Attr + if sorted { + sortedAttrs := make([]xml.Attr, len(attrs)) + for _, k := range node.Attr { + sortedAttrs = append(sortedAttrs, k) + } + sort.Sort(xmlAttrSlice(sortedAttrs)) + attrs = sortedAttrs + } + + e.EncodeToken(xml.StartElement{Name: node.Name, Attr: attrs}) + + if node.Text != "" { + e.EncodeToken(xml.CharData([]byte(node.Text))) + } else if sorted { + sortedNames := []string{} + for k := range node.Children { + sortedNames = append(sortedNames, k) + } + sort.Strings(sortedNames) + + for _, k := range sortedNames { + for _, v := range node.Children[k] { + StructToXML(e, v, sorted) + } + } + } else { + for _, c := range node.Children { + for _, v := range c { + StructToXML(e, v, sorted) + } + } + } + + e.EncodeToken(xml.EndElement{Name: node.Name}) + return e.Flush() +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go b/extender/code/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go new file mode 100644 index 000000000..12e2e1bd6 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go @@ -0,0 +1,8820 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ecr + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opBatchCheckLayerAvailability = "BatchCheckLayerAvailability" + +// BatchCheckLayerAvailabilityRequest generates a "aws/request.Request" representing the +// client's request for the BatchCheckLayerAvailability operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchCheckLayerAvailability for more information on using the BatchCheckLayerAvailability +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the BatchCheckLayerAvailabilityRequest method. +// req, resp := client.BatchCheckLayerAvailabilityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchCheckLayerAvailability +func (c *ECR) BatchCheckLayerAvailabilityRequest(input *BatchCheckLayerAvailabilityInput) (req *request.Request, output *BatchCheckLayerAvailabilityOutput) { + op := &request.Operation{ + Name: opBatchCheckLayerAvailability, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchCheckLayerAvailabilityInput{} + } + + output = &BatchCheckLayerAvailabilityOutput{} + req = c.newRequest(op, input, output) + return +} + +// BatchCheckLayerAvailability API operation for Amazon EC2 Container Registry. +// +// Checks the availability of one or more image layers in a repository. +// +// When an image is pushed to a repository, each image layer is checked to verify +// if it has been uploaded before. If it has been uploaded, then the image layer +// is skipped. +// +// This operation is used by the Amazon ECR proxy and is not generally used +// by customers for pulling and pushing images. In most cases, you should use +// the docker CLI to pull, tag, and push images. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation BatchCheckLayerAvailability for usage and error information. +// +// Returned Error Types: +// * RepositoryNotFoundException +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ServerException +// These errors are usually caused by a server-side issue. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchCheckLayerAvailability +func (c *ECR) BatchCheckLayerAvailability(input *BatchCheckLayerAvailabilityInput) (*BatchCheckLayerAvailabilityOutput, error) { + req, out := c.BatchCheckLayerAvailabilityRequest(input) + return out, req.Send() +} + +// BatchCheckLayerAvailabilityWithContext is the same as BatchCheckLayerAvailability with the addition of +// the ability to pass a context and additional request options. +// +// See BatchCheckLayerAvailability for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) BatchCheckLayerAvailabilityWithContext(ctx aws.Context, input *BatchCheckLayerAvailabilityInput, opts ...request.Option) (*BatchCheckLayerAvailabilityOutput, error) { + req, out := c.BatchCheckLayerAvailabilityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opBatchDeleteImage = "BatchDeleteImage" + +// BatchDeleteImageRequest generates a "aws/request.Request" representing the +// client's request for the BatchDeleteImage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchDeleteImage for more information on using the BatchDeleteImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the BatchDeleteImageRequest method. +// req, resp := client.BatchDeleteImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchDeleteImage +func (c *ECR) BatchDeleteImageRequest(input *BatchDeleteImageInput) (req *request.Request, output *BatchDeleteImageOutput) { + op := &request.Operation{ + Name: opBatchDeleteImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchDeleteImageInput{} + } + + output = &BatchDeleteImageOutput{} + req = c.newRequest(op, input, output) + return +} + +// BatchDeleteImage API operation for Amazon EC2 Container Registry. +// +// Deletes a list of specified images within a repository. Images are specified +// with either an imageTag or imageDigest. +// +// You can remove a tag from an image by specifying the image's tag in your +// request. When you remove the last tag from an image, the image is deleted +// from your repository. +// +// You can completely delete an image (and all of its tags) by specifying the +// image's digest in your request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation BatchDeleteImage for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server-side issue. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * RepositoryNotFoundException +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchDeleteImage +func (c *ECR) BatchDeleteImage(input *BatchDeleteImageInput) (*BatchDeleteImageOutput, error) { + req, out := c.BatchDeleteImageRequest(input) + return out, req.Send() +} + +// BatchDeleteImageWithContext is the same as BatchDeleteImage with the addition of +// the ability to pass a context and additional request options. +// +// See BatchDeleteImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) BatchDeleteImageWithContext(ctx aws.Context, input *BatchDeleteImageInput, opts ...request.Option) (*BatchDeleteImageOutput, error) { + req, out := c.BatchDeleteImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opBatchGetImage = "BatchGetImage" + +// BatchGetImageRequest generates a "aws/request.Request" representing the +// client's request for the BatchGetImage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchGetImage for more information on using the BatchGetImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the BatchGetImageRequest method. +// req, resp := client.BatchGetImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchGetImage +func (c *ECR) BatchGetImageRequest(input *BatchGetImageInput) (req *request.Request, output *BatchGetImageOutput) { + op := &request.Operation{ + Name: opBatchGetImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchGetImageInput{} + } + + output = &BatchGetImageOutput{} + req = c.newRequest(op, input, output) + return +} + +// BatchGetImage API operation for Amazon EC2 Container Registry. +// +// Gets detailed information for an image. Images are specified with either +// an imageTag or imageDigest. +// +// When an image is pulled, the BatchGetImage API is called once to retrieve +// the image manifest. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation BatchGetImage for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server-side issue. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * RepositoryNotFoundException +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchGetImage +func (c *ECR) BatchGetImage(input *BatchGetImageInput) (*BatchGetImageOutput, error) { + req, out := c.BatchGetImageRequest(input) + return out, req.Send() +} + +// BatchGetImageWithContext is the same as BatchGetImage with the addition of +// the ability to pass a context and additional request options. +// +// See BatchGetImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) BatchGetImageWithContext(ctx aws.Context, input *BatchGetImageInput, opts ...request.Option) (*BatchGetImageOutput, error) { + req, out := c.BatchGetImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCompleteLayerUpload = "CompleteLayerUpload" + +// CompleteLayerUploadRequest generates a "aws/request.Request" representing the +// client's request for the CompleteLayerUpload operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CompleteLayerUpload for more information on using the CompleteLayerUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CompleteLayerUploadRequest method. +// req, resp := client.CompleteLayerUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CompleteLayerUpload +func (c *ECR) CompleteLayerUploadRequest(input *CompleteLayerUploadInput) (req *request.Request, output *CompleteLayerUploadOutput) { + op := &request.Operation{ + Name: opCompleteLayerUpload, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CompleteLayerUploadInput{} + } + + output = &CompleteLayerUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// CompleteLayerUpload API operation for Amazon EC2 Container Registry. +// +// Informs Amazon ECR that the image layer upload has completed for a specified +// registry, repository name, and upload ID. You can optionally provide a sha256 +// digest of the image layer for data validation purposes. +// +// When an image is pushed, the CompleteLayerUpload API is called once per each +// new image layer to verify that the upload has completed. +// +// This operation is used by the Amazon ECR proxy and is not generally used +// by customers for pulling and pushing images. In most cases, you should use +// the docker CLI to pull, tag, and push images. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation CompleteLayerUpload for usage and error information. 
+// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server-side issue. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * RepositoryNotFoundException +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * UploadNotFoundException +// The upload could not be found, or the specified upload id is not valid for +// this repository. +// +// * InvalidLayerException +// The layer digest calculation performed by Amazon ECR upon receipt of the +// image layer does not match the digest specified. +// +// * LayerPartTooSmallException +// Layer parts must be at least 5 MiB in size. +// +// * LayerAlreadyExistsException +// The image layer already exists in the associated repository. +// +// * EmptyUploadException +// The specified layer upload does not contain any layer parts. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CompleteLayerUpload +func (c *ECR) CompleteLayerUpload(input *CompleteLayerUploadInput) (*CompleteLayerUploadOutput, error) { + req, out := c.CompleteLayerUploadRequest(input) + return out, req.Send() +} + +// CompleteLayerUploadWithContext is the same as CompleteLayerUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CompleteLayerUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) CompleteLayerUploadWithContext(ctx aws.Context, input *CompleteLayerUploadInput, opts ...request.Option) (*CompleteLayerUploadOutput, error) { + req, out := c.CompleteLayerUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateRepository = "CreateRepository" + +// CreateRepositoryRequest generates a "aws/request.Request" representing the +// client's request for the CreateRepository operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateRepository for more information on using the CreateRepository +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateRepositoryRequest method. 
+// req, resp := client.CreateRepositoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CreateRepository +func (c *ECR) CreateRepositoryRequest(input *CreateRepositoryInput) (req *request.Request, output *CreateRepositoryOutput) { + op := &request.Operation{ + Name: opCreateRepository, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateRepositoryInput{} + } + + output = &CreateRepositoryOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateRepository API operation for Amazon EC2 Container Registry. +// +// Creates a repository. For more information, see Amazon ECR Repositories (https://docs.aws.amazon.com/AmazonECR/latest/userguide/Repositories.html) +// in the Amazon Elastic Container Registry User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation CreateRepository for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server-side issue. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * InvalidTagParameterException +// An invalid parameter has been specified. Tag keys can have a maximum character +// length of 128 characters, and tag values can have a maximum length of 256 +// characters. +// +// * TooManyTagsException +// The list of tags on the repository is over the limit. The maximum number +// of tags that can be applied to a repository is 50. +// +// * RepositoryAlreadyExistsException +// The specified repository already exists in the specified registry. +// +// * LimitExceededException +// The operation did not succeed because it would have exceeded a service limit +// for your account. For more information, see Amazon ECR Service Quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) +// in the Amazon Elastic Container Registry User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CreateRepository +func (c *ECR) CreateRepository(input *CreateRepositoryInput) (*CreateRepositoryOutput, error) { + req, out := c.CreateRepositoryRequest(input) + return out, req.Send() +} + +// CreateRepositoryWithContext is the same as CreateRepository with the addition of +// the ability to pass a context and additional request options. +// +// See CreateRepository for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) CreateRepositoryWithContext(ctx aws.Context, input *CreateRepositoryInput, opts ...request.Option) (*CreateRepositoryOutput, error) { + req, out := c.CreateRepositoryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteLifecyclePolicy = "DeleteLifecyclePolicy" + +// DeleteLifecyclePolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLifecyclePolicy operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteLifecyclePolicy for more information on using the DeleteLifecyclePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteLifecyclePolicyRequest method. +// req, resp := client.DeleteLifecyclePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteLifecyclePolicy +func (c *ECR) DeleteLifecyclePolicyRequest(input *DeleteLifecyclePolicyInput) (req *request.Request, output *DeleteLifecyclePolicyOutput) { + op := &request.Operation{ + Name: opDeleteLifecyclePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLifecyclePolicyInput{} + } + + output = &DeleteLifecyclePolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteLifecyclePolicy API operation for Amazon EC2 Container Registry. +// +// Deletes the lifecycle policy associated with the specified repository. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation DeleteLifecyclePolicy for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server-side issue. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * RepositoryNotFoundException +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * LifecyclePolicyNotFoundException +// The lifecycle policy could not be found, and no policy is set to the repository. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteLifecyclePolicy +func (c *ECR) DeleteLifecyclePolicy(input *DeleteLifecyclePolicyInput) (*DeleteLifecyclePolicyOutput, error) { + req, out := c.DeleteLifecyclePolicyRequest(input) + return out, req.Send() +} + +// DeleteLifecyclePolicyWithContext is the same as DeleteLifecyclePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteLifecyclePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) DeleteLifecyclePolicyWithContext(ctx aws.Context, input *DeleteLifecyclePolicyInput, opts ...request.Option) (*DeleteLifecyclePolicyOutput, error) { + req, out := c.DeleteLifecyclePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDeleteRepository = "DeleteRepository" + +// DeleteRepositoryRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRepository operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteRepository for more information on using the DeleteRepository +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteRepositoryRequest method. +// req, resp := client.DeleteRepositoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepository +func (c *ECR) DeleteRepositoryRequest(input *DeleteRepositoryInput) (req *request.Request, output *DeleteRepositoryOutput) { + op := &request.Operation{ + Name: opDeleteRepository, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRepositoryInput{} + } + + output = &DeleteRepositoryOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteRepository API operation for Amazon EC2 Container Registry. +// +// Deletes a repository. If the repository contains images, you must either +// delete all images in the repository or use the force option to delete the +// repository. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation DeleteRepository for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server-side issue. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * RepositoryNotFoundException +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * RepositoryNotEmptyException +// The specified repository contains images. To delete a repository that contains +// images, you must force the deletion with the force parameter. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepository +func (c *ECR) DeleteRepository(input *DeleteRepositoryInput) (*DeleteRepositoryOutput, error) { + req, out := c.DeleteRepositoryRequest(input) + return out, req.Send() +} + +// DeleteRepositoryWithContext is the same as DeleteRepository with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteRepository for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
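+//
+// // An illustrative sketch (not part of the generated SDK): force-deleting
+// // a repository that still contains images; "client", "ctx" and "my-repo"
+// // are assumptions for the example.
+// out, err := client.DeleteRepositoryWithContext(ctx, &ecr.DeleteRepositoryInput{
+//     RepositoryName: aws.String("my-repo"),
+//     Force:          aws.Bool(true), // required when images remain
+// })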
+func (c *ECR) DeleteRepositoryWithContext(ctx aws.Context, input *DeleteRepositoryInput, opts ...request.Option) (*DeleteRepositoryOutput, error) { + req, out := c.DeleteRepositoryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteRepositoryPolicy = "DeleteRepositoryPolicy" + +// DeleteRepositoryPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRepositoryPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteRepositoryPolicy for more information on using the DeleteRepositoryPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteRepositoryPolicyRequest method. +// req, resp := client.DeleteRepositoryPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepositoryPolicy +func (c *ECR) DeleteRepositoryPolicyRequest(input *DeleteRepositoryPolicyInput) (req *request.Request, output *DeleteRepositoryPolicyOutput) { + op := &request.Operation{ + Name: opDeleteRepositoryPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRepositoryPolicyInput{} + } + + output = &DeleteRepositoryPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteRepositoryPolicy API operation for Amazon EC2 Container Registry. +// +// Deletes the repository policy associated with the specified repository. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation DeleteRepositoryPolicy for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server-side issue. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * RepositoryNotFoundException +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * RepositoryPolicyNotFoundException +// The specified repository and registry combination does not have an associated +// repository policy. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepositoryPolicy +func (c *ECR) DeleteRepositoryPolicy(input *DeleteRepositoryPolicyInput) (*DeleteRepositoryPolicyOutput, error) { + req, out := c.DeleteRepositoryPolicyRequest(input) + return out, req.Send() +} + +// DeleteRepositoryPolicyWithContext is the same as DeleteRepositoryPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteRepositoryPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) DeleteRepositoryPolicyWithContext(ctx aws.Context, input *DeleteRepositoryPolicyInput, opts ...request.Option) (*DeleteRepositoryPolicyOutput, error) { + req, out := c.DeleteRepositoryPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeImageScanFindings = "DescribeImageScanFindings" + +// DescribeImageScanFindingsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeImageScanFindings operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeImageScanFindings for more information on using the DescribeImageScanFindings +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeImageScanFindingsRequest method. +// req, resp := client.DescribeImageScanFindingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImageScanFindings +func (c *ECR) DescribeImageScanFindingsRequest(input *DescribeImageScanFindingsInput) (req *request.Request, output *DescribeImageScanFindingsOutput) { + op := &request.Operation{ + Name: opDescribeImageScanFindings, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeImageScanFindingsInput{} + } + + output = &DescribeImageScanFindingsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeImageScanFindings API operation for Amazon EC2 Container Registry. +// +// Returns the scan findings for the specified image. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation DescribeImageScanFindings for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server-side issue. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * RepositoryNotFoundException +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * ImageNotFoundException +// The image requested does not exist in the specified repository. +// +// * ScanNotFoundException +// The specified image scan could not be found. Ensure that image scanning is +// enabled on the repository and try again. 
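+//
+// // An illustrative sketch (not part of the generated SDK): fetching scan
+// // findings for a tagged image; "client" and the names are assumptions.
+// out, err := client.DescribeImageScanFindings(&ecr.DescribeImageScanFindingsInput{
+//     RepositoryName: aws.String("my-repo"),
+//     ImageId:        &ecr.ImageIdentifier{ImageTag: aws.String("latest")},
+// })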
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImageScanFindings +func (c *ECR) DescribeImageScanFindings(input *DescribeImageScanFindingsInput) (*DescribeImageScanFindingsOutput, error) { + req, out := c.DescribeImageScanFindingsRequest(input) + return out, req.Send() +} + +// DescribeImageScanFindingsWithContext is the same as DescribeImageScanFindings with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeImageScanFindings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) DescribeImageScanFindingsWithContext(ctx aws.Context, input *DescribeImageScanFindingsInput, opts ...request.Option) (*DescribeImageScanFindingsOutput, error) { + req, out := c.DescribeImageScanFindingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeImageScanFindingsPages iterates over the pages of a DescribeImageScanFindings operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeImageScanFindings method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeImageScanFindings operation. +// pageNum := 0 +// err := client.DescribeImageScanFindingsPages(params, +// func(page *ecr.DescribeImageScanFindingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ECR) DescribeImageScanFindingsPages(input *DescribeImageScanFindingsInput, fn func(*DescribeImageScanFindingsOutput, bool) bool) error { + return c.DescribeImageScanFindingsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeImageScanFindingsPagesWithContext same as DescribeImageScanFindingsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) DescribeImageScanFindingsPagesWithContext(ctx aws.Context, input *DescribeImageScanFindingsInput, fn func(*DescribeImageScanFindingsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeImageScanFindingsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeImageScanFindingsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeImageScanFindingsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeImages = "DescribeImages" + +// DescribeImagesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeImages operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeImages for more information on using the DescribeImages +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeImagesRequest method. +// req, resp := client.DescribeImagesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImages +func (c *ECR) DescribeImagesRequest(input *DescribeImagesInput) (req *request.Request, output *DescribeImagesOutput) { + op := &request.Operation{ + Name: opDescribeImages, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeImagesInput{} + } + + output = &DescribeImagesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeImages API operation for Amazon EC2 Container Registry. +// +// Returns metadata about the images in a repository. +// +// Beginning with Docker version 1.9, the Docker client compresses image layers +// before pushing them to a V2 Docker registry. The output of the docker images +// command shows the uncompressed image size, so it may return a larger image +// size than the image sizes returned by DescribeImages. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation DescribeImages for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server-side issue. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * RepositoryNotFoundException +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * ImageNotFoundException +// The image requested does not exist in the specified repository. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImages +func (c *ECR) DescribeImages(input *DescribeImagesInput) (*DescribeImagesOutput, error) { + req, out := c.DescribeImagesRequest(input) + return out, req.Send() +} + +// DescribeImagesWithContext is the same as DescribeImages with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeImages for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *ECR) DescribeImagesWithContext(ctx aws.Context, input *DescribeImagesInput, opts ...request.Option) (*DescribeImagesOutput, error) { + req, out := c.DescribeImagesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeImagesPages iterates over the pages of a DescribeImages operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeImages method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeImages operation. +// pageNum := 0 +// err := client.DescribeImagesPages(params, +// func(page *ecr.DescribeImagesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ECR) DescribeImagesPages(input *DescribeImagesInput, fn func(*DescribeImagesOutput, bool) bool) error { + return c.DescribeImagesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeImagesPagesWithContext same as DescribeImagesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) DescribeImagesPagesWithContext(ctx aws.Context, input *DescribeImagesInput, fn func(*DescribeImagesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeImagesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeImagesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeImagesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeRepositories = "DescribeRepositories" + +// DescribeRepositoriesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeRepositories operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeRepositories for more information on using the DescribeRepositories +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeRepositoriesRequest method. 
+// req, resp := client.DescribeRepositoriesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeRepositories +func (c *ECR) DescribeRepositoriesRequest(input *DescribeRepositoriesInput) (req *request.Request, output *DescribeRepositoriesOutput) { + op := &request.Operation{ + Name: opDescribeRepositories, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeRepositoriesInput{} + } + + output = &DescribeRepositoriesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeRepositories API operation for Amazon EC2 Container Registry. +// +// Describes image repositories in a registry. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation DescribeRepositories for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server-side issue. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * RepositoryNotFoundException +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeRepositories +func (c *ECR) DescribeRepositories(input *DescribeRepositoriesInput) (*DescribeRepositoriesOutput, error) { + req, out := c.DescribeRepositoriesRequest(input) + return out, req.Send() +} + +// DescribeRepositoriesWithContext is the same as DescribeRepositories with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeRepositories for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) DescribeRepositoriesWithContext(ctx aws.Context, input *DescribeRepositoriesInput, opts ...request.Option) (*DescribeRepositoriesOutput, error) { + req, out := c.DescribeRepositoriesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeRepositoriesPages iterates over the pages of a DescribeRepositories operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeRepositories method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeRepositories operation. 
+// pageNum := 0 +// err := client.DescribeRepositoriesPages(params, +// func(page *ecr.DescribeRepositoriesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ECR) DescribeRepositoriesPages(input *DescribeRepositoriesInput, fn func(*DescribeRepositoriesOutput, bool) bool) error { + return c.DescribeRepositoriesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeRepositoriesPagesWithContext same as DescribeRepositoriesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) DescribeRepositoriesPagesWithContext(ctx aws.Context, input *DescribeRepositoriesInput, fn func(*DescribeRepositoriesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeRepositoriesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeRepositoriesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeRepositoriesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetAuthorizationToken = "GetAuthorizationToken" + +// GetAuthorizationTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetAuthorizationToken operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetAuthorizationToken for more information on using the GetAuthorizationToken +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetAuthorizationTokenRequest method. +// req, resp := client.GetAuthorizationTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetAuthorizationToken +func (c *ECR) GetAuthorizationTokenRequest(input *GetAuthorizationTokenInput) (req *request.Request, output *GetAuthorizationTokenOutput) { + op := &request.Operation{ + Name: opGetAuthorizationToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAuthorizationTokenInput{} + } + + output = &GetAuthorizationTokenOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetAuthorizationToken API operation for Amazon EC2 Container Registry. +// +// Retrieves an authorization token. An authorization token represents your +// IAM authentication credentials and can be used to access any Amazon ECR registry +// that your IAM principal has access to. The authorization token is valid for +// 12 hours. +// +// The authorizationToken returned is a base64 encoded string that can be decoded +// and used in a docker login command to authenticate to a registry. 
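+//
+// // An illustrative sketch (not part of the generated SDK): the decoded token
+// // has the form "AWS:<password>"; "client" is an assumption for the example.
+// out, err := client.GetAuthorizationToken(&ecr.GetAuthorizationTokenInput{})
+// if err == nil {
+//     raw, _ := base64.StdEncoding.DecodeString(
+//         aws.StringValue(out.AuthorizationData[0].AuthorizationToken))
+//     fmt.Println(strings.SplitN(string(raw), ":", 2)[0]) // prints "AWS"
+// }
+//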
The AWS +// CLI offers a get-login-password command that simplifies the login process. +// For more information, see Registry Authentication (https://docs.aws.amazon.com/AmazonECR/latest/userguide/Registries.html#registry_auth) +// in the Amazon Elastic Container Registry User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation GetAuthorizationToken for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server-side issue. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetAuthorizationToken +func (c *ECR) GetAuthorizationToken(input *GetAuthorizationTokenInput) (*GetAuthorizationTokenOutput, error) { + req, out := c.GetAuthorizationTokenRequest(input) + return out, req.Send() +} + +// GetAuthorizationTokenWithContext is the same as GetAuthorizationToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetAuthorizationToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) GetAuthorizationTokenWithContext(ctx aws.Context, input *GetAuthorizationTokenInput, opts ...request.Option) (*GetAuthorizationTokenOutput, error) { + req, out := c.GetAuthorizationTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetDownloadUrlForLayer = "GetDownloadUrlForLayer" + +// GetDownloadUrlForLayerRequest generates a "aws/request.Request" representing the +// client's request for the GetDownloadUrlForLayer operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetDownloadUrlForLayer for more information on using the GetDownloadUrlForLayer +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetDownloadUrlForLayerRequest method. 
+// req, resp := client.GetDownloadUrlForLayerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetDownloadUrlForLayer +func (c *ECR) GetDownloadUrlForLayerRequest(input *GetDownloadUrlForLayerInput) (req *request.Request, output *GetDownloadUrlForLayerOutput) { + op := &request.Operation{ + Name: opGetDownloadUrlForLayer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDownloadUrlForLayerInput{} + } + + output = &GetDownloadUrlForLayerOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetDownloadUrlForLayer API operation for Amazon EC2 Container Registry. +// +// Retrieves the pre-signed Amazon S3 download URL corresponding to an image +// layer. You can only get URLs for image layers that are referenced in an image. +// +// When an image is pulled, the GetDownloadUrlForLayer API is called once per +// image layer that is not already cached. +// +// This operation is used by the Amazon ECR proxy and is not generally used +// by customers for pulling and pushing images. In most cases, you should use +// the docker CLI to pull, tag, and push images. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation GetDownloadUrlForLayer for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server-side issue. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * LayersNotFoundException +// The specified layers could not be found, or the specified layer is not valid +// for this repository. +// +// * LayerInaccessibleException +// The specified layer is not available because it is not associated with an +// image. Unassociated image layers may be cleaned up at any time. +// +// * RepositoryNotFoundException +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetDownloadUrlForLayer +func (c *ECR) GetDownloadUrlForLayer(input *GetDownloadUrlForLayerInput) (*GetDownloadUrlForLayerOutput, error) { + req, out := c.GetDownloadUrlForLayerRequest(input) + return out, req.Send() +} + +// GetDownloadUrlForLayerWithContext is the same as GetDownloadUrlForLayer with the addition of +// the ability to pass a context and additional request options. +// +// See GetDownloadUrlForLayer for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) GetDownloadUrlForLayerWithContext(ctx aws.Context, input *GetDownloadUrlForLayerInput, opts ...request.Option) (*GetDownloadUrlForLayerOutput, error) { + req, out := c.GetDownloadUrlForLayerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opGetLifecyclePolicy = "GetLifecyclePolicy" + +// GetLifecyclePolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetLifecyclePolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetLifecyclePolicy for more information on using the GetLifecyclePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetLifecyclePolicyRequest method. +// req, resp := client.GetLifecyclePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicy +func (c *ECR) GetLifecyclePolicyRequest(input *GetLifecyclePolicyInput) (req *request.Request, output *GetLifecyclePolicyOutput) { + op := &request.Operation{ + Name: opGetLifecyclePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetLifecyclePolicyInput{} + } + + output = &GetLifecyclePolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetLifecyclePolicy API operation for Amazon EC2 Container Registry. +// +// Retrieves the lifecycle policy for the specified repository. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation GetLifecyclePolicy for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server-side issue. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * RepositoryNotFoundException +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * LifecyclePolicyNotFoundException +// The lifecycle policy could not be found, and no policy is set to the repository. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicy +func (c *ECR) GetLifecyclePolicy(input *GetLifecyclePolicyInput) (*GetLifecyclePolicyOutput, error) { + req, out := c.GetLifecyclePolicyRequest(input) + return out, req.Send() +} + +// GetLifecyclePolicyWithContext is the same as GetLifecyclePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetLifecyclePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *ECR) GetLifecyclePolicyWithContext(ctx aws.Context, input *GetLifecyclePolicyInput, opts ...request.Option) (*GetLifecyclePolicyOutput, error) { + req, out := c.GetLifecyclePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetLifecyclePolicyPreview = "GetLifecyclePolicyPreview" + +// GetLifecyclePolicyPreviewRequest generates a "aws/request.Request" representing the +// client's request for the GetLifecyclePolicyPreview operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetLifecyclePolicyPreview for more information on using the GetLifecyclePolicyPreview +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetLifecyclePolicyPreviewRequest method. +// req, resp := client.GetLifecyclePolicyPreviewRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicyPreview +func (c *ECR) GetLifecyclePolicyPreviewRequest(input *GetLifecyclePolicyPreviewInput) (req *request.Request, output *GetLifecyclePolicyPreviewOutput) { + op := &request.Operation{ + Name: opGetLifecyclePolicyPreview, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetLifecyclePolicyPreviewInput{} + } + + output = &GetLifecyclePolicyPreviewOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetLifecyclePolicyPreview API operation for Amazon EC2 Container Registry. +// +// Retrieves the results of the lifecycle policy preview request for the specified +// repository. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation GetLifecyclePolicyPreview for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server-side issue. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * RepositoryNotFoundException +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * LifecyclePolicyPreviewNotFoundException +// There is no dry run for this repository. 
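+//
+// // An illustrative sketch (not part of the generated SDK): reading preview
+// // results after a StartLifecyclePolicyPreview call; names are assumptions.
+// out, err := client.GetLifecyclePolicyPreview(&ecr.GetLifecyclePolicyPreviewInput{
+//     RepositoryName: aws.String("my-repo"),
+// })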
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicyPreview +func (c *ECR) GetLifecyclePolicyPreview(input *GetLifecyclePolicyPreviewInput) (*GetLifecyclePolicyPreviewOutput, error) { + req, out := c.GetLifecyclePolicyPreviewRequest(input) + return out, req.Send() +} + +// GetLifecyclePolicyPreviewWithContext is the same as GetLifecyclePolicyPreview with the addition of +// the ability to pass a context and additional request options. +// +// See GetLifecyclePolicyPreview for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) GetLifecyclePolicyPreviewWithContext(ctx aws.Context, input *GetLifecyclePolicyPreviewInput, opts ...request.Option) (*GetLifecyclePolicyPreviewOutput, error) { + req, out := c.GetLifecyclePolicyPreviewRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetLifecyclePolicyPreviewPages iterates over the pages of a GetLifecyclePolicyPreview operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetLifecyclePolicyPreview method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetLifecyclePolicyPreview operation. +// pageNum := 0 +// err := client.GetLifecyclePolicyPreviewPages(params, +// func(page *ecr.GetLifecyclePolicyPreviewOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ECR) GetLifecyclePolicyPreviewPages(input *GetLifecyclePolicyPreviewInput, fn func(*GetLifecyclePolicyPreviewOutput, bool) bool) error { + return c.GetLifecyclePolicyPreviewPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetLifecyclePolicyPreviewPagesWithContext same as GetLifecyclePolicyPreviewPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) GetLifecyclePolicyPreviewPagesWithContext(ctx aws.Context, input *GetLifecyclePolicyPreviewInput, fn func(*GetLifecyclePolicyPreviewOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetLifecyclePolicyPreviewInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetLifecyclePolicyPreviewRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetLifecyclePolicyPreviewOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetRepositoryPolicy = "GetRepositoryPolicy" + +// GetRepositoryPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetRepositoryPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetRepositoryPolicy for more information on using the GetRepositoryPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetRepositoryPolicyRequest method. +// req, resp := client.GetRepositoryPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRepositoryPolicy +func (c *ECR) GetRepositoryPolicyRequest(input *GetRepositoryPolicyInput) (req *request.Request, output *GetRepositoryPolicyOutput) { + op := &request.Operation{ + Name: opGetRepositoryPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRepositoryPolicyInput{} + } + + output = &GetRepositoryPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetRepositoryPolicy API operation for Amazon EC2 Container Registry. +// +// Retrieves the repository policy for the specified repository. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation GetRepositoryPolicy for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server-side issue. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * RepositoryNotFoundException +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * RepositoryPolicyNotFoundException +// The specified repository and registry combination does not have an associated +// repository policy. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRepositoryPolicy +func (c *ECR) GetRepositoryPolicy(input *GetRepositoryPolicyInput) (*GetRepositoryPolicyOutput, error) { + req, out := c.GetRepositoryPolicyRequest(input) + return out, req.Send() +} + +// GetRepositoryPolicyWithContext is the same as GetRepositoryPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetRepositoryPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) GetRepositoryPolicyWithContext(ctx aws.Context, input *GetRepositoryPolicyInput, opts ...request.Option) (*GetRepositoryPolicyOutput, error) { + req, out := c.GetRepositoryPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opInitiateLayerUpload = "InitiateLayerUpload" + +// InitiateLayerUploadRequest generates a "aws/request.Request" representing the +// client's request for the InitiateLayerUpload operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See InitiateLayerUpload for more information on using the InitiateLayerUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the InitiateLayerUploadRequest method. +// req, resp := client.InitiateLayerUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/InitiateLayerUpload +func (c *ECR) InitiateLayerUploadRequest(input *InitiateLayerUploadInput) (req *request.Request, output *InitiateLayerUploadOutput) { + op := &request.Operation{ + Name: opInitiateLayerUpload, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InitiateLayerUploadInput{} + } + + output = &InitiateLayerUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// InitiateLayerUpload API operation for Amazon EC2 Container Registry. +// +// Notifies Amazon ECR that you intend to upload an image layer. +// +// When an image is pushed, the InitiateLayerUpload API is called once per image +// layer that has not already been uploaded. Whether or not an image layer has +// been uploaded is determined by the BatchCheckLayerAvailability API action. +// +// This operation is used by the Amazon ECR proxy and is not generally used +// by customers for pulling and pushing images. In most cases, you should use +// the docker CLI to pull, tag, and push images. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation InitiateLayerUpload for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server-side issue. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * RepositoryNotFoundException +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/InitiateLayerUpload +func (c *ECR) InitiateLayerUpload(input *InitiateLayerUploadInput) (*InitiateLayerUploadOutput, error) { + req, out := c.InitiateLayerUploadRequest(input) + return out, req.Send() +} + +// InitiateLayerUploadWithContext is the same as InitiateLayerUpload with the addition of +// the ability to pass a context and additional request options. +// +// See InitiateLayerUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *ECR) InitiateLayerUploadWithContext(ctx aws.Context, input *InitiateLayerUploadInput, opts ...request.Option) (*InitiateLayerUploadOutput, error) { + req, out := c.InitiateLayerUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListImages = "ListImages" + +// ListImagesRequest generates a "aws/request.Request" representing the +// client's request for the ListImages operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListImages for more information on using the ListImages +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListImagesRequest method. +// req, resp := client.ListImagesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListImages +func (c *ECR) ListImagesRequest(input *ListImagesInput) (req *request.Request, output *ListImagesOutput) { + op := &request.Operation{ + Name: opListImages, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListImagesInput{} + } + + output = &ListImagesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListImages API operation for Amazon EC2 Container Registry. +// +// Lists all the image IDs for the specified repository. +// +// You can filter images based on whether or not they are tagged by using the +// tagStatus filter and specifying either TAGGED, UNTAGGED or ANY. For example, +// you can filter your results to return only UNTAGGED images and then pipe +// that result to a BatchDeleteImage operation to delete them. Or, you can filter +// your results to return only TAGGED images to list all of the tags in your +// repository. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation ListImages for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server-side issue. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * RepositoryNotFoundException +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListImages +func (c *ECR) ListImages(input *ListImagesInput) (*ListImagesOutput, error) { + req, out := c.ListImagesRequest(input) + return out, req.Send() +} + +// ListImagesWithContext is the same as ListImages with the addition of +// the ability to pass a context and additional request options. 
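+//
+// // An illustrative sketch (not part of the generated SDK): listing only
+// // untagged image IDs, e.g. to feed a BatchDeleteImage call; "client",
+// // "ctx" and "my-repo" are assumptions for the example.
+// out, err := client.ListImagesWithContext(ctx, &ecr.ListImagesInput{
+//     RepositoryName: aws.String("my-repo"),
+//     Filter:         &ecr.ListImagesFilter{TagStatus: aws.String(ecr.TagStatusUntagged)},
+// })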
+// +// See ListImages for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) ListImagesWithContext(ctx aws.Context, input *ListImagesInput, opts ...request.Option) (*ListImagesOutput, error) { + req, out := c.ListImagesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListImagesPages iterates over the pages of a ListImages operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListImages method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListImages operation. +// pageNum := 0 +// err := client.ListImagesPages(params, +// func(page *ecr.ListImagesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ECR) ListImagesPages(input *ListImagesInput, fn func(*ListImagesOutput, bool) bool) error { + return c.ListImagesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListImagesPagesWithContext same as ListImagesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) ListImagesPagesWithContext(ctx aws.Context, input *ListImagesInput, fn func(*ListImagesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListImagesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListImagesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListImagesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. 
+// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListTagsForResource +func (c *ECR) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for Amazon EC2 Container Registry. +// +// List the tags for an Amazon ECR resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * RepositoryNotFoundException +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * ServerException +// These errors are usually caused by a server-side issue. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListTagsForResource +func (c *ECR) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutImage = "PutImage" + +// PutImageRequest generates a "aws/request.Request" representing the +// client's request for the PutImage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutImage for more information on using the PutImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutImageRequest method. 
+// req, resp := client.PutImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImage +func (c *ECR) PutImageRequest(input *PutImageInput) (req *request.Request, output *PutImageOutput) { + op := &request.Operation{ + Name: opPutImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutImageInput{} + } + + output = &PutImageOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutImage API operation for Amazon EC2 Container Registry. +// +// Creates or updates the image manifest and tags associated with an image. +// +// When an image is pushed and all new image layers have been uploaded, the +// PutImage API is called once to create or update the image manifest and the +// tags associated with the image. +// +// This operation is used by the Amazon ECR proxy and is not generally used +// by customers for pulling and pushing images. In most cases, you should use +// the docker CLI to pull, tag, and push images. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation PutImage for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server-side issue. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * RepositoryNotFoundException +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * ImageAlreadyExistsException +// The specified image has already been pushed, and there were no changes to +// the manifest or image tag after the last push. +// +// * LayersNotFoundException +// The specified layers could not be found, or the specified layer is not valid +// for this repository. +// +// * ReferencedImagesNotFoundException +// The manifest list is referencing an image that does not exist. +// +// * LimitExceededException +// The operation did not succeed because it would have exceeded a service limit +// for your account. For more information, see Amazon ECR Service Quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) +// in the Amazon Elastic Container Registry User Guide. +// +// * ImageTagAlreadyExistsException +// The specified image is tagged with a tag that already exists. The repository +// is configured for tag immutability. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImage +func (c *ECR) PutImage(input *PutImageInput) (*PutImageOutput, error) { + req, out := c.PutImageRequest(input) + return out, req.Send() +} + +// PutImageWithContext is the same as PutImage with the addition of +// the ability to pass a context and additional request options. +// +// See PutImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
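+//
+// Editorial sketch, not generated SDK text: pushing a manifest under a
+// deadline, assuming a constructed *ECR client named svc and a hypothetical
+// pre-serialized manifest string manifestJSON.
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+//    defer cancel()
+//    _, err := svc.PutImageWithContext(ctx, &ecr.PutImageInput{
+//        RepositoryName: aws.String("my-repo"),
+//        ImageTag:       aws.String("latest"),
+//        ImageManifest:  aws.String(manifestJSON),
+//    })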
+func (c *ECR) PutImageWithContext(ctx aws.Context, input *PutImageInput, opts ...request.Option) (*PutImageOutput, error) { + req, out := c.PutImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutImageScanningConfiguration = "PutImageScanningConfiguration" + +// PutImageScanningConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutImageScanningConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutImageScanningConfiguration for more information on using the PutImageScanningConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutImageScanningConfigurationRequest method. +// req, resp := client.PutImageScanningConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImageScanningConfiguration +func (c *ECR) PutImageScanningConfigurationRequest(input *PutImageScanningConfigurationInput) (req *request.Request, output *PutImageScanningConfigurationOutput) { + op := &request.Operation{ + Name: opPutImageScanningConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutImageScanningConfigurationInput{} + } + + output = &PutImageScanningConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutImageScanningConfiguration API operation for Amazon EC2 Container Registry. +// +// Updates the image scanning configuration for the specified repository. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation PutImageScanningConfiguration for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server-side issue. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * RepositoryNotFoundException +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImageScanningConfiguration +func (c *ECR) PutImageScanningConfiguration(input *PutImageScanningConfigurationInput) (*PutImageScanningConfigurationOutput, error) { + req, out := c.PutImageScanningConfigurationRequest(input) + return out, req.Send() +} + +// PutImageScanningConfigurationWithContext is the same as PutImageScanningConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutImageScanningConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) PutImageScanningConfigurationWithContext(ctx aws.Context, input *PutImageScanningConfigurationInput, opts ...request.Option) (*PutImageScanningConfigurationOutput, error) { + req, out := c.PutImageScanningConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutImageTagMutability = "PutImageTagMutability" + +// PutImageTagMutabilityRequest generates a "aws/request.Request" representing the +// client's request for the PutImageTagMutability operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutImageTagMutability for more information on using the PutImageTagMutability +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutImageTagMutabilityRequest method. +// req, resp := client.PutImageTagMutabilityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImageTagMutability +func (c *ECR) PutImageTagMutabilityRequest(input *PutImageTagMutabilityInput) (req *request.Request, output *PutImageTagMutabilityOutput) { + op := &request.Operation{ + Name: opPutImageTagMutability, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutImageTagMutabilityInput{} + } + + output = &PutImageTagMutabilityOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutImageTagMutability API operation for Amazon EC2 Container Registry. +// +// Updates the image tag mutability settings for the specified repository. For +// more information, see Image Tag Mutability (https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-tag-mutability.html) +// in the Amazon Elastic Container Registry User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation PutImageTagMutability for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server-side issue. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * RepositoryNotFoundException +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. 
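+//
+// Editorial sketch, not generated SDK text: making all tags in a repository
+// immutable, assuming a constructed *ECR client named svc and the hypothetical
+// repository name "my-repo".
+//
+//    _, err := svc.PutImageTagMutability(&ecr.PutImageTagMutabilityInput{
+//        RepositoryName:     aws.String("my-repo"),
+//        ImageTagMutability: aws.String(ecr.ImageTagMutabilityImmutable),
+//    })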
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImageTagMutability +func (c *ECR) PutImageTagMutability(input *PutImageTagMutabilityInput) (*PutImageTagMutabilityOutput, error) { + req, out := c.PutImageTagMutabilityRequest(input) + return out, req.Send() +} + +// PutImageTagMutabilityWithContext is the same as PutImageTagMutability with the addition of +// the ability to pass a context and additional request options. +// +// See PutImageTagMutability for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) PutImageTagMutabilityWithContext(ctx aws.Context, input *PutImageTagMutabilityInput, opts ...request.Option) (*PutImageTagMutabilityOutput, error) { + req, out := c.PutImageTagMutabilityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutLifecyclePolicy = "PutLifecyclePolicy" + +// PutLifecyclePolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutLifecyclePolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutLifecyclePolicy for more information on using the PutLifecyclePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutLifecyclePolicyRequest method. +// req, resp := client.PutLifecyclePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutLifecyclePolicy +func (c *ECR) PutLifecyclePolicyRequest(input *PutLifecyclePolicyInput) (req *request.Request, output *PutLifecyclePolicyOutput) { + op := &request.Operation{ + Name: opPutLifecyclePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutLifecyclePolicyInput{} + } + + output = &PutLifecyclePolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutLifecyclePolicy API operation for Amazon EC2 Container Registry. +// +// Creates or updates the lifecycle policy for the specified repository. For +// more information, see Lifecycle Policy Template (https://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation PutLifecyclePolicy for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server-side issue. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * RepositoryNotFoundException +// The specified repository could not be found. 
Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutLifecyclePolicy +func (c *ECR) PutLifecyclePolicy(input *PutLifecyclePolicyInput) (*PutLifecyclePolicyOutput, error) { + req, out := c.PutLifecyclePolicyRequest(input) + return out, req.Send() +} + +// PutLifecyclePolicyWithContext is the same as PutLifecyclePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutLifecyclePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) PutLifecyclePolicyWithContext(ctx aws.Context, input *PutLifecyclePolicyInput, opts ...request.Option) (*PutLifecyclePolicyOutput, error) { + req, out := c.PutLifecyclePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSetRepositoryPolicy = "SetRepositoryPolicy" + +// SetRepositoryPolicyRequest generates a "aws/request.Request" representing the +// client's request for the SetRepositoryPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SetRepositoryPolicy for more information on using the SetRepositoryPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SetRepositoryPolicyRequest method. +// req, resp := client.SetRepositoryPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/SetRepositoryPolicy +func (c *ECR) SetRepositoryPolicyRequest(input *SetRepositoryPolicyInput) (req *request.Request, output *SetRepositoryPolicyOutput) { + op := &request.Operation{ + Name: opSetRepositoryPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetRepositoryPolicyInput{} + } + + output = &SetRepositoryPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// SetRepositoryPolicy API operation for Amazon EC2 Container Registry. +// +// Applies a repository policy to the specified repository to control access +// permissions. For more information, see Amazon ECR Repository Policies (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policies.html) +// in the Amazon Elastic Container Registry User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation SetRepositoryPolicy for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server-side issue. 
+//
+// * InvalidParameterException
+// The specified parameter is invalid. Review the available parameters for the
+// API request.
+//
+// * RepositoryNotFoundException
+// The specified repository could not be found. Check the spelling of the specified
+// repository and ensure that you are performing operations on the correct registry.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/SetRepositoryPolicy
+func (c *ECR) SetRepositoryPolicy(input *SetRepositoryPolicyInput) (*SetRepositoryPolicyOutput, error) {
+	req, out := c.SetRepositoryPolicyRequest(input)
+	return out, req.Send()
+}
+
+// SetRepositoryPolicyWithContext is the same as SetRepositoryPolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See SetRepositoryPolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) SetRepositoryPolicyWithContext(ctx aws.Context, input *SetRepositoryPolicyInput, opts ...request.Option) (*SetRepositoryPolicyOutput, error) {
+	req, out := c.SetRepositoryPolicyRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opStartImageScan = "StartImageScan"
+
+// StartImageScanRequest generates a "aws/request.Request" representing the
+// client's request for the StartImageScan operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See StartImageScan for more information on using the StartImageScan
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the StartImageScanRequest method.
+//    req, resp := client.StartImageScanRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartImageScan
+func (c *ECR) StartImageScanRequest(input *StartImageScanInput) (req *request.Request, output *StartImageScanOutput) {
+	op := &request.Operation{
+		Name:       opStartImageScan,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &StartImageScanInput{}
+	}
+
+	output = &StartImageScanOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// StartImageScan API operation for Amazon EC2 Container Registry.
+//
+// Starts an image vulnerability scan. An image scan can only be started once
+// per day on an individual image. This limit includes the scan performed
+// automatically on initial push, when scan on push is enabled. For more
+// information, see Image Scanning (https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html)
+// in the Amazon Elastic Container Registry User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
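+//
+// Editorial sketch, not generated SDK text: starting a scan of a tagged image,
+// assuming a constructed *ECR client named svc and hypothetical names.
+//
+//    _, err := svc.StartImageScan(&ecr.StartImageScanInput{
+//        RepositoryName: aws.String("my-repo"),
+//        ImageId:        &ecr.ImageIdentifier{ImageTag: aws.String("latest")},
+//    })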
+// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation StartImageScan for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server-side issue. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * UnsupportedImageTypeException +// The image is of a type that cannot be scanned. +// +// * LimitExceededException +// The operation did not succeed because it would have exceeded a service limit +// for your account. For more information, see Amazon ECR Service Quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) +// in the Amazon Elastic Container Registry User Guide. +// +// * RepositoryNotFoundException +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * ImageNotFoundException +// The image requested does not exist in the specified repository. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartImageScan +func (c *ECR) StartImageScan(input *StartImageScanInput) (*StartImageScanOutput, error) { + req, out := c.StartImageScanRequest(input) + return out, req.Send() +} + +// StartImageScanWithContext is the same as StartImageScan with the addition of +// the ability to pass a context and additional request options. +// +// See StartImageScan for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) StartImageScanWithContext(ctx aws.Context, input *StartImageScanInput, opts ...request.Option) (*StartImageScanOutput, error) { + req, out := c.StartImageScanRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartLifecyclePolicyPreview = "StartLifecyclePolicyPreview" + +// StartLifecyclePolicyPreviewRequest generates a "aws/request.Request" representing the +// client's request for the StartLifecyclePolicyPreview operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartLifecyclePolicyPreview for more information on using the StartLifecyclePolicyPreview +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartLifecyclePolicyPreviewRequest method. 
+// req, resp := client.StartLifecyclePolicyPreviewRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview +func (c *ECR) StartLifecyclePolicyPreviewRequest(input *StartLifecyclePolicyPreviewInput) (req *request.Request, output *StartLifecyclePolicyPreviewOutput) { + op := &request.Operation{ + Name: opStartLifecyclePolicyPreview, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartLifecyclePolicyPreviewInput{} + } + + output = &StartLifecyclePolicyPreviewOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartLifecyclePolicyPreview API operation for Amazon EC2 Container Registry. +// +// Starts a preview of a lifecycle policy for the specified repository. This +// allows you to see the results before associating the lifecycle policy with +// the repository. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation StartLifecyclePolicyPreview for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server-side issue. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * RepositoryNotFoundException +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * LifecyclePolicyNotFoundException +// The lifecycle policy could not be found, and no policy is set to the repository. +// +// * LifecyclePolicyPreviewInProgressException +// The previous lifecycle policy preview request has not completed. Please try +// again later. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview +func (c *ECR) StartLifecyclePolicyPreview(input *StartLifecyclePolicyPreviewInput) (*StartLifecyclePolicyPreviewOutput, error) { + req, out := c.StartLifecyclePolicyPreviewRequest(input) + return out, req.Send() +} + +// StartLifecyclePolicyPreviewWithContext is the same as StartLifecyclePolicyPreview with the addition of +// the ability to pass a context and additional request options. +// +// See StartLifecyclePolicyPreview for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) StartLifecyclePolicyPreviewWithContext(ctx aws.Context, input *StartLifecyclePolicyPreviewInput, opts ...request.Option) (*StartLifecyclePolicyPreviewOutput, error) { + req, out := c.StartLifecyclePolicyPreviewRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
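+//
+// Editorial sketch, not generated SDK text: a typical input for this operation,
+// assuming the hypothetical repository ARN shown.
+//
+//    input := &ecr.TagResourceInput{
+//        ResourceArn: aws.String("arn:aws:ecr:us-east-1:123456789012:repository/my-repo"),
+//        Tags: []*ecr.Tag{
+//            {Key: aws.String("team"), Value: aws.String("platform")},
+//        },
+//    }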
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/TagResource +func (c *ECR) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for Amazon EC2 Container Registry. +// +// Adds specified tags to a resource with the specified ARN. Existing tags on +// a resource are not changed if they are not specified in the request parameters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation TagResource for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * InvalidTagParameterException +// An invalid parameter has been specified. Tag keys can have a maximum character +// length of 128 characters, and tag values can have a maximum length of 256 +// characters. +// +// * TooManyTagsException +// The list of tags on the repository is over the limit. The maximum number +// of tags that can be applied to a repository is 50. +// +// * RepositoryNotFoundException +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * ServerException +// These errors are usually caused by a server-side issue. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/TagResource +func (c *ECR) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UntagResource +func (c *ECR) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for Amazon EC2 Container Registry. +// +// Deletes specified tags from a resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation UntagResource for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * InvalidTagParameterException +// An invalid parameter has been specified. Tag keys can have a maximum character +// length of 128 characters, and tag values can have a maximum length of 256 +// characters. +// +// * TooManyTagsException +// The list of tags on the repository is over the limit. The maximum number +// of tags that can be applied to a repository is 50. +// +// * RepositoryNotFoundException +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * ServerException +// These errors are usually caused by a server-side issue. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UntagResource +func (c *ECR) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUploadLayerPart = "UploadLayerPart" + +// UploadLayerPartRequest generates a "aws/request.Request" representing the +// client's request for the UploadLayerPart operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UploadLayerPart for more information on using the UploadLayerPart +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UploadLayerPartRequest method. +// req, resp := client.UploadLayerPartRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UploadLayerPart +func (c *ECR) UploadLayerPartRequest(input *UploadLayerPartInput) (req *request.Request, output *UploadLayerPartOutput) { + op := &request.Operation{ + Name: opUploadLayerPart, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UploadLayerPartInput{} + } + + output = &UploadLayerPartOutput{} + req = c.newRequest(op, input, output) + return +} + +// UploadLayerPart API operation for Amazon EC2 Container Registry. +// +// Uploads an image layer part to Amazon ECR. +// +// When an image is pushed, each new image layer is uploaded in parts. The maximum +// size of each image layer part can be 20971520 bytes (or about 20MB). The +// UploadLayerPart API is called once per each new image layer part. +// +// This operation is used by the Amazon ECR proxy and is not generally used +// by customers for pulling and pushing images. In most cases, you should use +// the docker CLI to pull, tag, and push images. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation UploadLayerPart for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server-side issue. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * InvalidLayerPartException +// The layer part size is not valid, or the first byte specified is not consecutive +// to the last byte of a previous layer part upload. +// +// * RepositoryNotFoundException +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * UploadNotFoundException +// The upload could not be found, or the specified upload id is not valid for +// this repository. 
+//
+// * LimitExceededException
+// The operation did not succeed because it would have exceeded a service limit
+// for your account. For more information, see Amazon ECR Service Quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html)
+// in the Amazon Elastic Container Registry User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UploadLayerPart
+func (c *ECR) UploadLayerPart(input *UploadLayerPartInput) (*UploadLayerPartOutput, error) {
+	req, out := c.UploadLayerPartRequest(input)
+	return out, req.Send()
+}
+
+// UploadLayerPartWithContext is the same as UploadLayerPart with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UploadLayerPart for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) UploadLayerPartWithContext(ctx aws.Context, input *UploadLayerPartInput, opts ...request.Option) (*UploadLayerPartOutput, error) {
+	req, out := c.UploadLayerPartRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+// This data type is used in the ImageScanFinding data type.
+type Attribute struct {
+	_ struct{} `type:"structure"`
+
+	// The attribute key.
+	//
+	// Key is a required field
+	Key *string `locationName:"key" min:"1" type:"string" required:"true"`
+
+	// The value assigned to the attribute key.
+	Value *string `locationName:"value" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s Attribute) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Attribute) GoString() string {
+	return s.String()
+}
+
+// SetKey sets the Key field's value.
+func (s *Attribute) SetKey(v string) *Attribute {
+	s.Key = &v
+	return s
+}
+
+// SetValue sets the Value field's value.
+func (s *Attribute) SetValue(v string) *Attribute {
+	s.Value = &v
+	return s
+}
+
+// An object representing authorization data for an Amazon ECR registry.
+type AuthorizationData struct {
+	_ struct{} `type:"structure"`
+
+	// A base64-encoded string that contains authorization data for the specified
+	// Amazon ECR registry. When the string is decoded, it is presented in the format
+	// user:password for private registry authentication using docker login.
+	AuthorizationToken *string `locationName:"authorizationToken" type:"string"`
+
+	// The Unix time in seconds and milliseconds when the authorization token expires.
+	// Authorization tokens are valid for 12 hours.
+	ExpiresAt *time.Time `locationName:"expiresAt" type:"timestamp"`
+
+	// The registry URL to use for this authorization token in a docker login command.
+	// The Amazon ECR registry URL format is https://aws_account_id.dkr.ecr.region.amazonaws.com.
+	// For example, https://012345678910.dkr.ecr.us-east-1.amazonaws.com.
+	ProxyEndpoint *string `locationName:"proxyEndpoint" type:"string"`
+}
+
+// String returns the string representation
+func (s AuthorizationData) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AuthorizationData) GoString() string {
+	return s.String()
+}
+
+// SetAuthorizationToken sets the AuthorizationToken field's value.
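+//
+// Editorial sketch, not generated SDK text: the token decodes to user:password
+// as noted above; assuming an AuthorizationData value d (uses encoding/base64
+// and strings).
+//
+//    raw, _ := base64.StdEncoding.DecodeString(aws.StringValue(d.AuthorizationToken))
+//    creds := strings.SplitN(string(raw), ":", 2) // creds[0] is the user, creds[1] the password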
+func (s *AuthorizationData) SetAuthorizationToken(v string) *AuthorizationData { + s.AuthorizationToken = &v + return s +} + +// SetExpiresAt sets the ExpiresAt field's value. +func (s *AuthorizationData) SetExpiresAt(v time.Time) *AuthorizationData { + s.ExpiresAt = &v + return s +} + +// SetProxyEndpoint sets the ProxyEndpoint field's value. +func (s *AuthorizationData) SetProxyEndpoint(v string) *AuthorizationData { + s.ProxyEndpoint = &v + return s +} + +type BatchCheckLayerAvailabilityInput struct { + _ struct{} `type:"structure"` + + // The digests of the image layers to check. + // + // LayerDigests is a required field + LayerDigests []*string `locationName:"layerDigests" min:"1" type:"list" required:"true"` + + // The AWS account ID associated with the registry that contains the image layers + // to check. If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository that is associated with the image layers to check. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchCheckLayerAvailabilityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchCheckLayerAvailabilityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchCheckLayerAvailabilityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchCheckLayerAvailabilityInput"} + if s.LayerDigests == nil { + invalidParams.Add(request.NewErrParamRequired("LayerDigests")) + } + if s.LayerDigests != nil && len(s.LayerDigests) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LayerDigests", 1)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLayerDigests sets the LayerDigests field's value. +func (s *BatchCheckLayerAvailabilityInput) SetLayerDigests(v []*string) *BatchCheckLayerAvailabilityInput { + s.LayerDigests = v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *BatchCheckLayerAvailabilityInput) SetRegistryId(v string) *BatchCheckLayerAvailabilityInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *BatchCheckLayerAvailabilityInput) SetRepositoryName(v string) *BatchCheckLayerAvailabilityInput { + s.RepositoryName = &v + return s +} + +type BatchCheckLayerAvailabilityOutput struct { + _ struct{} `type:"structure"` + + // Any failures associated with the call. + Failures []*LayerFailure `locationName:"failures" type:"list"` + + // A list of image layer objects corresponding to the image layer references + // in the request. + Layers []*Layer `locationName:"layers" type:"list"` +} + +// String returns the string representation +func (s BatchCheckLayerAvailabilityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchCheckLayerAvailabilityOutput) GoString() string { + return s.String() +} + +// SetFailures sets the Failures field's value. 
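+//
+// Editorial sketch, not generated SDK text: inspecting per-layer failures from
+// a BatchCheckLayerAvailability response, assuming an output value named out.
+//
+//    for _, f := range out.Failures {
+//        fmt.Printf("layer %s: %s\n", aws.StringValue(f.LayerDigest), aws.StringValue(f.FailureReason))
+//    }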
+func (s *BatchCheckLayerAvailabilityOutput) SetFailures(v []*LayerFailure) *BatchCheckLayerAvailabilityOutput { + s.Failures = v + return s +} + +// SetLayers sets the Layers field's value. +func (s *BatchCheckLayerAvailabilityOutput) SetLayers(v []*Layer) *BatchCheckLayerAvailabilityOutput { + s.Layers = v + return s +} + +// Deletes specified images within a specified repository. Images are specified +// with either the imageTag or imageDigest. +type BatchDeleteImageInput struct { + _ struct{} `type:"structure"` + + // A list of image ID references that correspond to images to delete. The format + // of the imageIds reference is imageTag=tag or imageDigest=digest. + // + // ImageIds is a required field + ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list" required:"true"` + + // The AWS account ID associated with the registry that contains the image to + // delete. If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository that contains the image to delete. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchDeleteImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDeleteImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchDeleteImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchDeleteImageInput"} + if s.ImageIds == nil { + invalidParams.Add(request.NewErrParamRequired("ImageIds")) + } + if s.ImageIds != nil && len(s.ImageIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ImageIds", 1)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + if s.ImageIds != nil { + for i, v := range s.ImageIds { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ImageIds", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetImageIds sets the ImageIds field's value. +func (s *BatchDeleteImageInput) SetImageIds(v []*ImageIdentifier) *BatchDeleteImageInput { + s.ImageIds = v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *BatchDeleteImageInput) SetRegistryId(v string) *BatchDeleteImageInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *BatchDeleteImageInput) SetRepositoryName(v string) *BatchDeleteImageInput { + s.RepositoryName = &v + return s +} + +type BatchDeleteImageOutput struct { + _ struct{} `type:"structure"` + + // Any failures associated with the call. + Failures []*ImageFailure `locationName:"failures" type:"list"` + + // The image IDs of the deleted images. 
+ ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list"` +} + +// String returns the string representation +func (s BatchDeleteImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDeleteImageOutput) GoString() string { + return s.String() +} + +// SetFailures sets the Failures field's value. +func (s *BatchDeleteImageOutput) SetFailures(v []*ImageFailure) *BatchDeleteImageOutput { + s.Failures = v + return s +} + +// SetImageIds sets the ImageIds field's value. +func (s *BatchDeleteImageOutput) SetImageIds(v []*ImageIdentifier) *BatchDeleteImageOutput { + s.ImageIds = v + return s +} + +type BatchGetImageInput struct { + _ struct{} `type:"structure"` + + // The accepted media types for the request. + // + // Valid values: application/vnd.docker.distribution.manifest.v1+json | application/vnd.docker.distribution.manifest.v2+json + // | application/vnd.oci.image.manifest.v1+json + AcceptedMediaTypes []*string `locationName:"acceptedMediaTypes" min:"1" type:"list"` + + // A list of image ID references that correspond to images to describe. The + // format of the imageIds reference is imageTag=tag or imageDigest=digest. + // + // ImageIds is a required field + ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list" required:"true"` + + // The AWS account ID associated with the registry that contains the images + // to describe. If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository that contains the images to describe. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchGetImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchGetImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetImageInput"} + if s.AcceptedMediaTypes != nil && len(s.AcceptedMediaTypes) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AcceptedMediaTypes", 1)) + } + if s.ImageIds == nil { + invalidParams.Add(request.NewErrParamRequired("ImageIds")) + } + if s.ImageIds != nil && len(s.ImageIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ImageIds", 1)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + if s.ImageIds != nil { + for i, v := range s.ImageIds { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ImageIds", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAcceptedMediaTypes sets the AcceptedMediaTypes field's value. +func (s *BatchGetImageInput) SetAcceptedMediaTypes(v []*string) *BatchGetImageInput { + s.AcceptedMediaTypes = v + return s +} + +// SetImageIds sets the ImageIds field's value. +func (s *BatchGetImageInput) SetImageIds(v []*ImageIdentifier) *BatchGetImageInput { + s.ImageIds = v + return s +} + +// SetRegistryId sets the RegistryId field's value. 
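+//
+// Editorial sketch, not generated SDK text: fetching a manifest by tag,
+// assuming a constructed *ECR client named svc and hypothetical names.
+//
+//    out, err := svc.BatchGetImage(&ecr.BatchGetImageInput{
+//        RepositoryName: aws.String("my-repo"),
+//        ImageIds:       []*ecr.ImageIdentifier{{ImageTag: aws.String("latest")}},
+//    })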
+func (s *BatchGetImageInput) SetRegistryId(v string) *BatchGetImageInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *BatchGetImageInput) SetRepositoryName(v string) *BatchGetImageInput { + s.RepositoryName = &v + return s +} + +type BatchGetImageOutput struct { + _ struct{} `type:"structure"` + + // Any failures associated with the call. + Failures []*ImageFailure `locationName:"failures" type:"list"` + + // A list of image objects corresponding to the image references in the request. + Images []*Image `locationName:"images" type:"list"` +} + +// String returns the string representation +func (s BatchGetImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetImageOutput) GoString() string { + return s.String() +} + +// SetFailures sets the Failures field's value. +func (s *BatchGetImageOutput) SetFailures(v []*ImageFailure) *BatchGetImageOutput { + s.Failures = v + return s +} + +// SetImages sets the Images field's value. +func (s *BatchGetImageOutput) SetImages(v []*Image) *BatchGetImageOutput { + s.Images = v + return s +} + +type CompleteLayerUploadInput struct { + _ struct{} `type:"structure"` + + // The sha256 digest of the image layer. + // + // LayerDigests is a required field + LayerDigests []*string `locationName:"layerDigests" min:"1" type:"list" required:"true"` + + // The AWS account ID associated with the registry to which to upload layers. + // If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository to associate with the image layer. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` + + // The upload ID from a previous InitiateLayerUpload operation to associate + // with the image layer. + // + // UploadId is a required field + UploadId *string `locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteLayerUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteLayerUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CompleteLayerUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CompleteLayerUploadInput"} + if s.LayerDigests == nil { + invalidParams.Add(request.NewErrParamRequired("LayerDigests")) + } + if s.LayerDigests != nil && len(s.LayerDigests) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LayerDigests", 1)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLayerDigests sets the LayerDigests field's value. +func (s *CompleteLayerUploadInput) SetLayerDigests(v []*string) *CompleteLayerUploadInput { + s.LayerDigests = v + return s +} + +// SetRegistryId sets the RegistryId field's value. 
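+//
+// Editorial sketch, not generated SDK text: the upload ID returned by
+// InitiateLayerUpload ties the three-step layer push together. Assuming a
+// constructed *ECR client named svc, the hypothetical repository "my-repo",
+// a single part blob ([]byte) small enough for one UploadLayerPart call, and
+// its sha256 digest string:
+//
+//    init, _ := svc.InitiateLayerUpload(&ecr.InitiateLayerUploadInput{
+//        RepositoryName: aws.String("my-repo"),
+//    })
+//    svc.UploadLayerPart(&ecr.UploadLayerPartInput{
+//        RepositoryName: aws.String("my-repo"),
+//        UploadId:       init.UploadId,
+//        PartFirstByte:  aws.Int64(0),
+//        PartLastByte:   aws.Int64(int64(len(blob) - 1)),
+//        LayerPartBlob:  blob,
+//    })
+//    svc.CompleteLayerUpload(&ecr.CompleteLayerUploadInput{
+//        RepositoryName: aws.String("my-repo"),
+//        UploadId:       init.UploadId,
+//        LayerDigests:   []*string{aws.String(digest)},
+//    })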
+func (s *CompleteLayerUploadInput) SetRegistryId(v string) *CompleteLayerUploadInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *CompleteLayerUploadInput) SetRepositoryName(v string) *CompleteLayerUploadInput { + s.RepositoryName = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *CompleteLayerUploadInput) SetUploadId(v string) *CompleteLayerUploadInput { + s.UploadId = &v + return s +} + +type CompleteLayerUploadOutput struct { + _ struct{} `type:"structure"` + + // The sha256 digest of the image layer. + LayerDigest *string `locationName:"layerDigest" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` + + // The upload ID associated with the layer. + UploadId *string `locationName:"uploadId" type:"string"` +} + +// String returns the string representation +func (s CompleteLayerUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteLayerUploadOutput) GoString() string { + return s.String() +} + +// SetLayerDigest sets the LayerDigest field's value. +func (s *CompleteLayerUploadOutput) SetLayerDigest(v string) *CompleteLayerUploadOutput { + s.LayerDigest = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *CompleteLayerUploadOutput) SetRegistryId(v string) *CompleteLayerUploadOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *CompleteLayerUploadOutput) SetRepositoryName(v string) *CompleteLayerUploadOutput { + s.RepositoryName = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *CompleteLayerUploadOutput) SetUploadId(v string) *CompleteLayerUploadOutput { + s.UploadId = &v + return s +} + +type CreateRepositoryInput struct { + _ struct{} `type:"structure"` + + // The image scanning configuration for the repository. This setting determines + // whether images are scanned for known vulnerabilities after being pushed to + // the repository. + ImageScanningConfiguration *ImageScanningConfiguration `locationName:"imageScanningConfiguration" type:"structure"` + + // The tag mutability setting for the repository. If this parameter is omitted, + // the default setting of MUTABLE will be used, which will allow image tags to + // be overwritten. If IMMUTABLE is specified, all image tags within the repository + // will be immutable, which will prevent them from being overwritten. + ImageTagMutability *string `locationName:"imageTagMutability" type:"string" enum:"ImageTagMutability"` + + // The name to use for the repository. The repository name may be specified + // on its own (such as nginx-web-app) or it can be prepended with a namespace + // to group the repository into a category (such as project-a/nginx-web-app). + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` + + // The metadata that you apply to the repository to help you categorize and + // organize it. Each tag consists of a key and an optional value, both of + // which you define. Tag keys can have a maximum character length of 128 characters, + // and tag values can have a maximum length of 256 characters. 
+ Tags []*Tag `locationName:"tags" type:"list"` +} + +// String returns the string representation +func (s CreateRepositoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRepositoryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateRepositoryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateRepositoryInput"} + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetImageScanningConfiguration sets the ImageScanningConfiguration field's value. +func (s *CreateRepositoryInput) SetImageScanningConfiguration(v *ImageScanningConfiguration) *CreateRepositoryInput { + s.ImageScanningConfiguration = v + return s +} + +// SetImageTagMutability sets the ImageTagMutability field's value. +func (s *CreateRepositoryInput) SetImageTagMutability(v string) *CreateRepositoryInput { + s.ImageTagMutability = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *CreateRepositoryInput) SetRepositoryName(v string) *CreateRepositoryInput { + s.RepositoryName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateRepositoryInput) SetTags(v []*Tag) *CreateRepositoryInput { + s.Tags = v + return s +} + +type CreateRepositoryOutput struct { + _ struct{} `type:"structure"` + + // The repository that was created. + Repository *Repository `locationName:"repository" type:"structure"` +} + +// String returns the string representation +func (s CreateRepositoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRepositoryOutput) GoString() string { + return s.String() +} + +// SetRepository sets the Repository field's value. +func (s *CreateRepositoryOutput) SetRepository(v *Repository) *CreateRepositoryOutput { + s.Repository = v + return s +} + +type DeleteLifecyclePolicyInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID associated with the registry that contains the repository. + // If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLifecyclePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLifecyclePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteLifecyclePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLifecyclePolicyInput"} + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRegistryId sets the RegistryId field's value. 
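+
+// Usage sketch (illustrative only; not part of the generated file): creating a
+// repository with scan-on-push, immutable tags, and one resource tag. The
+// repository name and tag values are placeholders; the enum constant is the
+// generated ImageTagMutability value.
+//
+//	out, err := svc.CreateRepository(&ecr.CreateRepositoryInput{
+//		ImageScanningConfiguration: &ecr.ImageScanningConfiguration{
+//			ScanOnPush: aws.Bool(true), // scan each image after it is pushed
+//		},
+//		ImageTagMutability: aws.String(ecr.ImageTagMutabilityImmutable),
+//		RepositoryName:     aws.String("project-a/nginx-web-app"), // namespaced form from the doc above
+//		Tags: []*ecr.Tag{
+//			{Key: aws.String("team"), Value: aws.String("platform")}, // placeholder tag
+//		},
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(aws.StringValue(out.Repository.RepositoryUri))
+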
+func (s *DeleteLifecyclePolicyInput) SetRegistryId(v string) *DeleteLifecyclePolicyInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *DeleteLifecyclePolicyInput) SetRepositoryName(v string) *DeleteLifecyclePolicyInput { + s.RepositoryName = &v + return s +} + +type DeleteLifecyclePolicyOutput struct { + _ struct{} `type:"structure"` + + // The time stamp of the last time that the lifecycle policy was run. + LastEvaluatedAt *time.Time `locationName:"lastEvaluatedAt" type:"timestamp"` + + // The JSON lifecycle policy text. + LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s DeleteLifecyclePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLifecyclePolicyOutput) GoString() string { + return s.String() +} + +// SetLastEvaluatedAt sets the LastEvaluatedAt field's value. +func (s *DeleteLifecyclePolicyOutput) SetLastEvaluatedAt(v time.Time) *DeleteLifecyclePolicyOutput { + s.LastEvaluatedAt = &v + return s +} + +// SetLifecyclePolicyText sets the LifecyclePolicyText field's value. +func (s *DeleteLifecyclePolicyOutput) SetLifecyclePolicyText(v string) *DeleteLifecyclePolicyOutput { + s.LifecyclePolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *DeleteLifecyclePolicyOutput) SetRegistryId(v string) *DeleteLifecyclePolicyOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *DeleteLifecyclePolicyOutput) SetRepositoryName(v string) *DeleteLifecyclePolicyOutput { + s.RepositoryName = &v + return s +} + +type DeleteRepositoryInput struct { + _ struct{} `type:"structure"` + + // If a repository contains images, forces the deletion. + Force *bool `locationName:"force" type:"boolean"` + + // The AWS account ID associated with the registry that contains the repository + // to delete. If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository to delete. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRepositoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRepositoryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRepositoryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRepositoryInput"} + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetForce sets the Force field's value. 
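+
+// Usage sketch (illustrative only; not part of the generated file): deleting a
+// repository that still contains images fails unless Force is set. The
+// repository name is a placeholder.
+//
+//	_, err := svc.DeleteRepository(&ecr.DeleteRepositoryInput{
+//		Force:          aws.Bool(true), // delete even if images remain
+//		RepositoryName: aws.String("my-repo"), // placeholder
+//	})
+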
+func (s *DeleteRepositoryInput) SetForce(v bool) *DeleteRepositoryInput { + s.Force = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *DeleteRepositoryInput) SetRegistryId(v string) *DeleteRepositoryInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *DeleteRepositoryInput) SetRepositoryName(v string) *DeleteRepositoryInput { + s.RepositoryName = &v + return s +} + +type DeleteRepositoryOutput struct { + _ struct{} `type:"structure"` + + // The repository that was deleted. + Repository *Repository `locationName:"repository" type:"structure"` +} + +// String returns the string representation +func (s DeleteRepositoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRepositoryOutput) GoString() string { + return s.String() +} + +// SetRepository sets the Repository field's value. +func (s *DeleteRepositoryOutput) SetRepository(v *Repository) *DeleteRepositoryOutput { + s.Repository = v + return s +} + +type DeleteRepositoryPolicyInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID associated with the registry that contains the repository + // policy to delete. If you do not specify a registry, the default registry + // is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository that is associated with the repository policy + // to delete. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRepositoryPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRepositoryPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRepositoryPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRepositoryPolicyInput"} + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRegistryId sets the RegistryId field's value. +func (s *DeleteRepositoryPolicyInput) SetRegistryId(v string) *DeleteRepositoryPolicyInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *DeleteRepositoryPolicyInput) SetRepositoryName(v string) *DeleteRepositoryPolicyInput { + s.RepositoryName = &v + return s +} + +type DeleteRepositoryPolicyOutput struct { + _ struct{} `type:"structure"` + + // The JSON repository policy that was deleted from the repository. + PolicyText *string `locationName:"policyText" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. 
+	RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s DeleteRepositoryPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRepositoryPolicyOutput) GoString() string { + return s.String() +} + +// SetPolicyText sets the PolicyText field's value. +func (s *DeleteRepositoryPolicyOutput) SetPolicyText(v string) *DeleteRepositoryPolicyOutput { + s.PolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *DeleteRepositoryPolicyOutput) SetRegistryId(v string) *DeleteRepositoryPolicyOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *DeleteRepositoryPolicyOutput) SetRepositoryName(v string) *DeleteRepositoryPolicyOutput { + s.RepositoryName = &v + return s +} + +type DescribeImageScanFindingsInput struct { + _ struct{} `type:"structure"` + + // An object with identifying information for an Amazon ECR image. + // + // ImageId is a required field + ImageId *ImageIdentifier `locationName:"imageId" type:"structure" required:"true"` + + // The maximum number of image scan results returned by DescribeImageScanFindings + // in paginated output. When this parameter is used, DescribeImageScanFindings + // only returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another DescribeImageScanFindings request with the returned nextToken value. + // This value can be between 1 and 1000. If this parameter is not used, then + // DescribeImageScanFindings returns up to 100 results and a nextToken value, + // if applicable. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The nextToken value returned from a previous paginated DescribeImageScanFindings + // request where maxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the nextToken value. This value is null when there are no more results + // to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The AWS account ID associated with the registry that contains the repository + // for which to describe the image scan findings. If you do not specify a + // registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository that contains the image for which to describe the scan findings. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeImageScanFindingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImageScanFindingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeImageScanFindingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeImageScanFindingsInput"} + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + if s.ImageId != nil { + if err := s.ImageId.Validate(); err != nil { + invalidParams.AddNested("ImageId", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetImageId sets the ImageId field's value. +func (s *DescribeImageScanFindingsInput) SetImageId(v *ImageIdentifier) *DescribeImageScanFindingsInput { + s.ImageId = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeImageScanFindingsInput) SetMaxResults(v int64) *DescribeImageScanFindingsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeImageScanFindingsInput) SetNextToken(v string) *DescribeImageScanFindingsInput { + s.NextToken = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *DescribeImageScanFindingsInput) SetRegistryId(v string) *DescribeImageScanFindingsInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *DescribeImageScanFindingsInput) SetRepositoryName(v string) *DescribeImageScanFindingsInput { + s.RepositoryName = &v + return s +} + +type DescribeImageScanFindingsOutput struct { + _ struct{} `type:"structure"` + + // An object with identifying information for an Amazon ECR image. + ImageId *ImageIdentifier `locationName:"imageId" type:"structure"` + + // The information contained in the image scan findings. + ImageScanFindings *ImageScanFindings `locationName:"imageScanFindings" type:"structure"` + + // The current state of the scan. + ImageScanStatus *ImageScanStatus `locationName:"imageScanStatus" type:"structure"` + + // The nextToken value to include in a future DescribeImageScanFindings request. + // When the results of a DescribeImageScanFindings request exceed maxResults, + // this value can be used to retrieve the next page of results. This value is + // null when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s DescribeImageScanFindingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImageScanFindingsOutput) GoString() string { + return s.String() +} + +// SetImageId sets the ImageId field's value. +func (s *DescribeImageScanFindingsOutput) SetImageId(v *ImageIdentifier) *DescribeImageScanFindingsOutput { + s.ImageId = v + return s +} + +// SetImageScanFindings sets the ImageScanFindings field's value. 
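+
+// Usage sketch (illustrative only; not part of the generated file): walking all
+// scan findings with the generated DescribeImageScanFindingsPages paginator,
+// which threads nextToken through successive calls, assuming this vendored SDK
+// version includes that paginator. Names are placeholders.
+//
+//	input := &ecr.DescribeImageScanFindingsInput{
+//		ImageId:        &ecr.ImageIdentifier{ImageTag: aws.String("latest")}, // placeholder
+//		MaxResults:     aws.Int64(100),
+//		RepositoryName: aws.String("my-repo"), // placeholder
+//	}
+//	err := svc.DescribeImageScanFindingsPages(input,
+//		func(page *ecr.DescribeImageScanFindingsOutput, lastPage bool) bool {
+//			if page.ImageScanFindings != nil {
+//				for _, f := range page.ImageScanFindings.Findings {
+//					fmt.Println(aws.StringValue(f.Name), aws.StringValue(f.Severity))
+//				}
+//			}
+//			return !lastPage // stop once the final page arrives
+//		})
+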
+func (s *DescribeImageScanFindingsOutput) SetImageScanFindings(v *ImageScanFindings) *DescribeImageScanFindingsOutput { + s.ImageScanFindings = v + return s +} + +// SetImageScanStatus sets the ImageScanStatus field's value. +func (s *DescribeImageScanFindingsOutput) SetImageScanStatus(v *ImageScanStatus) *DescribeImageScanFindingsOutput { + s.ImageScanStatus = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeImageScanFindingsOutput) SetNextToken(v string) *DescribeImageScanFindingsOutput { + s.NextToken = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *DescribeImageScanFindingsOutput) SetRegistryId(v string) *DescribeImageScanFindingsOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *DescribeImageScanFindingsOutput) SetRepositoryName(v string) *DescribeImageScanFindingsOutput { + s.RepositoryName = &v + return s +} + +// An object representing a filter on a DescribeImages operation. +type DescribeImagesFilter struct { + _ struct{} `type:"structure"` + + // The tag status with which to filter your DescribeImages results. You can + // filter results based on whether they are TAGGED or UNTAGGED. + TagStatus *string `locationName:"tagStatus" type:"string" enum:"TagStatus"` +} + +// String returns the string representation +func (s DescribeImagesFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImagesFilter) GoString() string { + return s.String() +} + +// SetTagStatus sets the TagStatus field's value. +func (s *DescribeImagesFilter) SetTagStatus(v string) *DescribeImagesFilter { + s.TagStatus = &v + return s +} + +type DescribeImagesInput struct { + _ struct{} `type:"structure"` + + // The filter key and value with which to filter your DescribeImages results. + Filter *DescribeImagesFilter `locationName:"filter" type:"structure"` + + // The list of image IDs for the requested repository. + ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list"` + + // The maximum number of repository results returned by DescribeImages in paginated + // output. When this parameter is used, DescribeImages only returns maxResults + // results in a single page along with a nextToken response element. The remaining + // results of the initial request can be seen by sending another DescribeImages + // request with the returned nextToken value. This value can be between 1 and + // 1000. If this parameter is not used, then DescribeImages returns up to 100 + // results and a nextToken value, if applicable. This option cannot be used + // when you specify images with imageIds. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The nextToken value returned from a previous paginated DescribeImages request + // where maxResults was used and the results exceeded the value of that parameter. + // Pagination continues from the end of the previous results that returned the + // nextToken value. This value is null when there are no more results to return. + // This option cannot be used when you specify images with imageIds. + NextToken *string `locationName:"nextToken" type:"string"` + + // The AWS account ID associated with the registry that contains the repository + // in which to describe images. If you do not specify a registry, the default + // registry is assumed. 
+ RegistryId *string `locationName:"registryId" type:"string"` + + // The repository that contains the images to describe. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeImagesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImagesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeImagesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeImagesInput"} + if s.ImageIds != nil && len(s.ImageIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ImageIds", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + if s.ImageIds != nil { + for i, v := range s.ImageIds { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ImageIds", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *DescribeImagesInput) SetFilter(v *DescribeImagesFilter) *DescribeImagesInput { + s.Filter = v + return s +} + +// SetImageIds sets the ImageIds field's value. +func (s *DescribeImagesInput) SetImageIds(v []*ImageIdentifier) *DescribeImagesInput { + s.ImageIds = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeImagesInput) SetMaxResults(v int64) *DescribeImagesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeImagesInput) SetNextToken(v string) *DescribeImagesInput { + s.NextToken = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *DescribeImagesInput) SetRegistryId(v string) *DescribeImagesInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *DescribeImagesInput) SetRepositoryName(v string) *DescribeImagesInput { + s.RepositoryName = &v + return s +} + +type DescribeImagesOutput struct { + _ struct{} `type:"structure"` + + // A list of ImageDetail objects that contain data about the image. + ImageDetails []*ImageDetail `locationName:"imageDetails" type:"list"` + + // The nextToken value to include in a future DescribeImages request. When the + // results of a DescribeImages request exceed maxResults, this value can be + // used to retrieve the next page of results. This value is null when there + // are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeImagesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImagesOutput) GoString() string { + return s.String() +} + +// SetImageDetails sets the ImageDetails field's value. +func (s *DescribeImagesOutput) SetImageDetails(v []*ImageDetail) *DescribeImagesOutput { + s.ImageDetails = v + return s +} + +// SetNextToken sets the NextToken field's value. 
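+
+// Usage sketch (illustrative only; not part of the generated file): listing
+// only tagged images page by page; the generated DescribeImagesPages paginator
+// handles the maxResults/nextToken loop described above. The repository name is
+// a placeholder.
+//
+//	input := &ecr.DescribeImagesInput{
+//		Filter:         &ecr.DescribeImagesFilter{TagStatus: aws.String(ecr.TagStatusTagged)},
+//		MaxResults:     aws.Int64(1000),
+//		RepositoryName: aws.String("my-repo"), // placeholder
+//	}
+//	err := svc.DescribeImagesPages(input,
+//		func(page *ecr.DescribeImagesOutput, lastPage bool) bool {
+//			fmt.Printf("%d images on this page\n", len(page.ImageDetails))
+//			return !lastPage
+//		})
+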
+func (s *DescribeImagesOutput) SetNextToken(v string) *DescribeImagesOutput { + s.NextToken = &v + return s +} + +type DescribeRepositoriesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of repository results returned by DescribeRepositories + // in paginated output. When this parameter is used, DescribeRepositories only + // returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another DescribeRepositories request with the returned nextToken value. This + // value can be between 1 and 1000. If this parameter is not used, then DescribeRepositories + // returns up to 100 results and a nextToken value, if applicable. This option + // cannot be used when you specify repositories with repositoryNames. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The nextToken value returned from a previous paginated DescribeRepositories + // request where maxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the nextToken value. This value is null when there are no more results + // to return. This option cannot be used when you specify repositories with + // repositoryNames. + // + // This token should be treated as an opaque identifier that is only used to + // retrieve the next items in a list and not for other programmatic purposes. + NextToken *string `locationName:"nextToken" type:"string"` + + // The AWS account ID associated with the registry that contains the repositories + // to be described. If you do not specify a registry, the default registry is + // assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // A list of repositories to describe. If this parameter is omitted, then all + // repositories in a registry are described. + RepositoryNames []*string `locationName:"repositoryNames" min:"1" type:"list"` +} + +// String returns the string representation +func (s DescribeRepositoriesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRepositoriesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeRepositoriesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeRepositoriesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.RepositoryNames != nil && len(s.RepositoryNames) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryNames", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeRepositoriesInput) SetMaxResults(v int64) *DescribeRepositoriesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeRepositoriesInput) SetNextToken(v string) *DescribeRepositoriesInput { + s.NextToken = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *DescribeRepositoriesInput) SetRegistryId(v string) *DescribeRepositoriesInput { + s.RegistryId = &v + return s +} + +// SetRepositoryNames sets the RepositoryNames field's value. 
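+
+// Usage sketch (illustrative only; not part of the generated file): because
+// nextToken is an opaque identifier, the simplest correct consumption is the
+// generated DescribeRepositoriesPages paginator rather than hand-rolled token
+// handling.
+//
+//	err := svc.DescribeRepositoriesPages(&ecr.DescribeRepositoriesInput{},
+//		func(page *ecr.DescribeRepositoriesOutput, lastPage bool) bool {
+//			for _, r := range page.Repositories {
+//				fmt.Println(aws.StringValue(r.RepositoryName))
+//			}
+//			return !lastPage
+//		})
+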
+func (s *DescribeRepositoriesInput) SetRepositoryNames(v []*string) *DescribeRepositoriesInput { + s.RepositoryNames = v + return s +} + +type DescribeRepositoriesOutput struct { + _ struct{} `type:"structure"` + + // The nextToken value to include in a future DescribeRepositories request. + // When the results of a DescribeRepositories request exceed maxResults, this + // value can be used to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of repository objects corresponding to valid repositories. + Repositories []*Repository `locationName:"repositories" type:"list"` +} + +// String returns the string representation +func (s DescribeRepositoriesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRepositoriesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeRepositoriesOutput) SetNextToken(v string) *DescribeRepositoriesOutput { + s.NextToken = &v + return s +} + +// SetRepositories sets the Repositories field's value. +func (s *DescribeRepositoriesOutput) SetRepositories(v []*Repository) *DescribeRepositoriesOutput { + s.Repositories = v + return s +} + +// The specified layer upload does not contain any layer parts. +type EmptyUploadException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The error message associated with the exception. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s EmptyUploadException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EmptyUploadException) GoString() string { + return s.String() +} + +func newErrorEmptyUploadException(v protocol.ResponseMetadata) error { + return &EmptyUploadException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *EmptyUploadException) Code() string { + return "EmptyUploadException" +} + +// Message returns the exception's message. +func (s *EmptyUploadException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *EmptyUploadException) OrigErr() error { + return nil +} + +func (s *EmptyUploadException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// StatusCode returns the HTTP status code for the request's response error. +func (s *EmptyUploadException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for the request. +func (s *EmptyUploadException) RequestID() string { + return s.RespMetadata.RequestID +} + +type GetAuthorizationTokenInput struct { + _ struct{} `type:"structure"` + + // A list of AWS account IDs that are associated with the registries for which + // to get AuthorizationData objects. If you do not specify a registry, the default + // registry is assumed. 
+ RegistryIds []*string `locationName:"registryIds" min:"1" type:"list"` +} + +// String returns the string representation +func (s GetAuthorizationTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAuthorizationTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAuthorizationTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAuthorizationTokenInput"} + if s.RegistryIds != nil && len(s.RegistryIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RegistryIds", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRegistryIds sets the RegistryIds field's value. +func (s *GetAuthorizationTokenInput) SetRegistryIds(v []*string) *GetAuthorizationTokenInput { + s.RegistryIds = v + return s +} + +type GetAuthorizationTokenOutput struct { + _ struct{} `type:"structure"` + + // A list of authorization token data objects that correspond to the registryIds + // values in the request. + AuthorizationData []*AuthorizationData `locationName:"authorizationData" type:"list"` +} + +// String returns the string representation +func (s GetAuthorizationTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAuthorizationTokenOutput) GoString() string { + return s.String() +} + +// SetAuthorizationData sets the AuthorizationData field's value. +func (s *GetAuthorizationTokenOutput) SetAuthorizationData(v []*AuthorizationData) *GetAuthorizationTokenOutput { + s.AuthorizationData = v + return s +} + +type GetDownloadUrlForLayerInput struct { + _ struct{} `type:"structure"` + + // The digest of the image layer to download. + // + // LayerDigest is a required field + LayerDigest *string `locationName:"layerDigest" type:"string" required:"true"` + + // The AWS account ID associated with the registry that contains the image layer + // to download. If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository that is associated with the image layer to download. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDownloadUrlForLayerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDownloadUrlForLayerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDownloadUrlForLayerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDownloadUrlForLayerInput"} + if s.LayerDigest == nil { + invalidParams.Add(request.NewErrParamRequired("LayerDigest")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLayerDigest sets the LayerDigest field's value. 
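+
+// Usage sketch (illustrative only; not part of the generated file): the
+// returned token is base64-encoded "AWS:<password>", suitable for a docker
+// login against the ProxyEndpoint. Assumes encoding/base64 and strings are
+// imported by the caller.
+//
+//	out, err := svc.GetAuthorizationToken(&ecr.GetAuthorizationTokenInput{})
+//	if err != nil || len(out.AuthorizationData) == 0 {
+//		return err
+//	}
+//	auth := out.AuthorizationData[0]
+//	raw, err := base64.StdEncoding.DecodeString(aws.StringValue(auth.AuthorizationToken))
+//	if err != nil {
+//		return err
+//	}
+//	creds := strings.SplitN(string(raw), ":", 2) // ["AWS", "<password>"]
+//	fmt.Println(creds[0], aws.StringValue(auth.ProxyEndpoint))
+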
+func (s *GetDownloadUrlForLayerInput) SetLayerDigest(v string) *GetDownloadUrlForLayerInput { + s.LayerDigest = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *GetDownloadUrlForLayerInput) SetRegistryId(v string) *GetDownloadUrlForLayerInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *GetDownloadUrlForLayerInput) SetRepositoryName(v string) *GetDownloadUrlForLayerInput { + s.RepositoryName = &v + return s +} + +type GetDownloadUrlForLayerOutput struct { + _ struct{} `type:"structure"` + + // The pre-signed Amazon S3 download URL for the requested layer. + DownloadUrl *string `locationName:"downloadUrl" type:"string"` + + // The digest of the image layer to download. + LayerDigest *string `locationName:"layerDigest" type:"string"` +} + +// String returns the string representation +func (s GetDownloadUrlForLayerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDownloadUrlForLayerOutput) GoString() string { + return s.String() +} + +// SetDownloadUrl sets the DownloadUrl field's value. +func (s *GetDownloadUrlForLayerOutput) SetDownloadUrl(v string) *GetDownloadUrlForLayerOutput { + s.DownloadUrl = &v + return s +} + +// SetLayerDigest sets the LayerDigest field's value. +func (s *GetDownloadUrlForLayerOutput) SetLayerDigest(v string) *GetDownloadUrlForLayerOutput { + s.LayerDigest = &v + return s +} + +type GetLifecyclePolicyInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID associated with the registry that contains the repository. + // If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetLifecyclePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLifecyclePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetLifecyclePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetLifecyclePolicyInput"} + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRegistryId sets the RegistryId field's value. +func (s *GetLifecyclePolicyInput) SetRegistryId(v string) *GetLifecyclePolicyInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *GetLifecyclePolicyInput) SetRepositoryName(v string) *GetLifecyclePolicyInput { + s.RepositoryName = &v + return s +} + +type GetLifecyclePolicyOutput struct { + _ struct{} `type:"structure"` + + // The time stamp of the last time that the lifecycle policy was run. + LastEvaluatedAt *time.Time `locationName:"lastEvaluatedAt" type:"timestamp"` + + // The JSON lifecycle policy text. + LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"` + + // The registry ID associated with the request. 
+ RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s GetLifecyclePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLifecyclePolicyOutput) GoString() string { + return s.String() +} + +// SetLastEvaluatedAt sets the LastEvaluatedAt field's value. +func (s *GetLifecyclePolicyOutput) SetLastEvaluatedAt(v time.Time) *GetLifecyclePolicyOutput { + s.LastEvaluatedAt = &v + return s +} + +// SetLifecyclePolicyText sets the LifecyclePolicyText field's value. +func (s *GetLifecyclePolicyOutput) SetLifecyclePolicyText(v string) *GetLifecyclePolicyOutput { + s.LifecyclePolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *GetLifecyclePolicyOutput) SetRegistryId(v string) *GetLifecyclePolicyOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *GetLifecyclePolicyOutput) SetRepositoryName(v string) *GetLifecyclePolicyOutput { + s.RepositoryName = &v + return s +} + +type GetLifecyclePolicyPreviewInput struct { + _ struct{} `type:"structure"` + + // An optional parameter that filters results based on image tag status and + // all tags, if tagged. + Filter *LifecyclePolicyPreviewFilter `locationName:"filter" type:"structure"` + + // The list of imageIDs to be included. + ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list"` + + // The maximum number of repository results returned by GetLifecyclePolicyPreviewRequest + // in paginated output. When this parameter is used, GetLifecyclePolicyPreviewRequest + // only returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another GetLifecyclePolicyPreviewRequest request with the returned nextToken + // value. This value can be between 1 and 1000. If this parameter is not used, + // then GetLifecyclePolicyPreviewRequest returns up to 100 results and a nextToken + // value, if applicable. This option cannot be used when you specify images + // with imageIds. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The nextToken value returned from a previous paginated GetLifecyclePolicyPreviewRequest + // request where maxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the nextToken value. This value is null when there are no more results + // to return. This option cannot be used when you specify images with imageIds. + NextToken *string `locationName:"nextToken" type:"string"` + + // The AWS account ID associated with the registry that contains the repository. + // If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository. 
+ // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetLifecyclePolicyPreviewInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLifecyclePolicyPreviewInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetLifecyclePolicyPreviewInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetLifecyclePolicyPreviewInput"} + if s.ImageIds != nil && len(s.ImageIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ImageIds", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + if s.ImageIds != nil { + for i, v := range s.ImageIds { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ImageIds", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *GetLifecyclePolicyPreviewInput) SetFilter(v *LifecyclePolicyPreviewFilter) *GetLifecyclePolicyPreviewInput { + s.Filter = v + return s +} + +// SetImageIds sets the ImageIds field's value. +func (s *GetLifecyclePolicyPreviewInput) SetImageIds(v []*ImageIdentifier) *GetLifecyclePolicyPreviewInput { + s.ImageIds = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetLifecyclePolicyPreviewInput) SetMaxResults(v int64) *GetLifecyclePolicyPreviewInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetLifecyclePolicyPreviewInput) SetNextToken(v string) *GetLifecyclePolicyPreviewInput { + s.NextToken = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *GetLifecyclePolicyPreviewInput) SetRegistryId(v string) *GetLifecyclePolicyPreviewInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *GetLifecyclePolicyPreviewInput) SetRepositoryName(v string) *GetLifecyclePolicyPreviewInput { + s.RepositoryName = &v + return s +} + +type GetLifecyclePolicyPreviewOutput struct { + _ struct{} `type:"structure"` + + // The JSON lifecycle policy text. + LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"` + + // The nextToken value to include in a future GetLifecyclePolicyPreview request. + // When the results of a GetLifecyclePolicyPreview request exceed maxResults, + // this value can be used to retrieve the next page of results. This value is + // null when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The results of the lifecycle policy preview request. + PreviewResults []*LifecyclePolicyPreviewResult `locationName:"previewResults" type:"list"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. 
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` + + // The status of the lifecycle policy preview request. + Status *string `locationName:"status" type:"string" enum:"LifecyclePolicyPreviewStatus"` + + // The list of images that is returned as a result of the action. + Summary *LifecyclePolicyPreviewSummary `locationName:"summary" type:"structure"` +} + +// String returns the string representation +func (s GetLifecyclePolicyPreviewOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLifecyclePolicyPreviewOutput) GoString() string { + return s.String() +} + +// SetLifecyclePolicyText sets the LifecyclePolicyText field's value. +func (s *GetLifecyclePolicyPreviewOutput) SetLifecyclePolicyText(v string) *GetLifecyclePolicyPreviewOutput { + s.LifecyclePolicyText = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetLifecyclePolicyPreviewOutput) SetNextToken(v string) *GetLifecyclePolicyPreviewOutput { + s.NextToken = &v + return s +} + +// SetPreviewResults sets the PreviewResults field's value. +func (s *GetLifecyclePolicyPreviewOutput) SetPreviewResults(v []*LifecyclePolicyPreviewResult) *GetLifecyclePolicyPreviewOutput { + s.PreviewResults = v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *GetLifecyclePolicyPreviewOutput) SetRegistryId(v string) *GetLifecyclePolicyPreviewOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *GetLifecyclePolicyPreviewOutput) SetRepositoryName(v string) *GetLifecyclePolicyPreviewOutput { + s.RepositoryName = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *GetLifecyclePolicyPreviewOutput) SetStatus(v string) *GetLifecyclePolicyPreviewOutput { + s.Status = &v + return s +} + +// SetSummary sets the Summary field's value. +func (s *GetLifecyclePolicyPreviewOutput) SetSummary(v *LifecyclePolicyPreviewSummary) *GetLifecyclePolicyPreviewOutput { + s.Summary = v + return s +} + +type GetRepositoryPolicyInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID associated with the registry that contains the repository. + // If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository with the policy to retrieve. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRepositoryPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRepositoryPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRepositoryPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRepositoryPolicyInput"} + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRegistryId sets the RegistryId field's value. 
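+
+// Usage sketch (illustrative only; not part of the generated file):
+// GetLifecyclePolicyPreview reads back a preview started with
+// StartLifecyclePolicyPreview; omitting LifecyclePolicyText previews the policy
+// already attached to the repository. A plain polling loop is shown; recent SDK
+// versions also ship a WaitUntilLifecyclePolicyPreviewComplete waiter. Names
+// are placeholders.
+//
+//	repo := aws.String("my-repo") // placeholder
+//	if _, err := svc.StartLifecyclePolicyPreview(&ecr.StartLifecyclePolicyPreviewInput{
+//		RepositoryName: repo,
+//	}); err != nil {
+//		return err
+//	}
+//	for {
+//		out, err := svc.GetLifecyclePolicyPreview(&ecr.GetLifecyclePolicyPreviewInput{RepositoryName: repo})
+//		if err != nil {
+//			return err
+//		}
+//		if aws.StringValue(out.Status) == ecr.LifecyclePolicyPreviewStatusComplete {
+//			fmt.Println(len(out.PreviewResults), "images matched on the first page")
+//			break
+//		}
+//		time.Sleep(5 * time.Second)
+//	}
+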
+func (s *GetRepositoryPolicyInput) SetRegistryId(v string) *GetRepositoryPolicyInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *GetRepositoryPolicyInput) SetRepositoryName(v string) *GetRepositoryPolicyInput { + s.RepositoryName = &v + return s +} + +type GetRepositoryPolicyOutput struct { + _ struct{} `type:"structure"` + + // The JSON repository policy text associated with the repository. + PolicyText *string `locationName:"policyText" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s GetRepositoryPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRepositoryPolicyOutput) GoString() string { + return s.String() +} + +// SetPolicyText sets the PolicyText field's value. +func (s *GetRepositoryPolicyOutput) SetPolicyText(v string) *GetRepositoryPolicyOutput { + s.PolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *GetRepositoryPolicyOutput) SetRegistryId(v string) *GetRepositoryPolicyOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *GetRepositoryPolicyOutput) SetRepositoryName(v string) *GetRepositoryPolicyOutput { + s.RepositoryName = &v + return s +} + +// An object representing an Amazon ECR image. +type Image struct { + _ struct{} `type:"structure"` + + // An object containing the image tag and image digest associated with an image. + ImageId *ImageIdentifier `locationName:"imageId" type:"structure"` + + // The image manifest associated with the image. + ImageManifest *string `locationName:"imageManifest" min:"1" type:"string"` + + // The media type associated with the image manifest. + ImageManifestMediaType *string `locationName:"imageManifestMediaType" type:"string"` + + // The AWS account ID associated with the registry containing the image. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository associated with the image. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s Image) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Image) GoString() string { + return s.String() +} + +// SetImageId sets the ImageId field's value. +func (s *Image) SetImageId(v *ImageIdentifier) *Image { + s.ImageId = v + return s +} + +// SetImageManifest sets the ImageManifest field's value. +func (s *Image) SetImageManifest(v string) *Image { + s.ImageManifest = &v + return s +} + +// SetImageManifestMediaType sets the ImageManifestMediaType field's value. +func (s *Image) SetImageManifestMediaType(v string) *Image { + s.ImageManifestMediaType = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *Image) SetRegistryId(v string) *Image { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *Image) SetRepositoryName(v string) *Image { + s.RepositoryName = &v + return s +} + +// The specified image has already been pushed, and there were no changes to +// the manifest or image tag after the last push. 
+type ImageAlreadyExistsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The error message associated with the exception. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ImageAlreadyExistsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageAlreadyExistsException) GoString() string { + return s.String() +} + +func newErrorImageAlreadyExistsException(v protocol.ResponseMetadata) error { + return &ImageAlreadyExistsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ImageAlreadyExistsException) Code() string { + return "ImageAlreadyExistsException" +} + +// Message returns the exception's message. +func (s *ImageAlreadyExistsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ImageAlreadyExistsException) OrigErr() error { + return nil +} + +func (s *ImageAlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// StatusCode returns the HTTP status code for the request's response error. +func (s *ImageAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for the request. +func (s *ImageAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID +} + +// An object that describes an image returned by a DescribeImages operation. +type ImageDetail struct { + _ struct{} `type:"structure"` + + // The sha256 digest of the image manifest. + ImageDigest *string `locationName:"imageDigest" type:"string"` + + // The date and time, expressed in standard JavaScript date format, at which + // the current image was pushed to the repository. + ImagePushedAt *time.Time `locationName:"imagePushedAt" type:"timestamp"` + + // A summary of the last completed image scan. + ImageScanFindingsSummary *ImageScanFindingsSummary `locationName:"imageScanFindingsSummary" type:"structure"` + + // The current state of the scan. + ImageScanStatus *ImageScanStatus `locationName:"imageScanStatus" type:"structure"` + + // The size, in bytes, of the image in the repository. + // + // If the image is a manifest list, this will be the max size of all manifests + // in the list. + // + // Beginning with Docker version 1.9, the Docker client compresses image layers + // before pushing them to a V2 Docker registry. The output of the docker images + // command shows the uncompressed image size, so it may return a larger image + // size than the image sizes returned by DescribeImages. + ImageSizeInBytes *int64 `locationName:"imageSizeInBytes" type:"long"` + + // The list of tags associated with this image. + ImageTags []*string `locationName:"imageTags" type:"list"` + + // The AWS account ID associated with the registry to which this image belongs. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository to which this image belongs. 
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s ImageDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageDetail) GoString() string { + return s.String() +} + +// SetImageDigest sets the ImageDigest field's value. +func (s *ImageDetail) SetImageDigest(v string) *ImageDetail { + s.ImageDigest = &v + return s +} + +// SetImagePushedAt sets the ImagePushedAt field's value. +func (s *ImageDetail) SetImagePushedAt(v time.Time) *ImageDetail { + s.ImagePushedAt = &v + return s +} + +// SetImageScanFindingsSummary sets the ImageScanFindingsSummary field's value. +func (s *ImageDetail) SetImageScanFindingsSummary(v *ImageScanFindingsSummary) *ImageDetail { + s.ImageScanFindingsSummary = v + return s +} + +// SetImageScanStatus sets the ImageScanStatus field's value. +func (s *ImageDetail) SetImageScanStatus(v *ImageScanStatus) *ImageDetail { + s.ImageScanStatus = v + return s +} + +// SetImageSizeInBytes sets the ImageSizeInBytes field's value. +func (s *ImageDetail) SetImageSizeInBytes(v int64) *ImageDetail { + s.ImageSizeInBytes = &v + return s +} + +// SetImageTags sets the ImageTags field's value. +func (s *ImageDetail) SetImageTags(v []*string) *ImageDetail { + s.ImageTags = v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *ImageDetail) SetRegistryId(v string) *ImageDetail { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *ImageDetail) SetRepositoryName(v string) *ImageDetail { + s.RepositoryName = &v + return s +} + +// An object representing an Amazon ECR image failure. +type ImageFailure struct { + _ struct{} `type:"structure"` + + // The code associated with the failure. + FailureCode *string `locationName:"failureCode" type:"string" enum:"ImageFailureCode"` + + // The reason for the failure. + FailureReason *string `locationName:"failureReason" type:"string"` + + // The image ID associated with the failure. + ImageId *ImageIdentifier `locationName:"imageId" type:"structure"` +} + +// String returns the string representation +func (s ImageFailure) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageFailure) GoString() string { + return s.String() +} + +// SetFailureCode sets the FailureCode field's value. +func (s *ImageFailure) SetFailureCode(v string) *ImageFailure { + s.FailureCode = &v + return s +} + +// SetFailureReason sets the FailureReason field's value. +func (s *ImageFailure) SetFailureReason(v string) *ImageFailure { + s.FailureReason = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *ImageFailure) SetImageId(v *ImageIdentifier) *ImageFailure { + s.ImageId = v + return s +} + +// An object with identifying information for an Amazon ECR image. +type ImageIdentifier struct { + _ struct{} `type:"structure"` + + // The sha256 digest of the image manifest. + ImageDigest *string `locationName:"imageDigest" type:"string"` + + // The tag used for the image. + ImageTag *string `locationName:"imageTag" min:"1" type:"string"` +} + +// String returns the string representation +func (s ImageIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageIdentifier) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ImageIdentifier) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ImageIdentifier"} + if s.ImageTag != nil && len(*s.ImageTag) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ImageTag", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetImageDigest sets the ImageDigest field's value. +func (s *ImageIdentifier) SetImageDigest(v string) *ImageIdentifier { + s.ImageDigest = &v + return s +} + +// SetImageTag sets the ImageTag field's value. +func (s *ImageIdentifier) SetImageTag(v string) *ImageIdentifier { + s.ImageTag = &v + return s +} + +// The image requested does not exist in the specified repository. +type ImageNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ImageNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageNotFoundException) GoString() string { + return s.String() +} + +func newErrorImageNotFoundException(v protocol.ResponseMetadata) error { + return &ImageNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ImageNotFoundException) Code() string { + return "ImageNotFoundException" +} + +// Message returns the exception's message. +func (s *ImageNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ImageNotFoundException) OrigErr() error { + return nil +} + +func (s *ImageNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ImageNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ImageNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Contains information about an image scan finding. +type ImageScanFinding struct { + _ struct{} `type:"structure"` + + // A collection of attributes of the host from which the finding is generated. + Attributes []*Attribute `locationName:"attributes" type:"list"` + + // The description of the finding. + Description *string `locationName:"description" type:"string"` + + // The name associated with the finding, usually a CVE number. + Name *string `locationName:"name" type:"string"` + + // The finding severity. + Severity *string `locationName:"severity" type:"string" enum:"FindingSeverity"` + + // A link containing additional details about the security vulnerability. + Uri *string `locationName:"uri" type:"string"` +} + +// String returns the string representation +func (s ImageScanFinding) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageScanFinding) GoString() string { + return s.String() +} + +// SetAttributes sets the Attributes field's value. +func (s *ImageScanFinding) SetAttributes(v []*Attribute) *ImageScanFinding { + s.Attributes = v + return s +} + +// SetDescription sets the Description field's value. +func (s *ImageScanFinding) SetDescription(v string) *ImageScanFinding { + s.Description = &v + return s +} + +// SetName sets the Name field's value. 
+func (s *ImageScanFinding) SetName(v string) *ImageScanFinding { + s.Name = &v + return s +} + +// SetSeverity sets the Severity field's value. +func (s *ImageScanFinding) SetSeverity(v string) *ImageScanFinding { + s.Severity = &v + return s +} + +// SetUri sets the Uri field's value. +func (s *ImageScanFinding) SetUri(v string) *ImageScanFinding { + s.Uri = &v + return s +} + +// The details of an image scan. +type ImageScanFindings struct { + _ struct{} `type:"structure"` + + // The image vulnerability counts, sorted by severity. + FindingSeverityCounts map[string]*int64 `locationName:"findingSeverityCounts" type:"map"` + + // The findings from the image scan. + Findings []*ImageScanFinding `locationName:"findings" type:"list"` + + // The time of the last completed image scan. + ImageScanCompletedAt *time.Time `locationName:"imageScanCompletedAt" type:"timestamp"` + + // The time when the vulnerability data was last scanned. + VulnerabilitySourceUpdatedAt *time.Time `locationName:"vulnerabilitySourceUpdatedAt" type:"timestamp"` +} + +// String returns the string representation +func (s ImageScanFindings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageScanFindings) GoString() string { + return s.String() +} + +// SetFindingSeverityCounts sets the FindingSeverityCounts field's value. +func (s *ImageScanFindings) SetFindingSeverityCounts(v map[string]*int64) *ImageScanFindings { + s.FindingSeverityCounts = v + return s +} + +// SetFindings sets the Findings field's value. +func (s *ImageScanFindings) SetFindings(v []*ImageScanFinding) *ImageScanFindings { + s.Findings = v + return s +} + +// SetImageScanCompletedAt sets the ImageScanCompletedAt field's value. +func (s *ImageScanFindings) SetImageScanCompletedAt(v time.Time) *ImageScanFindings { + s.ImageScanCompletedAt = &v + return s +} + +// SetVulnerabilitySourceUpdatedAt sets the VulnerabilitySourceUpdatedAt field's value. +func (s *ImageScanFindings) SetVulnerabilitySourceUpdatedAt(v time.Time) *ImageScanFindings { + s.VulnerabilitySourceUpdatedAt = &v + return s +} + +// A summary of the last completed image scan. +type ImageScanFindingsSummary struct { + _ struct{} `type:"structure"` + + // The image vulnerability counts, sorted by severity. + FindingSeverityCounts map[string]*int64 `locationName:"findingSeverityCounts" type:"map"` + + // The time of the last completed image scan. + ImageScanCompletedAt *time.Time `locationName:"imageScanCompletedAt" type:"timestamp"` + + // The time when the vulnerability data was last scanned. + VulnerabilitySourceUpdatedAt *time.Time `locationName:"vulnerabilitySourceUpdatedAt" type:"timestamp"` +} + +// String returns the string representation +func (s ImageScanFindingsSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageScanFindingsSummary) GoString() string { + return s.String() +} + +// SetFindingSeverityCounts sets the FindingSeverityCounts field's value. +func (s *ImageScanFindingsSummary) SetFindingSeverityCounts(v map[string]*int64) *ImageScanFindingsSummary { + s.FindingSeverityCounts = v + return s +} + +// SetImageScanCompletedAt sets the ImageScanCompletedAt field's value. +func (s *ImageScanFindingsSummary) SetImageScanCompletedAt(v time.Time) *ImageScanFindingsSummary { + s.ImageScanCompletedAt = &v + return s +} + +// SetVulnerabilitySourceUpdatedAt sets the VulnerabilitySourceUpdatedAt field's value. 
+func (s *ImageScanFindingsSummary) SetVulnerabilitySourceUpdatedAt(v time.Time) *ImageScanFindingsSummary { + s.VulnerabilitySourceUpdatedAt = &v + return s +} + +// The current status of an image scan. +type ImageScanStatus struct { + _ struct{} `type:"structure"` + + // The description of the image scan status. + Description *string `locationName:"description" type:"string"` + + // The current state of an image scan. + Status *string `locationName:"status" type:"string" enum:"ScanStatus"` +} + +// String returns the string representation +func (s ImageScanStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageScanStatus) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *ImageScanStatus) SetDescription(v string) *ImageScanStatus { + s.Description = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ImageScanStatus) SetStatus(v string) *ImageScanStatus { + s.Status = &v + return s +} + +// The image scanning configuration for a repository. +type ImageScanningConfiguration struct { + _ struct{} `type:"structure"` + + // The setting that determines whether images are scanned after being pushed + // to a repository. If set to true, images will be scanned after being pushed. + // If this parameter is not specified, it will default to false and images will + // not be scanned unless a scan is manually started with the StartImageScan + // API. + ScanOnPush *bool `locationName:"scanOnPush" type:"boolean"` +} + +// String returns the string representation +func (s ImageScanningConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageScanningConfiguration) GoString() string { + return s.String() +} + +// SetScanOnPush sets the ScanOnPush field's value. +func (s *ImageScanningConfiguration) SetScanOnPush(v bool) *ImageScanningConfiguration { + s.ScanOnPush = &v + return s +} + +// The specified image is tagged with a tag that already exists. The repository +// is configured for tag immutability. +type ImageTagAlreadyExistsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ImageTagAlreadyExistsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageTagAlreadyExistsException) GoString() string { + return s.String() +} + +func newErrorImageTagAlreadyExistsException(v protocol.ResponseMetadata) error { + return &ImageTagAlreadyExistsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ImageTagAlreadyExistsException) Code() string { + return "ImageTagAlreadyExistsException" +} + +// Message returns the exception's message. +func (s *ImageTagAlreadyExistsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ImageTagAlreadyExistsException) OrigErr() error { + return nil +} + +func (s *ImageTagAlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *ImageTagAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ImageTagAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID +} + +type InitiateLayerUploadInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID associated with the registry to which you intend to upload + // layers. If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository to which you intend to upload layers. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s InitiateLayerUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InitiateLayerUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InitiateLayerUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InitiateLayerUploadInput"} + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRegistryId sets the RegistryId field's value. +func (s *InitiateLayerUploadInput) SetRegistryId(v string) *InitiateLayerUploadInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *InitiateLayerUploadInput) SetRepositoryName(v string) *InitiateLayerUploadInput { + s.RepositoryName = &v + return s +} + +type InitiateLayerUploadOutput struct { + _ struct{} `type:"structure"` + + // The size, in bytes, that Amazon ECR expects future layer part uploads to + // be. + PartSize *int64 `locationName:"partSize" type:"long"` + + // The upload ID for the layer upload. This parameter is passed to further UploadLayerPart + // and CompleteLayerUpload operations. + UploadId *string `locationName:"uploadId" type:"string"` +} + +// String returns the string representation +func (s InitiateLayerUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InitiateLayerUploadOutput) GoString() string { + return s.String() +} + +// SetPartSize sets the PartSize field's value. +func (s *InitiateLayerUploadOutput) SetPartSize(v int64) *InitiateLayerUploadOutput { + s.PartSize = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *InitiateLayerUploadOutput) SetUploadId(v string) *InitiateLayerUploadOutput { + s.UploadId = &v + return s +} + +// The layer digest calculation performed by Amazon ECR upon receipt of the +// image layer does not match the digest specified. +type InvalidLayerException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The error message associated with the exception. 
+ Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s InvalidLayerException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidLayerException) GoString() string { + return s.String() +} + +func newErrorInvalidLayerException(v protocol.ResponseMetadata) error { + return &InvalidLayerException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidLayerException) Code() string { + return "InvalidLayerException" +} + +// Message returns the exception's message. +func (s *InvalidLayerException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidLayerException) OrigErr() error { + return nil +} + +func (s *InvalidLayerException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidLayerException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidLayerException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The layer part size is not valid, or the first byte specified is not consecutive +// to the last byte of a previous layer part upload. +type InvalidLayerPartException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The last valid byte received from the layer part upload that is associated + // with the exception. + LastValidByteReceived *int64 `locationName:"lastValidByteReceived" type:"long"` + + // The error message associated with the exception. + Message_ *string `locationName:"message" type:"string"` + + // The registry ID associated with the exception. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the exception. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` + + // The upload ID associated with the exception. + UploadId *string `locationName:"uploadId" type:"string"` +} + +// String returns the string representation +func (s InvalidLayerPartException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidLayerPartException) GoString() string { + return s.String() +} + +func newErrorInvalidLayerPartException(v protocol.ResponseMetadata) error { + return &InvalidLayerPartException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidLayerPartException) Code() string { + return "InvalidLayerPartException" +} + +// Message returns the exception's message. +func (s *InvalidLayerPartException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidLayerPartException) OrigErr() error { + return nil +} + +func (s *InvalidLayerPartException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidLayerPartException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
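The InitiateLayerUpload types above start the layer-upload handshake: the returned uploadId and partSize drive the follow-up UploadLayerPart and CompleteLayerUpload calls. A minimal sketch, with an illustrative repository name:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))

	in := &ecr.InitiateLayerUploadInput{RepositoryName: aws.String("my-repo")}
	if err := in.Validate(); err != nil { // enforces the min:"2" name length locally
		log.Fatal(err)
	}
	out, err := svc.InitiateLayerUpload(in)
	if err != nil {
		log.Fatal(err)
	}
	// Subsequent UploadLayerPart calls must carry this upload ID and chunk
	// the layer into parts of the advertised size.
	fmt.Println(aws.StringValue(out.UploadId), aws.Int64Value(out.PartSize))
}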
+func (s *InvalidLayerPartException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The specified parameter is invalid. Review the available parameters for the +// API request. +type InvalidParameterException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The error message associated with the exception. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s InvalidParameterException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidParameterException) GoString() string { + return s.String() +} + +func newErrorInvalidParameterException(v protocol.ResponseMetadata) error { + return &InvalidParameterException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidParameterException) Code() string { + return "InvalidParameterException" +} + +// Message returns the exception's message. +func (s *InvalidParameterException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidParameterException) OrigErr() error { + return nil +} + +func (s *InvalidParameterException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID +} + +// An invalid parameter has been specified. Tag keys can have a maximum character +// length of 128 characters, and tag values can have a maximum length of 256 +// characters. +type InvalidTagParameterException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s InvalidTagParameterException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidTagParameterException) GoString() string { + return s.String() +} + +func newErrorInvalidTagParameterException(v protocol.ResponseMetadata) error { + return &InvalidTagParameterException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidTagParameterException) Code() string { + return "InvalidTagParameterException" +} + +// Message returns the exception's message. +func (s *InvalidTagParameterException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidTagParameterException) OrigErr() error { + return nil +} + +func (s *InvalidTagParameterException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidTagParameterException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidTagParameterException) RequestID() string { + return s.RespMetadata.RequestID +} + +// An object representing an Amazon ECR image layer. 
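All of the exception types in this file expose the same awserr.Error / RequestFailure surface shown above (Code, Message, StatusCode, RequestID), so they can be unwrapped with Go 1.13 errors. A sketch; the failing call and out-of-range parameter are illustrative, and whether the service rejects it as InvalidParameterException is a service-side behavior, not guaranteed by this code.

package main

import (
	"errors"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))

	// maxResults above the documented 1000 ceiling passes local validation
	// (which only checks the minimum of 1) and may be rejected server-side.
	_, err := svc.ListImages(&ecr.ListImagesInput{
		RepositoryName: aws.String("my-repo"),
		MaxResults:     aws.Int64(5000),
	})

	var ipe *ecr.InvalidParameterException
	if errors.As(err, &ipe) {
		log.Printf("%s: %s (HTTP %d, request %s)",
			ipe.Code(), ipe.Message(), ipe.StatusCode(), ipe.RequestID())
	} else if err != nil {
		log.Fatal(err)
	}
}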
+type Layer struct { + _ struct{} `type:"structure"` + + // The availability status of the image layer. + LayerAvailability *string `locationName:"layerAvailability" type:"string" enum:"LayerAvailability"` + + // The sha256 digest of the image layer. + LayerDigest *string `locationName:"layerDigest" type:"string"` + + // The size, in bytes, of the image layer. + LayerSize *int64 `locationName:"layerSize" type:"long"` + + // The media type of the layer, such as application/vnd.docker.image.rootfs.diff.tar.gzip + // or application/vnd.oci.image.layer.v1.tar+gzip. + MediaType *string `locationName:"mediaType" type:"string"` +} + +// String returns the string representation +func (s Layer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Layer) GoString() string { + return s.String() +} + +// SetLayerAvailability sets the LayerAvailability field's value. +func (s *Layer) SetLayerAvailability(v string) *Layer { + s.LayerAvailability = &v + return s +} + +// SetLayerDigest sets the LayerDigest field's value. +func (s *Layer) SetLayerDigest(v string) *Layer { + s.LayerDigest = &v + return s +} + +// SetLayerSize sets the LayerSize field's value. +func (s *Layer) SetLayerSize(v int64) *Layer { + s.LayerSize = &v + return s +} + +// SetMediaType sets the MediaType field's value. +func (s *Layer) SetMediaType(v string) *Layer { + s.MediaType = &v + return s +} + +// The image layer already exists in the associated repository. +type LayerAlreadyExistsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The error message associated with the exception. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s LayerAlreadyExistsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LayerAlreadyExistsException) GoString() string { + return s.String() +} + +func newErrorLayerAlreadyExistsException(v protocol.ResponseMetadata) error { + return &LayerAlreadyExistsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *LayerAlreadyExistsException) Code() string { + return "LayerAlreadyExistsException" +} + +// Message returns the exception's message. +func (s *LayerAlreadyExistsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *LayerAlreadyExistsException) OrigErr() error { + return nil +} + +func (s *LayerAlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *LayerAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *LayerAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID +} + +// An object representing an Amazon ECR image layer failure. +type LayerFailure struct { + _ struct{} `type:"structure"` + + // The failure code associated with the failure. + FailureCode *string `locationName:"failureCode" type:"string" enum:"LayerFailureCode"` + + // The reason for the failure. + FailureReason *string `locationName:"failureReason" type:"string"` + + // The layer digest associated with the failure. 
+ LayerDigest *string `locationName:"layerDigest" type:"string"` +} + +// String returns the string representation +func (s LayerFailure) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LayerFailure) GoString() string { + return s.String() +} + +// SetFailureCode sets the FailureCode field's value. +func (s *LayerFailure) SetFailureCode(v string) *LayerFailure { + s.FailureCode = &v + return s +} + +// SetFailureReason sets the FailureReason field's value. +func (s *LayerFailure) SetFailureReason(v string) *LayerFailure { + s.FailureReason = &v + return s +} + +// SetLayerDigest sets the LayerDigest field's value. +func (s *LayerFailure) SetLayerDigest(v string) *LayerFailure { + s.LayerDigest = &v + return s +} + +// The specified layer is not available because it is not associated with an +// image. Unassociated image layers may be cleaned up at any time. +type LayerInaccessibleException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The error message associated with the exception. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s LayerInaccessibleException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LayerInaccessibleException) GoString() string { + return s.String() +} + +func newErrorLayerInaccessibleException(v protocol.ResponseMetadata) error { + return &LayerInaccessibleException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *LayerInaccessibleException) Code() string { + return "LayerInaccessibleException" +} + +// Message returns the exception's message. +func (s *LayerInaccessibleException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *LayerInaccessibleException) OrigErr() error { + return nil +} + +func (s *LayerInaccessibleException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *LayerInaccessibleException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *LayerInaccessibleException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Layer parts must be at least 5 MiB in size. +type LayerPartTooSmallException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The error message associated with the exception. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s LayerPartTooSmallException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LayerPartTooSmallException) GoString() string { + return s.String() +} + +func newErrorLayerPartTooSmallException(v protocol.ResponseMetadata) error { + return &LayerPartTooSmallException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *LayerPartTooSmallException) Code() string { + return "LayerPartTooSmallException" +} + +// Message returns the exception's message. 
+func (s *LayerPartTooSmallException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *LayerPartTooSmallException) OrigErr() error { + return nil +} + +func (s *LayerPartTooSmallException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *LayerPartTooSmallException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *LayerPartTooSmallException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The specified layers could not be found, or the specified layer is not valid +// for this repository. +type LayersNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The error message associated with the exception. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s LayersNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LayersNotFoundException) GoString() string { + return s.String() +} + +func newErrorLayersNotFoundException(v protocol.ResponseMetadata) error { + return &LayersNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *LayersNotFoundException) Code() string { + return "LayersNotFoundException" +} + +// Message returns the exception's message. +func (s *LayersNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *LayersNotFoundException) OrigErr() error { + return nil +} + +func (s *LayersNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *LayersNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *LayersNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The lifecycle policy could not be found, and no policy is set to the repository. +type LifecyclePolicyNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s LifecyclePolicyNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecyclePolicyNotFoundException) GoString() string { + return s.String() +} + +func newErrorLifecyclePolicyNotFoundException(v protocol.ResponseMetadata) error { + return &LifecyclePolicyNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *LifecyclePolicyNotFoundException) Code() string { + return "LifecyclePolicyNotFoundException" +} + +// Message returns the exception's message. +func (s *LifecyclePolicyNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *LifecyclePolicyNotFoundException) OrigErr() error { + return nil +} + +func (s *LifecyclePolicyNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *LifecyclePolicyNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *LifecyclePolicyNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The filter for the lifecycle policy preview. +type LifecyclePolicyPreviewFilter struct { + _ struct{} `type:"structure"` + + // The tag status of the image. + TagStatus *string `locationName:"tagStatus" type:"string" enum:"TagStatus"` +} + +// String returns the string representation +func (s LifecyclePolicyPreviewFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecyclePolicyPreviewFilter) GoString() string { + return s.String() +} + +// SetTagStatus sets the TagStatus field's value. +func (s *LifecyclePolicyPreviewFilter) SetTagStatus(v string) *LifecyclePolicyPreviewFilter { + s.TagStatus = &v + return s +} + +// The previous lifecycle policy preview request has not completed. Please try +// again later. +type LifecyclePolicyPreviewInProgressException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s LifecyclePolicyPreviewInProgressException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecyclePolicyPreviewInProgressException) GoString() string { + return s.String() +} + +func newErrorLifecyclePolicyPreviewInProgressException(v protocol.ResponseMetadata) error { + return &LifecyclePolicyPreviewInProgressException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *LifecyclePolicyPreviewInProgressException) Code() string { + return "LifecyclePolicyPreviewInProgressException" +} + +// Message returns the exception's message. +func (s *LifecyclePolicyPreviewInProgressException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *LifecyclePolicyPreviewInProgressException) OrigErr() error { + return nil +} + +func (s *LifecyclePolicyPreviewInProgressException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *LifecyclePolicyPreviewInProgressException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *LifecyclePolicyPreviewInProgressException) RequestID() string { + return s.RespMetadata.RequestID +} + +// There is no dry run for this repository. 
+type LifecyclePolicyPreviewNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s LifecyclePolicyPreviewNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecyclePolicyPreviewNotFoundException) GoString() string { + return s.String() +} + +func newErrorLifecyclePolicyPreviewNotFoundException(v protocol.ResponseMetadata) error { + return &LifecyclePolicyPreviewNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *LifecyclePolicyPreviewNotFoundException) Code() string { + return "LifecyclePolicyPreviewNotFoundException" +} + +// Message returns the exception's message. +func (s *LifecyclePolicyPreviewNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *LifecyclePolicyPreviewNotFoundException) OrigErr() error { + return nil +} + +func (s *LifecyclePolicyPreviewNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *LifecyclePolicyPreviewNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *LifecyclePolicyPreviewNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The result of the lifecycle policy preview. +type LifecyclePolicyPreviewResult struct { + _ struct{} `type:"structure"` + + // The type of action to be taken. + Action *LifecyclePolicyRuleAction `locationName:"action" type:"structure"` + + // The priority of the applied rule. + AppliedRulePriority *int64 `locationName:"appliedRulePriority" min:"1" type:"integer"` + + // The sha256 digest of the image manifest. + ImageDigest *string `locationName:"imageDigest" type:"string"` + + // The date and time, expressed in standard JavaScript date format, at which + // the current image was pushed to the repository. + ImagePushedAt *time.Time `locationName:"imagePushedAt" type:"timestamp"` + + // The list of tags associated with this image. + ImageTags []*string `locationName:"imageTags" type:"list"` +} + +// String returns the string representation +func (s LifecyclePolicyPreviewResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecyclePolicyPreviewResult) GoString() string { + return s.String() +} + +// SetAction sets the Action field's value. +func (s *LifecyclePolicyPreviewResult) SetAction(v *LifecyclePolicyRuleAction) *LifecyclePolicyPreviewResult { + s.Action = v + return s +} + +// SetAppliedRulePriority sets the AppliedRulePriority field's value. +func (s *LifecyclePolicyPreviewResult) SetAppliedRulePriority(v int64) *LifecyclePolicyPreviewResult { + s.AppliedRulePriority = &v + return s +} + +// SetImageDigest sets the ImageDigest field's value. +func (s *LifecyclePolicyPreviewResult) SetImageDigest(v string) *LifecyclePolicyPreviewResult { + s.ImageDigest = &v + return s +} + +// SetImagePushedAt sets the ImagePushedAt field's value. 
+func (s *LifecyclePolicyPreviewResult) SetImagePushedAt(v time.Time) *LifecyclePolicyPreviewResult { + s.ImagePushedAt = &v + return s +} + +// SetImageTags sets the ImageTags field's value. +func (s *LifecyclePolicyPreviewResult) SetImageTags(v []*string) *LifecyclePolicyPreviewResult { + s.ImageTags = v + return s +} + +// The summary of the lifecycle policy preview request. +type LifecyclePolicyPreviewSummary struct { + _ struct{} `type:"structure"` + + // The number of expiring images. + ExpiringImageTotalCount *int64 `locationName:"expiringImageTotalCount" type:"integer"` +} + +// String returns the string representation +func (s LifecyclePolicyPreviewSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecyclePolicyPreviewSummary) GoString() string { + return s.String() +} + +// SetExpiringImageTotalCount sets the ExpiringImageTotalCount field's value. +func (s *LifecyclePolicyPreviewSummary) SetExpiringImageTotalCount(v int64) *LifecyclePolicyPreviewSummary { + s.ExpiringImageTotalCount = &v + return s +} + +// The type of action to be taken. +type LifecyclePolicyRuleAction struct { + _ struct{} `type:"structure"` + + // The type of action to be taken. + Type *string `locationName:"type" type:"string" enum:"ImageActionType"` +} + +// String returns the string representation +func (s LifecyclePolicyRuleAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecyclePolicyRuleAction) GoString() string { + return s.String() +} + +// SetType sets the Type field's value. +func (s *LifecyclePolicyRuleAction) SetType(v string) *LifecyclePolicyRuleAction { + s.Type = &v + return s +} + +// The operation did not succeed because it would have exceeded a service limit +// for your account. For more information, see Amazon ECR Service Quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) +// in the Amazon Elastic Container Registry User Guide. +type LimitExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The error message associated with the exception. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s LimitExceededException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LimitExceededException) GoString() string { + return s.String() +} + +func newErrorLimitExceededException(v protocol.ResponseMetadata) error { + return &LimitExceededException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *LimitExceededException) Code() string { + return "LimitExceededException" +} + +// Message returns the exception's message. +func (s *LimitExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *LimitExceededException) OrigErr() error { + return nil +} + +func (s *LimitExceededException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +// An object representing a filter on a ListImages operation. +type ListImagesFilter struct { + _ struct{} `type:"structure"` + + // The tag status with which to filter your ListImages results. You can filter + // results based on whether they are TAGGED or UNTAGGED. + TagStatus *string `locationName:"tagStatus" type:"string" enum:"TagStatus"` +} + +// String returns the string representation +func (s ListImagesFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListImagesFilter) GoString() string { + return s.String() +} + +// SetTagStatus sets the TagStatus field's value. +func (s *ListImagesFilter) SetTagStatus(v string) *ListImagesFilter { + s.TagStatus = &v + return s +} + +type ListImagesInput struct { + _ struct{} `type:"structure"` + + // The filter key and value with which to filter your ListImages results. + Filter *ListImagesFilter `locationName:"filter" type:"structure"` + + // The maximum number of image results returned by ListImages in paginated output. + // When this parameter is used, ListImages only returns maxResults results in + // a single page along with a nextToken response element. The remaining results + // of the initial request can be seen by sending another ListImages request + // with the returned nextToken value. This value can be between 1 and 1000. + // If this parameter is not used, then ListImages returns up to 100 results + // and a nextToken value, if applicable. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The nextToken value returned from a previous paginated ListImages request + // where maxResults was used and the results exceeded the value of that parameter. + // Pagination continues from the end of the previous results that returned the + // nextToken value. This value is null when there are no more results to return. + // + // This token should be treated as an opaque identifier that is only used to + // retrieve the next items in a list and not for other programmatic purposes. + NextToken *string `locationName:"nextToken" type:"string"` + + // The AWS account ID associated with the registry that contains the repository + // in which to list images. If you do not specify a registry, the default registry + // is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository with image IDs to be listed. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListImagesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListImagesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListImagesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListImagesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *ListImagesInput) SetFilter(v *ListImagesFilter) *ListImagesInput { + s.Filter = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListImagesInput) SetMaxResults(v int64) *ListImagesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListImagesInput) SetNextToken(v string) *ListImagesInput { + s.NextToken = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *ListImagesInput) SetRegistryId(v string) *ListImagesInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *ListImagesInput) SetRepositoryName(v string) *ListImagesInput { + s.RepositoryName = &v + return s +} + +type ListImagesOutput struct { + _ struct{} `type:"structure"` + + // The list of image IDs for the requested repository. + ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list"` + + // The nextToken value to include in a future ListImages request. When the results + // of a ListImages request exceed maxResults, this value can be used to retrieve + // the next page of results. This value is null when there are no more results + // to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListImagesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListImagesOutput) GoString() string { + return s.String() +} + +// SetImageIds sets the ImageIds field's value. +func (s *ListImagesOutput) SetImageIds(v []*ImageIdentifier) *ListImagesOutput { + s.ImageIds = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListImagesOutput) SetNextToken(v string) *ListImagesOutput { + s.NextToken = &v + return s +} + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that identifies the resource for which to + // list the tags. Currently, the only supported resource is an Amazon ECR repository. + // + // ResourceArn is a required field + ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. 
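Rather than threading nextToken by hand as the ListImages docs above describe, the generated paginator can walk the pages; a minimal sketch with an illustrative repository name:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))
	in := &ecr.ListImagesInput{
		RepositoryName: aws.String("my-repo"),
		Filter:         (&ecr.ListImagesFilter{}).SetTagStatus(ecr.TagStatusTagged),
		MaxResults:     aws.Int64(100),
	}
	// ListImagesPages follows nextToken until the last page, or until the
	// callback returns false.
	err := svc.ListImagesPages(in, func(page *ecr.ListImagesOutput, last bool) bool {
		for _, id := range page.ImageIds {
			fmt.Println(aws.StringValue(id.ImageDigest), aws.StringValue(id.ImageTag))
		}
		return true
	})
	if err != nil {
		log.Fatal(err)
	}
}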
+func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // The tags for the resource. + Tags []*Tag `locationName:"tags" type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { + s.Tags = v + return s +} + +type PutImageInput struct { + _ struct{} `type:"structure"` + + // The image manifest corresponding to the image to be uploaded. + // + // ImageManifest is a required field + ImageManifest *string `locationName:"imageManifest" min:"1" type:"string" required:"true"` + + // The media type of the image manifest. If you push an image manifest that + // does not contain the mediaType field, you must specify the imageManifestMediaType + // in the request. + ImageManifestMediaType *string `locationName:"imageManifestMediaType" type:"string"` + + // The tag to associate with the image. This parameter is required for images + // that use the Docker Image Manifest V2 Schema 2 or OCI formats. + ImageTag *string `locationName:"imageTag" min:"1" type:"string"` + + // The AWS account ID associated with the registry that contains the repository + // in which to put the image. If you do not specify a registry, the default + // registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository in which to put the image. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutImageInput"} + if s.ImageManifest == nil { + invalidParams.Add(request.NewErrParamRequired("ImageManifest")) + } + if s.ImageManifest != nil && len(*s.ImageManifest) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ImageManifest", 1)) + } + if s.ImageTag != nil && len(*s.ImageTag) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ImageTag", 1)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetImageManifest sets the ImageManifest field's value. +func (s *PutImageInput) SetImageManifest(v string) *PutImageInput { + s.ImageManifest = &v + return s +} + +// SetImageManifestMediaType sets the ImageManifestMediaType field's value. +func (s *PutImageInput) SetImageManifestMediaType(v string) *PutImageInput { + s.ImageManifestMediaType = &v + return s +} + +// SetImageTag sets the ImageTag field's value. 
+func (s *PutImageInput) SetImageTag(v string) *PutImageInput { + s.ImageTag = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *PutImageInput) SetRegistryId(v string) *PutImageInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *PutImageInput) SetRepositoryName(v string) *PutImageInput { + s.RepositoryName = &v + return s +} + +type PutImageOutput struct { + _ struct{} `type:"structure"` + + // Details of the image uploaded. + Image *Image `locationName:"image" type:"structure"` +} + +// String returns the string representation +func (s PutImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutImageOutput) GoString() string { + return s.String() +} + +// SetImage sets the Image field's value. +func (s *PutImageOutput) SetImage(v *Image) *PutImageOutput { + s.Image = v + return s +} + +type PutImageScanningConfigurationInput struct { + _ struct{} `type:"structure"` + + // The image scanning configuration for the repository. This setting determines + // whether images are scanned for known vulnerabilities after being pushed to + // the repository. + // + // ImageScanningConfiguration is a required field + ImageScanningConfiguration *ImageScanningConfiguration `locationName:"imageScanningConfiguration" type:"structure" required:"true"` + + // The AWS account ID associated with the registry that contains the repository + // in which to update the image scanning configuration setting. If you do not + // specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository in which to update the image scanning configuration + // setting. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutImageScanningConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutImageScanningConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutImageScanningConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutImageScanningConfigurationInput"} + if s.ImageScanningConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ImageScanningConfiguration")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetImageScanningConfiguration sets the ImageScanningConfiguration field's value. +func (s *PutImageScanningConfigurationInput) SetImageScanningConfiguration(v *ImageScanningConfiguration) *PutImageScanningConfigurationInput { + s.ImageScanningConfiguration = v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *PutImageScanningConfigurationInput) SetRegistryId(v string) *PutImageScanningConfigurationInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. 
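A sketch of turning on scan-on-push with the PutImageScanningConfiguration input above, so that every pushed image is scanned automatically instead of via StartImageScan. The repository name is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))
	out, err := svc.PutImageScanningConfiguration(&ecr.PutImageScanningConfigurationInput{
		RepositoryName:             aws.String("my-repo"),
		ImageScanningConfiguration: (&ecr.ImageScanningConfiguration{}).SetScanOnPush(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("scanOnPush:",
		aws.BoolValue(out.ImageScanningConfiguration.ScanOnPush))
}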
+func (s *PutImageScanningConfigurationInput) SetRepositoryName(v string) *PutImageScanningConfigurationInput { + s.RepositoryName = &v + return s +} + +type PutImageScanningConfigurationOutput struct { + _ struct{} `type:"structure"` + + // The image scanning configuration setting for the repository. + ImageScanningConfiguration *ImageScanningConfiguration `locationName:"imageScanningConfiguration" type:"structure"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s PutImageScanningConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutImageScanningConfigurationOutput) GoString() string { + return s.String() +} + +// SetImageScanningConfiguration sets the ImageScanningConfiguration field's value. +func (s *PutImageScanningConfigurationOutput) SetImageScanningConfiguration(v *ImageScanningConfiguration) *PutImageScanningConfigurationOutput { + s.ImageScanningConfiguration = v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *PutImageScanningConfigurationOutput) SetRegistryId(v string) *PutImageScanningConfigurationOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *PutImageScanningConfigurationOutput) SetRepositoryName(v string) *PutImageScanningConfigurationOutput { + s.RepositoryName = &v + return s +} + +type PutImageTagMutabilityInput struct { + _ struct{} `type:"structure"` + + // The tag mutability setting for the repository. If MUTABLE is specified, image + // tags can be overwritten. If IMMUTABLE is specified, all image tags within + // the repository will be immutable, which prevents them from being overwritten. + // + // ImageTagMutability is a required field + ImageTagMutability *string `locationName:"imageTagMutability" type:"string" required:"true" enum:"ImageTagMutability"` + + // The AWS account ID associated with the registry that contains the repository + // in which to update the image tag mutability settings. If you do not specify + // a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository in which to update the image tag mutability settings. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutImageTagMutabilityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutImageTagMutabilityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutImageTagMutabilityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutImageTagMutabilityInput"} + if s.ImageTagMutability == nil { + invalidParams.Add(request.NewErrParamRequired("ImageTagMutability")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetImageTagMutability sets the ImageTagMutability field's value. +func (s *PutImageTagMutabilityInput) SetImageTagMutability(v string) *PutImageTagMutabilityInput { + s.ImageTagMutability = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *PutImageTagMutabilityInput) SetRegistryId(v string) *PutImageTagMutabilityInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *PutImageTagMutabilityInput) SetRepositoryName(v string) *PutImageTagMutabilityInput { + s.RepositoryName = &v + return s +} + +type PutImageTagMutabilityOutput struct { + _ struct{} `type:"structure"` + + // The image tag mutability setting for the repository. + ImageTagMutability *string `locationName:"imageTagMutability" type:"string" enum:"ImageTagMutability"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s PutImageTagMutabilityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutImageTagMutabilityOutput) GoString() string { + return s.String() +} + +// SetImageTagMutability sets the ImageTagMutability field's value. +func (s *PutImageTagMutabilityOutput) SetImageTagMutability(v string) *PutImageTagMutabilityOutput { + s.ImageTagMutability = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *PutImageTagMutabilityOutput) SetRegistryId(v string) *PutImageTagMutabilityOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *PutImageTagMutabilityOutput) SetRepositoryName(v string) *PutImageTagMutabilityOutput { + s.RepositoryName = &v + return s +} + +type PutLifecyclePolicyInput struct { + _ struct{} `type:"structure"` + + // The JSON lifecycle policy text to apply to the repository. + // + // LifecyclePolicyText is a required field + LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string" required:"true"` + + // The AWS account ID associated with the registry that contains the repository. + // If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository to receive the policy. 
+ // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutLifecyclePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutLifecyclePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutLifecyclePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutLifecyclePolicyInput"} + if s.LifecyclePolicyText == nil { + invalidParams.Add(request.NewErrParamRequired("LifecyclePolicyText")) + } + if s.LifecyclePolicyText != nil && len(*s.LifecyclePolicyText) < 100 { + invalidParams.Add(request.NewErrParamMinLen("LifecyclePolicyText", 100)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLifecyclePolicyText sets the LifecyclePolicyText field's value. +func (s *PutLifecyclePolicyInput) SetLifecyclePolicyText(v string) *PutLifecyclePolicyInput { + s.LifecyclePolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *PutLifecyclePolicyInput) SetRegistryId(v string) *PutLifecyclePolicyInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *PutLifecyclePolicyInput) SetRepositoryName(v string) *PutLifecyclePolicyInput { + s.RepositoryName = &v + return s +} + +type PutLifecyclePolicyOutput struct { + _ struct{} `type:"structure"` + + // The JSON lifecycle policy text. + LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s PutLifecyclePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutLifecyclePolicyOutput) GoString() string { + return s.String() +} + +// SetLifecyclePolicyText sets the LifecyclePolicyText field's value. +func (s *PutLifecyclePolicyOutput) SetLifecyclePolicyText(v string) *PutLifecyclePolicyOutput { + s.LifecyclePolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *PutLifecyclePolicyOutput) SetRegistryId(v string) *PutLifecyclePolicyOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *PutLifecyclePolicyOutput) SetRepositoryName(v string) *PutLifecyclePolicyOutput { + s.RepositoryName = &v + return s +} + +// The manifest list is referencing an image that does not exist. 
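+
+// The mutating request types above all follow the same pattern: fluent
+// setters that return the input for chaining, plus a client-side Validate
+// that catches missing or too-short fields before any request is sent. A
+// minimal sketch for PutLifecyclePolicy, assuming a hypothetical svc client
+// created with ecr.New and a policyJSON string of at least 100 characters:
+//
+//	input := (&ecr.PutLifecyclePolicyInput{}).
+//		SetRepositoryName("my-repo").
+//		SetLifecyclePolicyText(policyJSON)
+//	if err := input.Validate(); err != nil {
+//		return err // request.ErrInvalidParams; nothing was sent to ECR
+//	}
+//	out, err := svc.PutLifecyclePolicy(input)
+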
+type ReferencedImagesNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ReferencedImagesNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReferencedImagesNotFoundException) GoString() string { + return s.String() +} + +func newErrorReferencedImagesNotFoundException(v protocol.ResponseMetadata) error { + return &ReferencedImagesNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ReferencedImagesNotFoundException) Code() string { + return "ReferencedImagesNotFoundException" +} + +// Message returns the exception's message. +func (s *ReferencedImagesNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ReferencedImagesNotFoundException) OrigErr() error { + return nil +} + +func (s *ReferencedImagesNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ReferencedImagesNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ReferencedImagesNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// An object representing a repository. +type Repository struct { + _ struct{} `type:"structure"` + + // The date and time, in JavaScript date format, when the repository was created. + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` + + // The image scanning configuration for a repository. + ImageScanningConfiguration *ImageScanningConfiguration `locationName:"imageScanningConfiguration" type:"structure"` + + // The tag mutability setting for the repository. + ImageTagMutability *string `locationName:"imageTagMutability" type:"string" enum:"ImageTagMutability"` + + // The AWS account ID associated with the registry that contains the repository. + RegistryId *string `locationName:"registryId" type:"string"` + + // The Amazon Resource Name (ARN) that identifies the repository. The ARN contains + // the arn:aws:ecr namespace, followed by the region of the repository, AWS + // account ID of the repository owner, repository namespace, and repository + // name. For example, arn:aws:ecr:region:012345678910:repository/test. + RepositoryArn *string `locationName:"repositoryArn" type:"string"` + + // The name of the repository. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` + + // The URI for the repository. You can use this URI for Docker push or pull + // operations. + RepositoryUri *string `locationName:"repositoryUri" type:"string"` +} + +// String returns the string representation +func (s Repository) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Repository) GoString() string { + return s.String() +} + +// SetCreatedAt sets the CreatedAt field's value. +func (s *Repository) SetCreatedAt(v time.Time) *Repository { + s.CreatedAt = &v + return s +} + +// SetImageScanningConfiguration sets the ImageScanningConfiguration field's value. 
+func (s *Repository) SetImageScanningConfiguration(v *ImageScanningConfiguration) *Repository { + s.ImageScanningConfiguration = v + return s +} + +// SetImageTagMutability sets the ImageTagMutability field's value. +func (s *Repository) SetImageTagMutability(v string) *Repository { + s.ImageTagMutability = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *Repository) SetRegistryId(v string) *Repository { + s.RegistryId = &v + return s +} + +// SetRepositoryArn sets the RepositoryArn field's value. +func (s *Repository) SetRepositoryArn(v string) *Repository { + s.RepositoryArn = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *Repository) SetRepositoryName(v string) *Repository { + s.RepositoryName = &v + return s +} + +// SetRepositoryUri sets the RepositoryUri field's value. +func (s *Repository) SetRepositoryUri(v string) *Repository { + s.RepositoryUri = &v + return s +} + +// The specified repository already exists in the specified registry. +type RepositoryAlreadyExistsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The error message associated with the exception. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s RepositoryAlreadyExistsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RepositoryAlreadyExistsException) GoString() string { + return s.String() +} + +func newErrorRepositoryAlreadyExistsException(v protocol.ResponseMetadata) error { + return &RepositoryAlreadyExistsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *RepositoryAlreadyExistsException) Code() string { + return "RepositoryAlreadyExistsException" +} + +// Message returns the exception's message. +func (s *RepositoryAlreadyExistsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *RepositoryAlreadyExistsException) OrigErr() error { + return nil +} + +func (s *RepositoryAlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *RepositoryAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *RepositoryAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The specified repository contains images. To delete a repository that contains +// images, you must force the deletion with the force parameter. +type RepositoryNotEmptyException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The error message associated with the exception. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s RepositoryNotEmptyException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RepositoryNotEmptyException) GoString() string { + return s.String() +} + +func newErrorRepositoryNotEmptyException(v protocol.ResponseMetadata) error { + return &RepositoryNotEmptyException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. 
+func (s *RepositoryNotEmptyException) Code() string { + return "RepositoryNotEmptyException" +} + +// Message returns the exception's message. +func (s *RepositoryNotEmptyException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *RepositoryNotEmptyException) OrigErr() error { + return nil +} + +func (s *RepositoryNotEmptyException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *RepositoryNotEmptyException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *RepositoryNotEmptyException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +type RepositoryNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The error message associated with the exception. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s RepositoryNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RepositoryNotFoundException) GoString() string { + return s.String() +} + +func newErrorRepositoryNotFoundException(v protocol.ResponseMetadata) error { + return &RepositoryNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *RepositoryNotFoundException) Code() string { + return "RepositoryNotFoundException" +} + +// Message returns the exception's message. +func (s *RepositoryNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *RepositoryNotFoundException) OrigErr() error { + return nil +} + +func (s *RepositoryNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *RepositoryNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *RepositoryNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The specified repository and registry combination does not have an associated +// repository policy. +type RepositoryPolicyNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The error message associated with the exception. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s RepositoryPolicyNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RepositoryPolicyNotFoundException) GoString() string { + return s.String() +} + +func newErrorRepositoryPolicyNotFoundException(v protocol.ResponseMetadata) error { + return &RepositoryPolicyNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. 
+func (s *RepositoryPolicyNotFoundException) Code() string { + return "RepositoryPolicyNotFoundException" +} + +// Message returns the exception's message. +func (s *RepositoryPolicyNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *RepositoryPolicyNotFoundException) OrigErr() error { + return nil +} + +func (s *RepositoryPolicyNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *RepositoryPolicyNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *RepositoryPolicyNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The specified image scan could not be found. Ensure that image scanning is +// enabled on the repository and try again. +type ScanNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ScanNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScanNotFoundException) GoString() string { + return s.String() +} + +func newErrorScanNotFoundException(v protocol.ResponseMetadata) error { + return &ScanNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ScanNotFoundException) Code() string { + return "ScanNotFoundException" +} + +// Message returns the exception's message. +func (s *ScanNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ScanNotFoundException) OrigErr() error { + return nil +} + +func (s *ScanNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ScanNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ScanNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// These errors are usually caused by a server-side issue. +type ServerException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The error message associated with the exception. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ServerException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServerException) GoString() string { + return s.String() +} + +func newErrorServerException(v protocol.ResponseMetadata) error { + return &ServerException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ServerException) Code() string { + return "ServerException" +} + +// Message returns the exception's message. +func (s *ServerException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
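+
+// Every *Exception type in this file satisfies the awserr.Error interface,
+// so callers usually branch on Code() against the ErrCode* constants from
+// errors.go rather than on concrete types. A minimal sketch, assuming the
+// aws, awserr, ecr, and session packages are imported and "my-repo" is a
+// hypothetical repository name:
+//
+//	svc := ecr.New(session.Must(session.NewSession()))
+//	_, err := svc.DescribeRepositories(&ecr.DescribeRepositoriesInput{
+//		RepositoryNames: []*string{aws.String("my-repo")},
+//	})
+//	if aerr, ok := err.(awserr.Error); ok {
+//		switch aerr.Code() {
+//		case ecr.ErrCodeRepositoryNotFoundException:
+//			// repository does not exist yet; create it and retry
+//		case ecr.ErrCodeServerException:
+//			// server-side issue; usually worth retrying with backoff
+//		default:
+//			// unexpected service error
+//		}
+//	}
+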
+func (s *ServerException) OrigErr() error { + return nil +} + +func (s *ServerException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ServerException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ServerException) RequestID() string { + return s.RespMetadata.RequestID +} + +type SetRepositoryPolicyInput struct { + _ struct{} `type:"structure"` + + // If the policy you are attempting to set on a repository would prevent you + // from setting another policy in the future, you must force the SetRepositoryPolicy + // operation. This is intended to prevent accidental repository lockouts. + Force *bool `locationName:"force" type:"boolean"` + + // The JSON repository policy text to apply to the repository. For more information, + // see Amazon ECR Repository Policies (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policy-examples.html) + // in the Amazon Elastic Container Registry User Guide. + // + // PolicyText is a required field + PolicyText *string `locationName:"policyText" type:"string" required:"true"` + + // The AWS account ID associated with the registry that contains the repository. + // If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository to receive the policy. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s SetRepositoryPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetRepositoryPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetRepositoryPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetRepositoryPolicyInput"} + if s.PolicyText == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyText")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetForce sets the Force field's value. +func (s *SetRepositoryPolicyInput) SetForce(v bool) *SetRepositoryPolicyInput { + s.Force = &v + return s +} + +// SetPolicyText sets the PolicyText field's value. +func (s *SetRepositoryPolicyInput) SetPolicyText(v string) *SetRepositoryPolicyInput { + s.PolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *SetRepositoryPolicyInput) SetRegistryId(v string) *SetRepositoryPolicyInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *SetRepositoryPolicyInput) SetRepositoryName(v string) *SetRepositoryPolicyInput { + s.RepositoryName = &v + return s +} + +type SetRepositoryPolicyOutput struct { + _ struct{} `type:"structure"` + + // The JSON repository policy text applied to the repository. + PolicyText *string `locationName:"policyText" type:"string"` + + // The registry ID associated with the request. 
+ RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s SetRepositoryPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetRepositoryPolicyOutput) GoString() string { + return s.String() +} + +// SetPolicyText sets the PolicyText field's value. +func (s *SetRepositoryPolicyOutput) SetPolicyText(v string) *SetRepositoryPolicyOutput { + s.PolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *SetRepositoryPolicyOutput) SetRegistryId(v string) *SetRepositoryPolicyOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *SetRepositoryPolicyOutput) SetRepositoryName(v string) *SetRepositoryPolicyOutput { + s.RepositoryName = &v + return s +} + +type StartImageScanInput struct { + _ struct{} `type:"structure"` + + // An object with identifying information for an Amazon ECR image. + // + // ImageId is a required field + ImageId *ImageIdentifier `locationName:"imageId" type:"structure" required:"true"` + + // The AWS account ID associated with the registry that contains the repository + // in which to start an image scan request. If you do not specify a registry, + // the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository that contains the images to scan. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartImageScanInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartImageScanInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartImageScanInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartImageScanInput"} + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + if s.ImageId != nil { + if err := s.ImageId.Validate(); err != nil { + invalidParams.AddNested("ImageId", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetImageId sets the ImageId field's value. +func (s *StartImageScanInput) SetImageId(v *ImageIdentifier) *StartImageScanInput { + s.ImageId = v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *StartImageScanInput) SetRegistryId(v string) *StartImageScanInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *StartImageScanInput) SetRepositoryName(v string) *StartImageScanInput { + s.RepositoryName = &v + return s +} + +type StartImageScanOutput struct { + _ struct{} `type:"structure"` + + // An object with identifying information for an Amazon ECR image. + ImageId *ImageIdentifier `locationName:"imageId" type:"structure"` + + // The current state of the scan. 
+ ImageScanStatus *ImageScanStatus `locationName:"imageScanStatus" type:"structure"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s StartImageScanOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartImageScanOutput) GoString() string { + return s.String() +} + +// SetImageId sets the ImageId field's value. +func (s *StartImageScanOutput) SetImageId(v *ImageIdentifier) *StartImageScanOutput { + s.ImageId = v + return s +} + +// SetImageScanStatus sets the ImageScanStatus field's value. +func (s *StartImageScanOutput) SetImageScanStatus(v *ImageScanStatus) *StartImageScanOutput { + s.ImageScanStatus = v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *StartImageScanOutput) SetRegistryId(v string) *StartImageScanOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *StartImageScanOutput) SetRepositoryName(v string) *StartImageScanOutput { + s.RepositoryName = &v + return s +} + +type StartLifecyclePolicyPreviewInput struct { + _ struct{} `type:"structure"` + + // The policy to be evaluated against. If you do not specify a policy, the current + // policy for the repository is used. + LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"` + + // The AWS account ID associated with the registry that contains the repository. + // If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository to be evaluated. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartLifecyclePolicyPreviewInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartLifecyclePolicyPreviewInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartLifecyclePolicyPreviewInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartLifecyclePolicyPreviewInput"} + if s.LifecyclePolicyText != nil && len(*s.LifecyclePolicyText) < 100 { + invalidParams.Add(request.NewErrParamMinLen("LifecyclePolicyText", 100)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLifecyclePolicyText sets the LifecyclePolicyText field's value. +func (s *StartLifecyclePolicyPreviewInput) SetLifecyclePolicyText(v string) *StartLifecyclePolicyPreviewInput { + s.LifecyclePolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *StartLifecyclePolicyPreviewInput) SetRegistryId(v string) *StartLifecyclePolicyPreviewInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. 
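+
+// StartImageScan targets one image at a time, identified by tag or digest
+// through the ImageIdentifier structure; the returned ImageScanStatus can
+// then be polled via DescribeImageScanFindings. A minimal sketch, assuming
+// a hypothetical svc client and an image tagged "latest" in "my-repo":
+//
+//	out, err := svc.StartImageScan(&ecr.StartImageScanInput{
+//		ImageId:        (&ecr.ImageIdentifier{}).SetImageTag("latest"),
+//		RepositoryName: aws.String("my-repo"),
+//	})
+//	if err == nil {
+//		// out.ImageScanStatus.Status is IN_PROGRESS, COMPLETE, or FAILED
+//	}
+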
+func (s *StartLifecyclePolicyPreviewInput) SetRepositoryName(v string) *StartLifecyclePolicyPreviewInput { + s.RepositoryName = &v + return s +} + +type StartLifecyclePolicyPreviewOutput struct { + _ struct{} `type:"structure"` + + // The JSON lifecycle policy text. + LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` + + // The status of the lifecycle policy preview request. + Status *string `locationName:"status" type:"string" enum:"LifecyclePolicyPreviewStatus"` +} + +// String returns the string representation +func (s StartLifecyclePolicyPreviewOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartLifecyclePolicyPreviewOutput) GoString() string { + return s.String() +} + +// SetLifecyclePolicyText sets the LifecyclePolicyText field's value. +func (s *StartLifecyclePolicyPreviewOutput) SetLifecyclePolicyText(v string) *StartLifecyclePolicyPreviewOutput { + s.LifecyclePolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *StartLifecyclePolicyPreviewOutput) SetRegistryId(v string) *StartLifecyclePolicyPreviewOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *StartLifecyclePolicyPreviewOutput) SetRepositoryName(v string) *StartLifecyclePolicyPreviewOutput { + s.RepositoryName = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *StartLifecyclePolicyPreviewOutput) SetStatus(v string) *StartLifecyclePolicyPreviewOutput { + s.Status = &v + return s +} + +// The metadata that you apply to a resource to help you categorize and organize +// it. Each tag consists of a key and an optional value, both of which you +// define. Tag keys can have a maximum character length of 128 characters, and +// tag values can have a maximum length of 256 characters. +type Tag struct { + _ struct{} `type:"structure"` + + // One part of a key-value pair that makes up a tag. A key is a general label + // that acts like a category for more specific tag values. + Key *string `type:"string"` + + // The optional part of a key-value pair that makes up a tag. A value acts as + // a descriptor within a tag category (key). + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource to which to add tags. + // Currently, the only supported resource is an Amazon ECR repository. + // + // ResourceArn is a required field + ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"` + + // The tags to add to the resource, as an array of key-value pairs. Tag + // keys can have a maximum character length of 128 characters, and tag values + // can have a maximum length of 256 characters. 
+ // + // Tags is a required field + Tags []*Tag `locationName:"tags" type:"list" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + +// The list of tags on the repository is over the limit. The maximum number +// of tags that can be applied to a repository is 50. +type TooManyTagsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s TooManyTagsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TooManyTagsException) GoString() string { + return s.String() +} + +func newErrorTooManyTagsException(v protocol.ResponseMetadata) error { + return &TooManyTagsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *TooManyTagsException) Code() string { + return "TooManyTagsException" +} + +// Message returns the exception's message. +func (s *TooManyTagsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TooManyTagsException) OrigErr() error { + return nil +} + +func (s *TooManyTagsException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TooManyTagsException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *TooManyTagsException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The image is of a type that cannot be scanned. 
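+
+// Tagging operates on ARNs rather than repository names: TagResource and
+// UntagResource take the ARN exposed as Repository.RepositoryArn. A minimal
+// sketch with hypothetical values, assuming an existing svc client:
+//
+//	_, err := svc.TagResource(&ecr.TagResourceInput{
+//		ResourceArn: aws.String("arn:aws:ecr:us-east-1:012345678910:repository/test"),
+//		Tags: []*ecr.Tag{
+//			(&ecr.Tag{}).SetKey("team").SetValue("build"),
+//		},
+//	})
+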
+type UnsupportedImageTypeException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s UnsupportedImageTypeException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsupportedImageTypeException) GoString() string { + return s.String() +} + +func newErrorUnsupportedImageTypeException(v protocol.ResponseMetadata) error { + return &UnsupportedImageTypeException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *UnsupportedImageTypeException) Code() string { + return "UnsupportedImageTypeException" +} + +// Message returns the exception's message. +func (s *UnsupportedImageTypeException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *UnsupportedImageTypeException) OrigErr() error { + return nil +} + +func (s *UnsupportedImageTypeException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *UnsupportedImageTypeException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *UnsupportedImageTypeException) RequestID() string { + return s.RespMetadata.RequestID +} + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource from which to remove tags. + // Currently, the only supported resource is an Amazon ECR repository. + // + // ResourceArn is a required field + ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"` + + // The keys of the tags to be removed. + // + // TagKeys is a required field + TagKeys []*string `locationName:"tagKeys" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + +type UploadLayerPartInput struct { + _ struct{} `type:"structure"` + + // The base64-encoded layer part payload. 
+ // + // LayerPartBlob is automatically base64 encoded/decoded by the SDK. + // + // LayerPartBlob is a required field + LayerPartBlob []byte `locationName:"layerPartBlob" type:"blob" required:"true"` + + // The position of the first byte of the layer part within the overall image + // layer. + // + // PartFirstByte is a required field + PartFirstByte *int64 `locationName:"partFirstByte" type:"long" required:"true"` + + // The position of the last byte of the layer part within the overall image + // layer. + // + // PartLastByte is a required field + PartLastByte *int64 `locationName:"partLastByte" type:"long" required:"true"` + + // The AWS account ID associated with the registry to which you are uploading + // layer parts. If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository to which you are uploading layer parts. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` + + // The upload ID from a previous InitiateLayerUpload operation to associate + // with the layer part upload. + // + // UploadId is a required field + UploadId *string `locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadLayerPartInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadLayerPartInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadLayerPartInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadLayerPartInput"} + if s.LayerPartBlob == nil { + invalidParams.Add(request.NewErrParamRequired("LayerPartBlob")) + } + if s.PartFirstByte == nil { + invalidParams.Add(request.NewErrParamRequired("PartFirstByte")) + } + if s.PartLastByte == nil { + invalidParams.Add(request.NewErrParamRequired("PartLastByte")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLayerPartBlob sets the LayerPartBlob field's value. +func (s *UploadLayerPartInput) SetLayerPartBlob(v []byte) *UploadLayerPartInput { + s.LayerPartBlob = v + return s +} + +// SetPartFirstByte sets the PartFirstByte field's value. +func (s *UploadLayerPartInput) SetPartFirstByte(v int64) *UploadLayerPartInput { + s.PartFirstByte = &v + return s +} + +// SetPartLastByte sets the PartLastByte field's value. +func (s *UploadLayerPartInput) SetPartLastByte(v int64) *UploadLayerPartInput { + s.PartLastByte = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *UploadLayerPartInput) SetRegistryId(v string) *UploadLayerPartInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *UploadLayerPartInput) SetRepositoryName(v string) *UploadLayerPartInput { + s.RepositoryName = &v + return s +} + +// SetUploadId sets the UploadId field's value. 
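+
+// UploadLayerPart is the middle step of the chunked layer-upload protocol:
+// InitiateLayerUpload returns the UploadId, each part is then sent with its
+// absolute byte range within the layer, and CompleteLayerUpload seals it.
+// A minimal sketch of the part loop, assuming hypothetical uploadID, repo,
+// and blob []byte values, with 5 MiB parts (see LayerPartTooSmallException):
+//
+//	const partSize = 5 * 1024 * 1024
+//	for first := int64(0); first < int64(len(blob)); first += partSize {
+//		last := first + partSize - 1
+//		if max := int64(len(blob)) - 1; last > max {
+//			last = max
+//		}
+//		_, err := svc.UploadLayerPart((&ecr.UploadLayerPartInput{}).
+//			SetRepositoryName(repo).
+//			SetUploadId(uploadID).
+//			SetPartFirstByte(first).
+//			SetPartLastByte(last).
+//			SetLayerPartBlob(blob[first : last+1]))
+//		if err != nil {
+//			return err
+//		}
+//	}
+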
+func (s *UploadLayerPartInput) SetUploadId(v string) *UploadLayerPartInput { + s.UploadId = &v + return s +} + +type UploadLayerPartOutput struct { + _ struct{} `type:"structure"` + + // The integer value of the last byte received in the request. + LastByteReceived *int64 `locationName:"lastByteReceived" type:"long"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` + + // The upload ID associated with the request. + UploadId *string `locationName:"uploadId" type:"string"` +} + +// String returns the string representation +func (s UploadLayerPartOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadLayerPartOutput) GoString() string { + return s.String() +} + +// SetLastByteReceived sets the LastByteReceived field's value. +func (s *UploadLayerPartOutput) SetLastByteReceived(v int64) *UploadLayerPartOutput { + s.LastByteReceived = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *UploadLayerPartOutput) SetRegistryId(v string) *UploadLayerPartOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *UploadLayerPartOutput) SetRepositoryName(v string) *UploadLayerPartOutput { + s.RepositoryName = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *UploadLayerPartOutput) SetUploadId(v string) *UploadLayerPartOutput { + s.UploadId = &v + return s +} + +// The upload could not be found, or the specified upload id is not valid for +// this repository. +type UploadNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The error message associated with the exception. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s UploadNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadNotFoundException) GoString() string { + return s.String() +} + +func newErrorUploadNotFoundException(v protocol.ResponseMetadata) error { + return &UploadNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *UploadNotFoundException) Code() string { + return "UploadNotFoundException" +} + +// Message returns the exception's message. +func (s *UploadNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *UploadNotFoundException) OrigErr() error { + return nil +} + +func (s *UploadNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *UploadNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *UploadNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +const ( + // FindingSeverityInformational is a FindingSeverity enum value + FindingSeverityInformational = "INFORMATIONAL" + + // FindingSeverityLow is a FindingSeverity enum value + FindingSeverityLow = "LOW" + + // FindingSeverityMedium is a FindingSeverity enum value + FindingSeverityMedium = "MEDIUM" + + // FindingSeverityHigh is a FindingSeverity enum value + FindingSeverityHigh = "HIGH" + + // FindingSeverityCritical is a FindingSeverity enum value + FindingSeverityCritical = "CRITICAL" + + // FindingSeverityUndefined is a FindingSeverity enum value + FindingSeverityUndefined = "UNDEFINED" +) + +const ( + // ImageActionTypeExpire is a ImageActionType enum value + ImageActionTypeExpire = "EXPIRE" +) + +const ( + // ImageFailureCodeInvalidImageDigest is a ImageFailureCode enum value + ImageFailureCodeInvalidImageDigest = "InvalidImageDigest" + + // ImageFailureCodeInvalidImageTag is a ImageFailureCode enum value + ImageFailureCodeInvalidImageTag = "InvalidImageTag" + + // ImageFailureCodeImageTagDoesNotMatchDigest is a ImageFailureCode enum value + ImageFailureCodeImageTagDoesNotMatchDigest = "ImageTagDoesNotMatchDigest" + + // ImageFailureCodeImageNotFound is a ImageFailureCode enum value + ImageFailureCodeImageNotFound = "ImageNotFound" + + // ImageFailureCodeMissingDigestAndTag is a ImageFailureCode enum value + ImageFailureCodeMissingDigestAndTag = "MissingDigestAndTag" + + // ImageFailureCodeImageReferencedByManifestList is a ImageFailureCode enum value + ImageFailureCodeImageReferencedByManifestList = "ImageReferencedByManifestList" +) + +const ( + // ImageTagMutabilityMutable is a ImageTagMutability enum value + ImageTagMutabilityMutable = "MUTABLE" + + // ImageTagMutabilityImmutable is a ImageTagMutability enum value + ImageTagMutabilityImmutable = "IMMUTABLE" +) + +const ( + // LayerAvailabilityAvailable is a LayerAvailability enum value + LayerAvailabilityAvailable = "AVAILABLE" + + // LayerAvailabilityUnavailable is a LayerAvailability enum value + LayerAvailabilityUnavailable = "UNAVAILABLE" +) + +const ( + // LayerFailureCodeInvalidLayerDigest is a LayerFailureCode enum value + LayerFailureCodeInvalidLayerDigest = "InvalidLayerDigest" + + // LayerFailureCodeMissingLayerDigest is a LayerFailureCode enum value + LayerFailureCodeMissingLayerDigest = "MissingLayerDigest" +) + +const ( + // LifecyclePolicyPreviewStatusInProgress is a LifecyclePolicyPreviewStatus enum value + LifecyclePolicyPreviewStatusInProgress = "IN_PROGRESS" + + // LifecyclePolicyPreviewStatusComplete is a LifecyclePolicyPreviewStatus enum value + LifecyclePolicyPreviewStatusComplete = "COMPLETE" + + // LifecyclePolicyPreviewStatusExpired is a LifecyclePolicyPreviewStatus enum value + LifecyclePolicyPreviewStatusExpired = "EXPIRED" + + // LifecyclePolicyPreviewStatusFailed is a LifecyclePolicyPreviewStatus enum value + LifecyclePolicyPreviewStatusFailed = "FAILED" +) + +const ( + // ScanStatusInProgress is a ScanStatus enum value + ScanStatusInProgress = "IN_PROGRESS" + + // ScanStatusComplete is a ScanStatus enum value + ScanStatusComplete = "COMPLETE" + + // ScanStatusFailed is a ScanStatus enum value + ScanStatusFailed = "FAILED" +) + +const ( + // TagStatusTagged is a TagStatus enum value + TagStatusTagged = "TAGGED" + + // TagStatusUntagged is a TagStatus enum value + TagStatusUntagged = "UNTAGGED" + + // TagStatusAny is a TagStatus enum value + TagStatusAny = "ANY" +) diff --git 
a/extender/code/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go b/extender/code/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go new file mode 100644 index 000000000..d970974bc --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go @@ -0,0 +1,33 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package ecr provides the client and types for making API +// requests to Amazon EC2 Container Registry. +// +// Amazon Elastic Container Registry (Amazon ECR) is a managed Docker registry +// service. Customers can use the familiar Docker CLI to push, pull, and manage +// images. Amazon ECR provides a secure, scalable, and reliable registry. Amazon +// ECR supports private Docker repositories with resource-based permissions +// using IAM so that specific users or Amazon EC2 instances can access repositories +// and images. Developers can use the Docker CLI to author and manage images. +// +// See https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21 for more information on this service. +// +// See ecr package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/ecr/ +// +// Using the Client +// +// To contact Amazon EC2 Container Registry with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon EC2 Container Registry client ECR for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/ecr/#New +package ecr diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go b/extender/code/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go new file mode 100644 index 000000000..bf44256bd --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go @@ -0,0 +1,213 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ecr + +import ( + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + + // ErrCodeEmptyUploadException for service response error code + // "EmptyUploadException". + // + // The specified layer upload does not contain any layer parts. + ErrCodeEmptyUploadException = "EmptyUploadException" + + // ErrCodeImageAlreadyExistsException for service response error code + // "ImageAlreadyExistsException". + // + // The specified image has already been pushed, and there were no changes to + // the manifest or image tag after the last push. + ErrCodeImageAlreadyExistsException = "ImageAlreadyExistsException" + + // ErrCodeImageNotFoundException for service response error code + // "ImageNotFoundException". + // + // The image requested does not exist in the specified repository. + ErrCodeImageNotFoundException = "ImageNotFoundException" + + // ErrCodeImageTagAlreadyExistsException for service response error code + // "ImageTagAlreadyExistsException". + // + // The specified image is tagged with a tag that already exists. The repository + // is configured for tag immutability. + ErrCodeImageTagAlreadyExistsException = "ImageTagAlreadyExistsException" + + // ErrCodeInvalidLayerException for service response error code + // "InvalidLayerException". 
+ // + // The layer digest calculation performed by Amazon ECR upon receipt of the + // image layer does not match the digest specified. + ErrCodeInvalidLayerException = "InvalidLayerException" + + // ErrCodeInvalidLayerPartException for service response error code + // "InvalidLayerPartException". + // + // The layer part size is not valid, or the first byte specified is not consecutive + // to the last byte of a previous layer part upload. + ErrCodeInvalidLayerPartException = "InvalidLayerPartException" + + // ErrCodeInvalidParameterException for service response error code + // "InvalidParameterException". + // + // The specified parameter is invalid. Review the available parameters for the + // API request. + ErrCodeInvalidParameterException = "InvalidParameterException" + + // ErrCodeInvalidTagParameterException for service response error code + // "InvalidTagParameterException". + // + // An invalid parameter has been specified. Tag keys can have a maximum character + // length of 128 characters, and tag values can have a maximum length of 256 + // characters. + ErrCodeInvalidTagParameterException = "InvalidTagParameterException" + + // ErrCodeLayerAlreadyExistsException for service response error code + // "LayerAlreadyExistsException". + // + // The image layer already exists in the associated repository. + ErrCodeLayerAlreadyExistsException = "LayerAlreadyExistsException" + + // ErrCodeLayerInaccessibleException for service response error code + // "LayerInaccessibleException". + // + // The specified layer is not available because it is not associated with an + // image. Unassociated image layers may be cleaned up at any time. + ErrCodeLayerInaccessibleException = "LayerInaccessibleException" + + // ErrCodeLayerPartTooSmallException for service response error code + // "LayerPartTooSmallException". + // + // Layer parts must be at least 5 MiB in size. + ErrCodeLayerPartTooSmallException = "LayerPartTooSmallException" + + // ErrCodeLayersNotFoundException for service response error code + // "LayersNotFoundException". + // + // The specified layers could not be found, or the specified layer is not valid + // for this repository. + ErrCodeLayersNotFoundException = "LayersNotFoundException" + + // ErrCodeLifecyclePolicyNotFoundException for service response error code + // "LifecyclePolicyNotFoundException". + // + // The lifecycle policy could not be found, and no policy is set to the repository. + ErrCodeLifecyclePolicyNotFoundException = "LifecyclePolicyNotFoundException" + + // ErrCodeLifecyclePolicyPreviewInProgressException for service response error code + // "LifecyclePolicyPreviewInProgressException". + // + // The previous lifecycle policy preview request has not completed. Please try + // again later. + ErrCodeLifecyclePolicyPreviewInProgressException = "LifecyclePolicyPreviewInProgressException" + + // ErrCodeLifecyclePolicyPreviewNotFoundException for service response error code + // "LifecyclePolicyPreviewNotFoundException". + // + // There is no dry run for this repository. + ErrCodeLifecyclePolicyPreviewNotFoundException = "LifecyclePolicyPreviewNotFoundException" + + // ErrCodeLimitExceededException for service response error code + // "LimitExceededException". + // + // The operation did not succeed because it would have exceeded a service limit + // for your account. For more information, see Amazon ECR Service Quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) + // in the Amazon Elastic Container Registry User Guide. 
+ ErrCodeLimitExceededException = "LimitExceededException" + + // ErrCodeReferencedImagesNotFoundException for service response error code + // "ReferencedImagesNotFoundException". + // + // The manifest list is referencing an image that does not exist. + ErrCodeReferencedImagesNotFoundException = "ReferencedImagesNotFoundException" + + // ErrCodeRepositoryAlreadyExistsException for service response error code + // "RepositoryAlreadyExistsException". + // + // The specified repository already exists in the specified registry. + ErrCodeRepositoryAlreadyExistsException = "RepositoryAlreadyExistsException" + + // ErrCodeRepositoryNotEmptyException for service response error code + // "RepositoryNotEmptyException". + // + // The specified repository contains images. To delete a repository that contains + // images, you must force the deletion with the force parameter. + ErrCodeRepositoryNotEmptyException = "RepositoryNotEmptyException" + + // ErrCodeRepositoryNotFoundException for service response error code + // "RepositoryNotFoundException". + // + // The specified repository could not be found. Check the spelling of the specified + // repository and ensure that you are performing operations on the correct registry. + ErrCodeRepositoryNotFoundException = "RepositoryNotFoundException" + + // ErrCodeRepositoryPolicyNotFoundException for service response error code + // "RepositoryPolicyNotFoundException". + // + // The specified repository and registry combination does not have an associated + // repository policy. + ErrCodeRepositoryPolicyNotFoundException = "RepositoryPolicyNotFoundException" + + // ErrCodeScanNotFoundException for service response error code + // "ScanNotFoundException". + // + // The specified image scan could not be found. Ensure that image scanning is + // enabled on the repository and try again. + ErrCodeScanNotFoundException = "ScanNotFoundException" + + // ErrCodeServerException for service response error code + // "ServerException". + // + // These errors are usually caused by a server-side issue. + ErrCodeServerException = "ServerException" + + // ErrCodeTooManyTagsException for service response error code + // "TooManyTagsException". + // + // The list of tags on the repository is over the limit. The maximum number + // of tags that can be applied to a repository is 50. + ErrCodeTooManyTagsException = "TooManyTagsException" + + // ErrCodeUnsupportedImageTypeException for service response error code + // "UnsupportedImageTypeException". + // + // The image is of a type that cannot be scanned. + ErrCodeUnsupportedImageTypeException = "UnsupportedImageTypeException" + + // ErrCodeUploadNotFoundException for service response error code + // "UploadNotFoundException". + // + // The upload could not be found, or the specified upload id is not valid for + // this repository. 
+ ErrCodeUploadNotFoundException = "UploadNotFoundException" +) + +var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ + "EmptyUploadException": newErrorEmptyUploadException, + "ImageAlreadyExistsException": newErrorImageAlreadyExistsException, + "ImageNotFoundException": newErrorImageNotFoundException, + "ImageTagAlreadyExistsException": newErrorImageTagAlreadyExistsException, + "InvalidLayerException": newErrorInvalidLayerException, + "InvalidLayerPartException": newErrorInvalidLayerPartException, + "InvalidParameterException": newErrorInvalidParameterException, + "InvalidTagParameterException": newErrorInvalidTagParameterException, + "LayerAlreadyExistsException": newErrorLayerAlreadyExistsException, + "LayerInaccessibleException": newErrorLayerInaccessibleException, + "LayerPartTooSmallException": newErrorLayerPartTooSmallException, + "LayersNotFoundException": newErrorLayersNotFoundException, + "LifecyclePolicyNotFoundException": newErrorLifecyclePolicyNotFoundException, + "LifecyclePolicyPreviewInProgressException": newErrorLifecyclePolicyPreviewInProgressException, + "LifecyclePolicyPreviewNotFoundException": newErrorLifecyclePolicyPreviewNotFoundException, + "LimitExceededException": newErrorLimitExceededException, + "ReferencedImagesNotFoundException": newErrorReferencedImagesNotFoundException, + "RepositoryAlreadyExistsException": newErrorRepositoryAlreadyExistsException, + "RepositoryNotEmptyException": newErrorRepositoryNotEmptyException, + "RepositoryNotFoundException": newErrorRepositoryNotFoundException, + "RepositoryPolicyNotFoundException": newErrorRepositoryPolicyNotFoundException, + "ScanNotFoundException": newErrorScanNotFoundException, + "ServerException": newErrorServerException, + "TooManyTagsException": newErrorTooManyTagsException, + "UnsupportedImageTypeException": newErrorUnsupportedImageTypeException, + "UploadNotFoundException": newErrorUploadNotFoundException, +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go b/extender/code/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go new file mode 100644 index 000000000..c4392395c --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go @@ -0,0 +1,106 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ecr + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// ECR provides the API operation methods for making requests to +// Amazon EC2 Container Registry. See this package's package overview docs +// for details on the service. +// +// ECR methods are safe to use concurrently. It is not safe to +// modify or mutate any of the struct's properties though. +type ECR struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "ecr" // Name of service. + EndpointsID = "api.ecr" // ID to look up a service endpoint with. + ServiceID = "ECR" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the ECR client with a session.
+// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// mySession := session.Must(session.NewSession()) +// +// // Create a ECR client from just a session. +// svc := ecr.New(mySession) +// +// // Create a ECR client with additional configuration +// svc := ecr.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *ECR { + c := p.ClientConfig(EndpointsID, cfgs...) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = "ecr" + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ECR { + svc := &ECR{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2015-09-21", + JSONVersion: "1.1", + TargetPrefix: "AmazonEC2ContainerRegistry_V20150921", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed( + protocol.NewUnmarshalErrorHandler(jsonrpc.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), + ) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a ECR operation and runs any +// custom request initialization. +func (c *ECR) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/service/ecr/waiters.go b/extender/code/vendor/github.com/aws/aws-sdk-go/service/ecr/waiters.go new file mode 100644 index 000000000..4b6f88e4e --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/service/ecr/waiters.go @@ -0,0 +1,112 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ecr + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// WaitUntilImageScanComplete uses the Amazon ECR API operation +// DescribeImageScanFindings to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *ECR) WaitUntilImageScanComplete(input *DescribeImageScanFindingsInput) error { + return c.WaitUntilImageScanCompleteWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilImageScanCompleteWithContext is an extended version of WaitUntilImageScanComplete. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) WaitUntilImageScanCompleteWithContext(ctx aws.Context, input *DescribeImageScanFindingsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilImageScanComplete", + MaxAttempts: 60, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "imageScanStatus.status", + Expected: "COMPLETE", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathWaiterMatch, Argument: "imageScanStatus.status", + Expected: "FAILED", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeImageScanFindingsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeImageScanFindingsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilLifecyclePolicyPreviewComplete uses the Amazon ECR API operation +// GetLifecyclePolicyPreview to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *ECR) WaitUntilLifecyclePolicyPreviewComplete(input *GetLifecyclePolicyPreviewInput) error { + return c.WaitUntilLifecyclePolicyPreviewCompleteWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilLifecyclePolicyPreviewCompleteWithContext is an extended version of WaitUntilLifecyclePolicyPreviewComplete. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) WaitUntilLifecyclePolicyPreviewCompleteWithContext(ctx aws.Context, input *GetLifecyclePolicyPreviewInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilLifecyclePolicyPreviewComplete", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "status", + Expected: "COMPLETE", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathWaiterMatch, Argument: "status", + Expected: "FAILED", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *GetLifecyclePolicyPreviewInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetLifecyclePolicyPreviewRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/extender/code/vendor/github.com/aws/aws-sdk-go/service/sts/api.go new file mode 100644 index 000000000..550b5f687 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -0,0 +1,3115 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
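+//
+// For orientation while reading this vendored file: constructing the STS
+// client follows the same pattern as the ECR client above. A minimal sketch,
+// assuming the usual session helper (mySession is a placeholder name):
+//
+//    mySession := session.Must(session.NewSession())
+//    svc := sts.New(mySession)
+//
+//    // svc is safe for concurrent use across goroutines.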
+ +package sts + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAssumeRole = "AssumeRole" + +// AssumeRoleRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRole operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssumeRole for more information on using the AssumeRole +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssumeRoleRequest method. +// req, resp := client.AssumeRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole +func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { + op := &request.Operation{ + Name: opAssumeRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleInput{} + } + + output = &AssumeRoleOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssumeRole API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials that you can use to access +// AWS resources that you might not normally have access to. These temporary +// credentials consist of an access key ID, a secret access key, and a security +// token. Typically, you use AssumeRole within your account or for cross-account +// access. For a comparison of AssumeRole with other API operations that produce +// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// You cannot use AWS account root user credentials to call AssumeRole. You +// must use credentials for an IAM user or an IAM role to call AssumeRole. +// +// For cross-account access, imagine that you own multiple accounts and need +// to access resources in each account. You could create long-term credentials +// in each account to access those resources. However, managing all those credentials +// and remembering which one can access which account can be time consuming. +// Instead, you can create one set of long-term credentials in one account. +// Then use temporary security credentials to access all the other accounts +// by assuming roles in those accounts. For more information about roles, see +// IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) +// in the IAM User Guide. +// +// Session Duration +// +// By default, the temporary security credentials created by AssumeRole last +// for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. You can provide a value from 900 +// seconds (15 minutes) up to the maximum session duration setting for the role. 
+// This setting can have a value from 1 hour to 12 hours. To learn how to view +// the maximum value for your role, see View the Maximum Session Duration Setting +// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console +// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. +// +// Permissions +// +// The temporary security credentials created by AssumeRole can be used to make +// API calls to any AWS service with the following exception: You cannot call +// the AWS STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies can't exceed 2,048 characters. Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// AWS API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed +// by the identity-based policy of the role that is being assumed. For more +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. +// +// To assume a role from a different account, your AWS account must be trusted +// by the role. The trust relationship is defined in the role's trust policy +// when the role is created. That trust policy states which accounts are allowed +// to delegate that access to users in the account. +// +// A user who wants to access a role in a different account must also have permissions +// that are delegated from the user account administrator. The administrator +// must attach a policy that allows the user to call AssumeRole for the ARN +// of the role in the other account. If the user is in the same account as the +// role, then you can do either of the following: +// +// * Attach a policy to the user (identical to the previous user in a different +// account). +// +// * Add the user as a principal directly in the role's trust policy. +// +// In this case, the trust policy acts as an IAM resource-based policy. Users +// in the same account as the role do not need explicit permission to assume +// the role. For more information about trust policies and resource-based policies, +// see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// in the IAM User Guide. +// +// Tags +// +// (Optional) You can pass tag key-value pairs to your session. These tags are +// called session tags. For more information about session tags, see Passing +// Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. 
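+//
+// A minimal sketch of passing a session tag (the ARN, names, and values below
+// are placeholders, and svc is an *STS value returned by New):
+//
+//    out, err := svc.AssumeRole(&sts.AssumeRoleInput{
+//        RoleArn:         aws.String("arn:aws:iam::123456789012:role/demo"),
+//        RoleSessionName: aws.String("demo-session"),
+//        Tags: []*sts.Tag{
+//            {Key: aws.String("Project"), Value: aws.String("demo")},
+//        },
+//    })
+//    if err == nil {
+//        fmt.Println(out.Credentials.Expiration)
+//    }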
+// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// +// Using MFA with AssumeRole +// +// (Optional) You can include multi-factor authentication (MFA) information +// when you call AssumeRole. This is useful for cross-account scenarios to ensure +// that the user who assumes the role has been authenticated with an AWS MFA +// device. In that scenario, the trust policy of the role being assumed includes +// a condition that tests for MFA authentication. If the caller does not include +// valid MFA information, the request to assume the role is denied. The condition +// in a trust policy that tests for MFA authentication might look like the following +// example. +// +// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} +// +// For more information, see Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) +// in the IAM User Guide. +// +// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode +// parameters. The SerialNumber value identifies the user's hardware or virtual +// MFA device. The TokenCode is the time-based one-time password (TOTP) that +// the MFA device produces. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRole for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials.
The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole +func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { + req, out := c.AssumeRoleRequest(input) + return out, req.Send() +} + +// AssumeRoleWithContext is the same as AssumeRole with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRole for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) { + req, out := c.AssumeRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssumeRoleWithSAML = "AssumeRoleWithSAML" + +// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithSAML operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssumeRoleWithSAMLRequest method. +// req, resp := client.AssumeRoleWithSAMLRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML +func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { + op := &request.Operation{ + Name: opAssumeRoleWithSAML, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleWithSAMLInput{} + } + + output = &AssumeRoleWithSAMLOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// AssumeRoleWithSAML API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials for users who have been authenticated +// via a SAML authentication response. This operation provides a mechanism for +// tying an enterprise identity store or directory to role-based AWS access +// without user-specific credentials or configuration. 
For a comparison of AssumeRoleWithSAML +// with the other API operations that produce temporary credentials, see Requesting +// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The temporary security credentials returned by this operation consist of +// an access key ID, a secret access key, and a security token. Applications +// can use these temporary security credentials to sign calls to AWS services. +// +// Session Duration +// +// By default, the temporary security credentials created by AssumeRoleWithSAML +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. Your role session lasts for the +// duration that you specify, or until the time specified in the SAML authentication +// response's SessionNotOnOrAfter value, whichever is shorter. You can provide +// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session +// duration setting for the role. This setting can have a value from 1 hour +// to 12 hours. To learn how to view the maximum value for your role, see View +// the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console +// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. +// +// Permissions +// +// The temporary security credentials created by AssumeRoleWithSAML can be used +// to make API calls to any AWS service with the following exception: you cannot +// call the STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies can't exceed 2,048 characters. Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// AWS API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed +// by the identity-based policy of the role that is being assumed. For more +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. +// +// Calling AssumeRoleWithSAML does not require the use of AWS security credentials. +// The identity of the caller is validated by using keys in the metadata document +// that is uploaded for the SAML provider entity for your identity provider. 
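+//
+// A minimal sketch of the call (the ARNs are placeholders and samlAssertion
+// holds the base64-encoded assertion from your IdP):
+//
+//    out, err := svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
+//        PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/demo-idp"),
+//        RoleArn:       aws.String("arn:aws:iam::123456789012:role/demo"),
+//        SAMLAssertion: aws.String(samlAssertion),
+//    })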
+// +// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail +// logs. The entry includes the value in the NameID element of the SAML assertion. +// We recommend that you use a NameIDType that is not associated with any personally +// identifiable information (PII). For example, you could instead use the persistent +// identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). +// +// Tags +// +// (Optional) You can configure your IdP to pass attributes into your SAML assertion +// as session tags. Each session tag consists of a key name and an associated +// value. For more information about session tags, see Passing Session Tags +// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You can pass up to 50 session tags. The plain text session tag keys can’t +// exceed 128 characters and the values can’t exceed 256 characters. For these +// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// An AWS conversion compresses the passed session policies and session tags +// into a packed binary format that has a separate limit. Your request can fail +// for this limit even if your plain text meets the other requirements. The +// PackedPolicySize response element indicates by percentage how close the policies +// and tags for your request are to the upper size limit. +// +// You can pass a session tag with the same key as a tag that is attached to +// the role. When you do, session tags override the role's tags with the same +// key. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// +// SAML Configuration +// +// Before your application can call AssumeRoleWithSAML, you must configure your +// SAML identity provider (IdP) to issue the claims required by AWS. Additionally, +// you must use AWS Identity and Access Management (IAM) to create a SAML provider +// entity in your AWS account that represents your identity provider. You must +// also create an IAM role that specifies this SAML provider in its trust policy. +// +// For more information, see the following resources: +// +// * About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +// +// * Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. +// +// * Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. +// +// * Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithSAML for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by AWS. Get +// a new identity token from the identity provider and then retry the request. +// +// * ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML +func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + return out, req.Send() +} + +// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRoleWithSAML for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
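+//
+// Sketch of a cancellable call (context.Context satisfies aws.Context in
+// current SDK versions; the timeout is an arbitrary example):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//    defer cancel()
+//    out, err := svc.AssumeRoleWithSAMLWithContext(ctx, input)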
+func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" + +// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithWebIdentity operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssumeRoleWithWebIdentityRequest method. +// req, resp := client.AssumeRoleWithWebIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity +func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { + op := &request.Operation{ + Name: opAssumeRoleWithWebIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleWithWebIdentityInput{} + } + + output = &AssumeRoleWithWebIdentityOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// AssumeRoleWithWebIdentity API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials for users who have been authenticated +// in a mobile or web application with a web identity provider. Example providers +// include Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID +// Connect-compatible identity provider. +// +// For mobile applications, we recommend that you use Amazon Cognito. You can +// use Amazon Cognito with the AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) +// and the AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) +// to uniquely identify a user. You can also supply the user with a consistent +// identity throughout the lifetime of an application. +// +// To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) +// in AWS SDK for Android Developer Guide and Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) +// in the AWS SDK for iOS Developer Guide. +// +// Calling AssumeRoleWithWebIdentity does not require the use of AWS security +// credentials. Therefore, you can distribute an application (for example, on +// mobile devices) that requests temporary security credentials without including +// long-term AWS credentials in the application. You also don't need to deploy +// server-based proxy services that use long-term AWS credentials. 
Instead, +// the identity of the caller is validated by using a token from the web identity +// provider. For a comparison of AssumeRoleWithWebIdentity with the other API +// operations that produce temporary credentials, see Requesting Temporary Security +// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The temporary security credentials returned by this API consist of an access +// key ID, a secret access key, and a security token. Applications can use these +// temporary security credentials to sign calls to AWS service API operations. +// +// Session Duration +// +// By default, the temporary security credentials created by AssumeRoleWithWebIdentity +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. You can provide a value from 900 +// seconds (15 minutes) up to the maximum session duration setting for the role. +// This setting can have a value from 1 hour to 12 hours. To learn how to view +// the maximum value for your role, see View the Maximum Session Duration Setting +// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console +// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. +// +// Permissions +// +// The temporary security credentials created by AssumeRoleWithWebIdentity can +// be used to make API calls to any AWS service with the following exception: +// you cannot call the STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies can't exceed 2,048 characters. Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// AWS API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed +// by the identity-based policy of the role that is being assumed. For more +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. +// +// Tags +// +// (Optional) You can configure your IdP to pass attributes into your web identity +// token as session tags. Each session tag consists of a key name and an associated +// value. For more information about session tags, see Passing Session Tags +// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. 
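+//
+// A minimal sketch of the call (the role ARN is a placeholder and idToken is
+// the OIDC token obtained from your provider; no AWS credentials are needed):
+//
+//    out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
+//        RoleArn:          aws.String("arn:aws:iam::123456789012:role/demo"),
+//        RoleSessionName:  aws.String("web-identity-demo"),
+//        WebIdentityToken: aws.String(idToken),
+//    })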
+// +// You can pass up to 50 session tags. The plain text session tag keys can’t +// exceed 128 characters and the values can’t exceed 256 characters. For these +// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// An AWS conversion compresses the passed session policies and session tags +// into a packed binary format that has a separate limit. Your request can fail +// for this limit even if your plain text meets the other requirements. The +// PackedPolicySize response element indicates by percentage how close the policies +// and tags for your request are to the upper size limit. +// +// You can pass a session tag with the same key as a tag that is attached to +// the role. When you do, the session tag overrides the role tag with the same +// key. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// +// Identities +// +// Before your application can call AssumeRoleWithWebIdentity, you must have +// an identity token from a supported identity provider and create a role that +// the application can assume. The role that your application assumes must trust +// the identity provider that is associated with the identity token. In other +// words, the identity provider must be specified in the role's trust policy. +// +// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail +// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) +// of the provided Web Identity Token. We recommend that you avoid using any +// personally identifiable information (PII) in this field. For example, you +// could instead use a GUID or a pairwise identifier, as suggested in the OIDC +// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). +// +// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity +// API, see the following resources: +// +// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// +// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). +// Walk through the process of authenticating through Login with Amazon, +// Facebook, or Google, getting temporary security credentials, and then +// using those credentials to make a request to AWS. +// +// * AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and +// AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). 
+// These toolkits contain sample apps that show how to invoke the identity +// providers. The toolkits then show how to use the information from these +// providers to get and use temporary security credentials. +// +// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). +// This article discusses web identity federation and shows an example of +// how to use web identity federation to get access to content in Amazon +// S3. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithWebIdentity for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError" +// The request could not be fulfilled because the identity provider (IDP) that +// was asked to verify the incoming identity token could not be reached. This +// is often a transient error caused by network conditions. Retry the request +// a limited number of times so that you don't exceed the request rate. If the +// error persists, the identity provider might be down or not responding. +// +// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by AWS. Get +// a new identity token from the identity provider and then retry the request. +// +// * ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. 
For more information, see Activating +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity +func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + return out, req.Send() +} + +// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRoleWithWebIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" + +// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the +// client's request for the DecodeAuthorizationMessage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DecodeAuthorizationMessageRequest method. +// req, resp := client.DecodeAuthorizationMessageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage +func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { + op := &request.Operation{ + Name: opDecodeAuthorizationMessage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DecodeAuthorizationMessageInput{} + } + + output = &DecodeAuthorizationMessageOutput{} + req = c.newRequest(op, input, output) + return +} + +// DecodeAuthorizationMessage API operation for AWS Security Token Service. +// +// Decodes additional information about the authorization status of a request +// from an encoded message returned in response to an AWS request. +// +// For example, if a user is not authorized to perform an operation that he +// or she has requested, the request returns a Client.UnauthorizedOperation +// response (an HTTP 403 response). Some AWS operations additionally return +// an encoded message that can provide details about this authorization failure. 
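+//
+// A minimal sketch of decoding such a message (encodedMsg is a placeholder
+// for the message captured from the failed request):
+//
+//    out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
+//        EncodedMessage: aws.String(encodedMsg),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.DecodedMessage))
+//    }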
+//
+// Only certain AWS operations return an encoded authorization message. The
+// documentation for an individual operation indicates whether that operation
+// returns an encoded message in addition to returning an HTTP code.
+//
+// The message is encoded because the details of the authorization status can
+// constitute privileged information that the user who requested the operation
+// should not see. To decode an authorization status message, a user must be
+// granted permissions via an IAM policy to request the DecodeAuthorizationMessage
+// (sts:DecodeAuthorizationMessage) action.
+//
+// The decoded message includes the following types of information:
+//
+//    * Whether the request was denied due to an explicit deny or due to the
+//    absence of an explicit allow. For more information, see Determining Whether
+//    a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
+//    in the IAM User Guide.
+//
+//    * The principal who made the request.
+//
+//    * The requested action.
+//
+//    * The requested resource.
+//
+//    * The values of condition keys in the context of the user's request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation DecodeAuthorizationMessage for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException"
+//   The error returned if the message passed to DecodeAuthorizationMessage was
+//   invalid. This can happen if the token contains invalid characters, such as
+//   linebreaks.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
+func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {
+	req, out := c.DecodeAuthorizationMessageRequest(input)
+	return out, req.Send()
+}
+
+// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DecodeAuthorizationMessage for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) {
+	req, out := c.DecodeAuthorizationMessageRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetAccessKeyInfo = "GetAccessKeyInfo"
+
+// GetAccessKeyInfoRequest generates a "aws/request.Request" representing the
+// client's request for the GetAccessKeyInfo operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
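+//
+//    // Illustrative sketch (editor's addition): because Send is a separate step,
+//    // the request can be mutated before it is sent, e.g. to add a custom header.
+//    // "client" and the header name are assumptions for the example.
+//    req, out := client.GetAccessKeyInfoRequest(&sts.GetAccessKeyInfoInput{
+//        AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
+//    })
+//    req.HTTPRequest.Header.Set("X-Example-Trace", "demo")
+//    if err := req.Send(); err == nil {
+//        fmt.Println(aws.StringValue(out.Account))
+//    }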
+//
+// See GetAccessKeyInfo for more information on using the GetAccessKeyInfo
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the GetAccessKeyInfoRequest method.
+//    req, resp := client.GetAccessKeyInfoRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo
+func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) {
+	op := &request.Operation{
+		Name:       opGetAccessKeyInfo,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetAccessKeyInfoInput{}
+	}
+
+	output = &GetAccessKeyInfoOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetAccessKeyInfo API operation for AWS Security Token Service.
+//
+// Returns the account identifier for the specified access key ID.
+//
+// Access keys consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE)
+// and a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY).
+// For more information about access keys, see Managing Access Keys for IAM
+// Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html)
+// in the IAM User Guide.
+//
+// When you pass an access key ID to this operation, it returns the ID of the
+// AWS account to which the keys belong. Access key IDs beginning with AKIA
+// are long-term credentials for an IAM user or the AWS account root user. Access
+// key IDs beginning with ASIA are temporary credentials that are created using
+// STS operations. If the account in the response belongs to you, you can sign
+// in as the root user and review your root user access keys. Then, you can
+// pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html)
+// to learn which IAM user owns the keys. To learn who requested the temporary
+// credentials for an ASIA access key, view the STS events in your CloudTrail
+// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html)
+// in the IAM User Guide.
+//
+// This operation does not indicate the state of the access key. The key might
+// be active, inactive, or deleted. Active keys might not have permissions to
+// perform an operation. Providing a deleted access key might return an error
+// that the key doesn't exist.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetAccessKeyInfo for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo
+func (c *STS) GetAccessKeyInfo(input *GetAccessKeyInfoInput) (*GetAccessKeyInfoOutput, error) {
+	req, out := c.GetAccessKeyInfoRequest(input)
+	return out, req.Send()
+}
+
+// GetAccessKeyInfoWithContext is the same as GetAccessKeyInfo with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetAccessKeyInfo for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetAccessKeyInfoWithContext(ctx aws.Context, input *GetAccessKeyInfoInput, opts ...request.Option) (*GetAccessKeyInfoOutput, error) {
+	req, out := c.GetAccessKeyInfoRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetCallerIdentity = "GetCallerIdentity"
+
+// GetCallerIdentityRequest generates a "aws/request.Request" representing the
+// client's request for the GetCallerIdentity operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetCallerIdentity for more information on using the GetCallerIdentity
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the GetCallerIdentityRequest method.
+//    req, resp := client.GetCallerIdentityRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) {
+	op := &request.Operation{
+		Name:       opGetCallerIdentity,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetCallerIdentityInput{}
+	}
+
+	output = &GetCallerIdentityOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetCallerIdentity API operation for AWS Security Token Service.
+//
+// Returns details about the IAM user or role whose credentials are used to
+// call the operation.
+//
+// No permissions are required to perform this operation. If an administrator
+// adds a policy to your IAM user or role that explicitly denies access to the
+// sts:GetCallerIdentity action, you can still perform this operation. Permissions
+// are not required because the same information is returned when an IAM user
+// or role is denied access. To view an example response, see I Am Not Authorized
+// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa)
+// in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetCallerIdentity for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) {
+	req, out := c.GetCallerIdentityRequest(input)
+	return out, req.Send()
+}
+
+// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetCallerIdentity for details on how to use this API operation.
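+//
+//    // Illustrative sketch (editor's addition): bounding the call with a
+//    // deadline via the context; "client" is assumed to be a configured *sts.STS.
+//    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//    defer cancel()
+//    out, err := client.GetCallerIdentityWithContext(ctx, &sts.GetCallerIdentityInput{})
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.Arn))
+//    }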
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) {
+	req, out := c.GetCallerIdentityRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetFederationToken = "GetFederationToken"
+
+// GetFederationTokenRequest generates a "aws/request.Request" representing the
+// client's request for the GetFederationToken operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetFederationToken for more information on using the GetFederationToken
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the GetFederationTokenRequest method.
+//    req, resp := client.GetFederationTokenRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
+func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) {
+	op := &request.Operation{
+		Name:       opGetFederationToken,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetFederationTokenInput{}
+	}
+
+	output = &GetFederationTokenOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetFederationToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials (consisting of an access
+// key ID, a secret access key, and a security token) for a federated user.
+// A typical use is in a proxy application that gets temporary security credentials
+// on behalf of distributed applications inside a corporate network. You must
+// call the GetFederationToken operation using the long-term security credentials
+// of an IAM user. As a result, this call is appropriate in contexts where those
+// credentials can be safely stored, usually in a server-based application.
+// For a comparison of GetFederationToken with the other API operations that
+// produce temporary credentials, see Requesting Temporary Security Credentials
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// You can create a mobile-based or browser-based app that can authenticate
+// users using a web identity provider like Login with Amazon, Facebook, Google,
+// or an OpenID Connect-compatible identity provider. In this case, we recommend
+// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
+// For more information, see Federation Through a Web-based Identity Provider +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// in the IAM User Guide. +// +// You can also call GetFederationToken using the security credentials of an +// AWS account root user, but we do not recommend it. Instead, we recommend +// that you create an IAM user for the purpose of the proxy application. Then +// attach a policy to the IAM user that limits federated users to only the actions +// and resources that they need to access. For more information, see IAM Best +// Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) +// in the IAM User Guide. +// +// Session duration +// +// The temporary credentials are valid for the specified duration, from 900 +// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default +// session duration is 43,200 seconds (12 hours). Temporary credentials that +// are obtained by using AWS account root user credentials have a maximum duration +// of 3,600 seconds (1 hour). +// +// Permissions +// +// You can use the temporary credentials created by GetFederationToken in any +// AWS service except the following: +// +// * You cannot call any IAM operations using the AWS CLI or the AWS API. +// +// * You cannot call any STS operations except GetCallerIdentity. +// +// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies can't exceed 2,048 characters. +// +// Though the session policy parameters are optional, if you do not pass a policy, +// then the resulting federated user session has no permissions. When you pass +// session policies, the session permissions are the intersection of the IAM +// user policies and the session policies that you pass. This gives you a way +// to further restrict the permissions for a federated user. You cannot use +// session policies to grant more permissions than those that are defined in +// the permissions policy of the IAM user. For more information, see Session +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. For information about using GetFederationToken to +// create temporary security credentials, see GetFederationToken—Federation +// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). +// +// You can use the credentials to access a resource that has a resource-based +// policy. If that policy specifically references the federated user session +// in the Principal element of the policy, the session has the permissions allowed +// by the policy. These permissions are granted in addition to the permissions +// granted by the session policies. +// +// Tags +// +// (Optional) You can pass tag key-value pairs to your session. These are called +// session tags. For more information about session tags, see Passing Session +// Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. 
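+//
+//    // Illustrative sketch (editor's addition): a proxy application federating
+//    // a user with an inline session policy and one session tag. "client" is
+//    // assumed to be a configured *sts.STS and policyJSON a valid JSON policy
+//    // document.
+//    out, err := client.GetFederationToken(&sts.GetFederationTokenInput{
+//        Name:   aws.String("app-user"),
+//        Policy: aws.String(policyJSON),
+//        Tags:   []*sts.Tag{{Key: aws.String("Department"), Value: aws.String("Engineering")}},
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.Credentials.AccessKeyId))
+//    }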
+// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// Tag key–value pairs are not case sensitive, but case is preserved. This +// means that you cannot have separate Department and department tag keys. Assume +// that the user that you are federating has the Department=Marketing tag and +// you pass the department=engineering session tag. Department and department +// are not saved as separate tags, and the session tag passed in the request +// takes precedence over the user tag. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetFederationToken for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken +func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { + req, out := c.GetFederationTokenRequest(input) + return out, req.Send() +} + +// GetFederationTokenWithContext is the same as GetFederationToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetFederationToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) {
+	req, out := c.GetFederationTokenRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetSessionToken = "GetSessionToken"
+
+// GetSessionTokenRequest generates a "aws/request.Request" representing the
+// client's request for the GetSessionToken operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetSessionToken for more information on using the GetSessionToken
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the GetSessionTokenRequest method.
+//    req, resp := client.GetSessionTokenRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
+func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) {
+	op := &request.Operation{
+		Name:       opGetSessionToken,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetSessionTokenInput{}
+	}
+
+	output = &GetSessionTokenOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetSessionToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary credentials for an AWS account or IAM user. The
+// credentials consist of an access key ID, a secret access key, and a security
+// token. Typically, you use GetSessionToken if you want to use MFA to protect
+// programmatic calls to specific AWS API operations like Amazon EC2 StopInstances.
+// MFA-enabled IAM users would need to call GetSessionToken and submit an MFA
+// code that is associated with their MFA device. Using the temporary security
+// credentials that are returned from the call, IAM users can then make programmatic
+// calls to API operations that require MFA authentication. If you do not supply
+// a correct MFA code, then the API returns an access denied error. For a comparison
+// of GetSessionToken with the other API operations that produce temporary credentials,
+// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// Session Duration
+//
+// The GetSessionToken operation must be called by using the long-term AWS security
+// credentials of the AWS account root user or an IAM user. Credentials that
+// are created by IAM users are valid for the duration that you specify. This
+// duration can range from 900 seconds (15 minutes) up to a maximum of 129,600
+// seconds (36 hours), with a default of 43,200 seconds (12 hours).
Credentials +// based on account credentials can range from 900 seconds (15 minutes) up to +// 3,600 seconds (1 hour), with a default of 1 hour. +// +// Permissions +// +// The temporary security credentials created by GetSessionToken can be used +// to make API calls to any AWS service with the following exceptions: +// +// * You cannot call any IAM API operations unless MFA authentication information +// is included in the request. +// +// * You cannot call any STS API except AssumeRole or GetCallerIdentity. +// +// We recommend that you do not call GetSessionToken with AWS account root user +// credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) +// by creating one or more IAM users, giving them the necessary permissions, +// and using IAM users for everyday interaction with AWS. +// +// The credentials that are returned by GetSessionToken are based on permissions +// associated with the user whose credentials were used to call the operation. +// If GetSessionToken is called using AWS account root user credentials, the +// temporary credentials have root user permissions. Similarly, if GetSessionToken +// is called using the credentials of an IAM user, the temporary credentials +// have the same permissions as the IAM user. +// +// For more information about using GetSessionToken to create temporary credentials, +// go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetSessionToken for usage and error information. +// +// Returned Error Codes: +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken +func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { + req, out := c.GetSessionTokenRequest(input) + return out, req.Send() +} + +// GetSessionTokenWithContext is the same as GetSessionToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetSessionToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) { + req, out := c.GetSessionTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +type AssumeRoleInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // A unique identifier that might be required when you assume a role in another + // account. If the administrator of the account to which the role belongs provided + // you with an external ID, then provide that value in the ExternalId parameter. + // This value can be any string, such as a passphrase or account number. A cross-account + // role is usually set up to trust everyone in an account. Therefore, the administrator + // of the trusting account might send an external ID to the administrator of + // the trusted account. That way, only someone with the ID can assume the role, + // rather than everyone in the account. For more information about the external + // ID, see How to Use an External ID When Granting Access to Your AWS Resources + // to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@:/- + ExternalId *string `min:"2" type:"string"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. 
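+	//
+	//    // Illustrative sketch (editor's addition): restricting an assumed-role
+	//    // session to read-only S3 access with an inline session policy; the
+	//    // role ARN and session name are placeholders.
+	//    input := &sts.AssumeRoleInput{
+	//        RoleArn:         aws.String("arn:aws:iam::123456789012:role/demo"),
+	//        RoleSessionName: aws.String("example-session"),
+	//        Policy: aws.String(`{"Version":"2012-10-17","Statement":[
+	//            {"Effect":"Allow","Action":"s3:GetObject","Resource":"*"}]}`),
+	//    }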
+ // + // The plain text that you use for both inline and managed session policies + // can't exceed 2,048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies can't exceed 2,048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // The Amazon Resource Name (ARN) of the role to assume. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. + // + // Use the role session name to uniquely identify a session when the same role + // is assumed by different principals or for different reasons. In cross-account + // scenarios, the role session name is visible to, and can be logged by the + // account that owns the role. The role session name is also used in the ARN + // of the assumed role principal. This means that subsequent cross-account API + // requests that use the temporary security credentials will expose the role + // session name to the external account in their AWS CloudTrail logs. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. 
You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SerialNumber *string `min:"9" type:"string"` + + // A list of session tags that you want to pass. Each session tag consists of + // a key name and an associated value. For more information about session tags, + // see Tagging AWS STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plain + // text session tag keys can’t exceed 128 characters, and the values can’t + // exceed 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the role. When you do, session tags override a role tag with the same + // key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This + // means that you cannot have separate Department and department tag keys. Assume + // that the role has the Department=Marketing tag and you pass the department=engineering + // session tag. Department and department are not saved as separate tags, and + // the session tag passed in the request takes precedence over the role tag. + // + // Additionally, if you used temporary credentials to perform this operation, + // the new session inherits any transitive session tags from the calling session. + // If you pass a session tag with the same key as an inherited tag, the operation + // fails. To view the inherited tags for a session, see the AWS CloudTrail logs. + // For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/session-tags.html#id_session-tags_ctlogs) + // in the IAM User Guide. + Tags []*Tag `type:"list"` + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA (that is, if the policy includes a condition that tests + // for MFA). If the role being assumed requires MFA and if the TokenCode value + // is missing or expired, the AssumeRole call returns an "access denied" error. 
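+	//
+	//    // Illustrative sketch (editor's addition): supplying the MFA serial and
+	//    // a current code when the role's trust policy requires MFA; "input" is
+	//    // an *sts.AssumeRoleInput under construction and the values are placeholders.
+	//    input.SerialNumber = aws.String("arn:aws:iam::123456789012:mfa/user")
+	//    input.TokenCode = aws.String("123456")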
+ // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` + + // A list of keys for session tags that you want to set as transitive. If you + // set a tag key as transitive, the corresponding key and value passes to subsequent + // sessions in a role chain. For more information, see Chaining Roles with Session + // Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) + // in the IAM User Guide. + // + // This parameter is optional. When you set session tags as transitive, the + // session policy and session tags packed binary limit is not affected. + // + // If you choose not to specify a transitive tag key, then no tags are passed + // from this session to any subsequent sessions. + TransitiveTagKeys []*string `type:"list"` +} + +// String returns the string representation +func (s AssumeRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.ExternalId != nil && len(*s.ExternalId) < 2 { + invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput { + s.DurationSeconds = &v + return s +} + +// SetExternalId sets the ExternalId field's value. +func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput { + s.ExternalId = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. 
+func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleInput { + s.PolicyArns = v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput { + s.RoleSessionName = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput { + s.SerialNumber = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *AssumeRoleInput) SetTags(v []*Tag) *AssumeRoleInput { + s.Tags = v + return s +} + +// SetTokenCode sets the TokenCode field's value. +func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { + s.TokenCode = &v + return s +} + +// SetTransitiveTagKeys sets the TransitiveTagKeys field's value. +func (s *AssumeRoleInput) SetTransitiveTagKeys(v []*string) *AssumeRoleInput { + s.TransitiveTagKeys = v + return s +} + +// Contains the response to a successful AssumeRole request, including temporary +// AWS credentials that can be used to make AWS requests. +type AssumeRoleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s AssumeRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput { + s.AssumedRoleUser = v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { + s.PackedPolicySize = &v + return s +} + +type AssumeRoleWithSAMLInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. Your role session lasts for + // the duration that you specify for the DurationSeconds parameter, or until + // the time specified in the SAML authentication response's SessionNotOnOrAfter + // value, whichever is shorter. 
You can provide a DurationSeconds value from + // 900 seconds (15 minutes) up to the maximum session duration setting for the + // role. This setting can have a value from 1 hour to 12 hours. If you specify + // a value higher than this setting, the operation fails. For example, if you + // specify a session duration of 12 hours, but your administrator set the maximum + // session duration to 6 hours, your operation fails. To learn how to view the + // maximum value for your role, see View the Maximum Session Duration Setting + // for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plain text that you use for both inline and managed session policies + // can't exceed 2,048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies can't exceed 2,048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. 
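+	//
+	//    // Illustrative sketch (editor's addition): attaching an AWS managed policy
+	//    // as a managed session policy by ARN; "input" is an
+	//    // *sts.AssumeRoleWithSAMLInput under construction.
+	//    input.PolicyArns = []*sts.PolicyDescriptorType{
+	//        {Arn: aws.String("arn:aws:iam::aws:policy/ReadOnlyAccess")},
+	//    }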
+ // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes + // the IdP. + // + // PrincipalArn is a required field + PrincipalArn *string `min:"20" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // The base-64 encoded SAML authentication response provided by the IdP. + // + // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) + // in the IAM User Guide. + // + // SAMLAssertion is a required field + SAMLAssertion *string `min:"4" type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation +func (s AssumeRoleWithSAMLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithSAMLInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AssumeRoleWithSAMLInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PrincipalArn == nil { + invalidParams.Add(request.NewErrParamRequired("PrincipalArn")) + } + if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.SAMLAssertion == nil { + invalidParams.Add(request.NewErrParamRequired("SAMLAssertion")) + } + if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 { + invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput { + s.DurationSeconds = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleWithSAMLInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithSAMLInput { + s.PolicyArns = v + return s +} + +// SetPrincipalArn sets the PrincipalArn field's value. +func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput { + s.PrincipalArn = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput { + s.RoleArn = &v + return s +} + +// SetSAMLAssertion sets the SAMLAssertion field's value. +func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput { + s.SAMLAssertion = &v + return s +} + +// Contains the response to a successful AssumeRoleWithSAML request, including +// temporary AWS credentials that can be used to make AWS requests. +type AssumeRoleWithSAMLOutput struct { + _ struct{} `type:"structure"` + + // The identifiers for the temporary security credentials that the operation + // returns. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The value of the Recipient attribute of the SubjectConfirmationData element + // of the SAML assertion. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // The value of the Issuer element of the SAML assertion. 
+ Issuer *string `type:"string"` + + // A hash value based on the concatenation of the Issuer response value, the + // AWS account ID, and the friendly name (the last part of the ARN) of the SAML + // provider in IAM. The combination of NameQualifier and Subject can be used + // to uniquely identify a federated user. + // + // The following pseudocode shows how the hash value is calculated: + // + // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" + // ) ) + NameQualifier *string `type:"string"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The value of the NameID element in the Subject element of the SAML assertion. + Subject *string `type:"string"` + + // The format of the name ID, as defined by the Format attribute in the NameID + // element of the SAML assertion. Typical examples of the format are transient + // or persistent. + // + // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, + // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient + // is returned as transient. If the format includes any other prefix, the format + // is returned with no modifications. + SubjectType *string `type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithSAMLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithSAMLOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput { + s.Credentials = v + return s +} + +// SetIssuer sets the Issuer field's value. +func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput { + s.Issuer = &v + return s +} + +// SetNameQualifier sets the NameQualifier field's value. +func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput { + s.NameQualifier = &v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput { + s.PackedPolicySize = &v + return s +} + +// SetSubject sets the Subject field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput { + s.Subject = &v + return s +} + +// SetSubjectType sets the SubjectType field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput { + s.SubjectType = &v + return s +} + +type AssumeRoleWithWebIdentityInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. 
If you specify a value + // higher than this setting, the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plain text that you use for both inline and managed session policies + // can't exceed 2,048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies can't exceed 2,048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. 
Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // The fully qualified host component of the domain name of the identity provider. + // + // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com + // and graph.facebook.com are the only supported identity providers for OAuth + // 2.0 access tokens. Do not include URL schemes and port numbers. + // + // Do not specify this value for OpenID Connect ID tokens. + ProviderId *string `min:"4" type:"string"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. Typically, you pass the name + // or identifier that is associated with the user who is using your application. + // That way, the temporary security credentials that your application will use + // are associated with that user. This session name is included as part of the + // ARN and assumed role ID in the AssumedRoleUser response element. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The OAuth 2.0 access token or OpenID Connect ID token that is provided by + // the identity provider. Your application must get this token by authenticating + // the user who is using your application with a web identity provider before + // the application makes an AssumeRoleWithWebIdentity call. + // + // WebIdentityToken is a required field + WebIdentityToken *string `min:"4" type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
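A minimal sketch (not part of the vendored file) of how a caller might combine the fluent setters above with the client-side Validate method defined next; the role ARN, session name, and token handling are hypothetical:

// Sketch: build an AssumeRoleWithWebIdentity request with the fluent
// setters, then run client-side validation before any network call.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/sts"
)

func buildWebIdentityInput(token string) (*sts.AssumeRoleWithWebIdentityInput, error) {
	in := (&sts.AssumeRoleWithWebIdentityInput{}).
		SetRoleArn("arn:aws:iam::123456789012:role/demo-role"). // hypothetical ARN
		SetRoleSessionName("demo-session").
		SetWebIdentityToken(token).
		SetDurationSeconds(3600)
	// Validate enforces the constraints in the struct tags (RoleArn >= 20
	// chars, RoleSessionName >= 2 chars, DurationSeconds >= 900, ...) so
	// malformed requests fail locally instead of on the wire.
	if err := in.Validate(); err != nil {
		return nil, fmt.Errorf("invalid AssumeRoleWithWebIdentity input: %w", err)
	}
	return in, nil
}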
+func (s *AssumeRoleWithWebIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.ProviderId != nil && len(*s.ProviderId) < 4 { + invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.WebIdentityToken == nil { + invalidParams.Add(request.NewErrParamRequired("WebIdentityToken")) + } + if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput { + s.DurationSeconds = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleWithWebIdentityInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithWebIdentityInput { + s.PolicyArns = v + return s +} + +// SetProviderId sets the ProviderId field's value. +func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput { + s.ProviderId = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput { + s.RoleSessionName = &v + return s +} + +// SetWebIdentityToken sets the WebIdentityToken field's value. +func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput { + s.WebIdentityToken = &v + return s +} + +// Contains the response to a successful AssumeRoleWithWebIdentity request, +// including temporary AWS credentials that can be used to make AWS requests. +type AssumeRoleWithWebIdentityOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. 
+ AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The intended audience (also known as client ID) of the web identity token. + // This is traditionally the client identifier issued to the application that + // requested the web identity token. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The issuing authority of the web identity token presented. For OpenID Connect + // ID tokens, this contains the value of the iss field. For OAuth 2.0 access + // tokens, this contains the value of the ProviderId parameter that was passed + // in the AssumeRoleWithWebIdentity request. + Provider *string `type:"string"` + + // The unique user identifier that is returned by the identity provider. This + // identifier is associated with the WebIdentityToken that was submitted with + // the AssumeRoleWithWebIdentity call. The identifier is typically unique to + // the user and the application that acquired the WebIdentityToken (pairwise + // identifier). For OpenID Connect ID tokens, this field contains the value + // returned by the identity provider as the token's sub (Subject) claim. + SubjectFromWebIdentityToken *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput { + s.PackedPolicySize = &v + return s +} + +// SetProvider sets the Provider field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput { + s.Provider = &v + return s +} + +// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput { + s.SubjectFromWebIdentityToken = &v + return s +} + +// The identifiers for the temporary security credentials that the operation +// returns. 
+type AssumedRoleUser struct { + _ struct{} `type:"structure"` + + // The ARN of the temporary security credentials that are returned from the + // AssumeRole action. For more information about ARNs and how to use them in + // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // A unique identifier that contains the role ID and the role session name of + // the role that is being assumed. The role ID is generated by AWS when the + // role is created. + // + // AssumedRoleId is a required field + AssumedRoleId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumedRoleUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumedRoleUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser { + s.Arn = &v + return s +} + +// SetAssumedRoleId sets the AssumedRoleId field's value. +func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { + s.AssumedRoleId = &v + return s +} + +// AWS credentials for API authentication. +type Credentials struct { + _ struct{} `type:"structure"` + + // The access key ID that identifies the temporary security credentials. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The date on which the current credentials expire. + // + // Expiration is a required field + Expiration *time.Time `type:"timestamp" required:"true"` + + // The secret access key that can be used to sign requests. + // + // SecretAccessKey is a required field + SecretAccessKey *string `type:"string" required:"true"` + + // The token that users must pass to the service API to use the temporary credentials. + // + // SessionToken is a required field + SessionToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Credentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Credentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *Credentials) SetAccessKeyId(v string) *Credentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *Credentials) SetExpiration(v time.Time) *Credentials { + s.Expiration = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *Credentials) SetSecretAccessKey(v string) *Credentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. +func (s *Credentials) SetSessionToken(v string) *Credentials { + s.SessionToken = &v + return s +} + +type DecodeAuthorizationMessageInput struct { + _ struct{} `type:"structure"` + + // The encoded message that was returned with the response. 
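The Credentials structure above is the common currency of every STS response in this file. As a sketch (not vendored code), temporary credentials are typically fed back into the SDK through the static-credentials provider:

// Sketch: build a session that signs requests with the temporary keys
// returned by AssumeRole*, GetFederationToken, or GetSessionToken.
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func sessionFromSTS(c *sts.Credentials) (*session.Session, error) {
	return session.NewSession(&aws.Config{
		Credentials: credentials.NewStaticCredentials(
			*c.AccessKeyId, *c.SecretAccessKey, *c.SessionToken),
	})
}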
+ // + // EncodedMessage is a required field + EncodedMessage *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DecodeAuthorizationMessageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"} + if s.EncodedMessage == nil { + invalidParams.Add(request.NewErrParamRequired("EncodedMessage")) + } + if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncodedMessage sets the EncodedMessage field's value. +func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput { + s.EncodedMessage = &v + return s +} + +// A document that contains additional information about the authorization status +// of a request from an encoded message that is returned in response to an AWS +// request. +type DecodeAuthorizationMessageOutput struct { + _ struct{} `type:"structure"` + + // An XML document that contains the decoded message. + DecodedMessage *string `type:"string"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageOutput) GoString() string { + return s.String() +} + +// SetDecodedMessage sets the DecodedMessage field's value. +func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput { + s.DecodedMessage = &v + return s +} + +// Identifiers for the federated user that is associated with the credentials. +type FederatedUser struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the federated user that is associated with the credentials. + // For more information about ARNs and how to use them in policies, see IAM + // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // The string that identifies the federated user associated with the credentials, + // similar to the unique ID of an IAM user. + // + // FederatedUserId is a required field + FederatedUserId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s FederatedUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FederatedUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *FederatedUser) SetArn(v string) *FederatedUser { + s.Arn = &v + return s +} + +// SetFederatedUserId sets the FederatedUserId field's value. +func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { + s.FederatedUserId = &v + return s +} + +type GetAccessKeyInfoInput struct { + _ struct{} `type:"structure"` + + // The identifier of an access key. + // + // This parameter allows (through its regex pattern) a string of characters + // that can consist of any upper- or lowercase letter or digit. 
+ // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAccessKeyInfoInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessKeyInfoInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAccessKeyInfoInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAccessKeyInfoInput"} + if s.AccessKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("AccessKeyId")) + } + if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 { + invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *GetAccessKeyInfoInput) SetAccessKeyId(v string) *GetAccessKeyInfoInput { + s.AccessKeyId = &v + return s +} + +type GetAccessKeyInfoOutput struct { + _ struct{} `type:"structure"` + + // The number used to identify the AWS account. + Account *string `type:"string"` +} + +// String returns the string representation +func (s GetAccessKeyInfoOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessKeyInfoOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *GetAccessKeyInfoOutput) SetAccount(v string) *GetAccessKeyInfoOutput { + s.Account = &v + return s +} + +type GetCallerIdentityInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetCallerIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetCallerIdentity request, including +// information about the entity making the request. +type GetCallerIdentityOutput struct { + _ struct{} `type:"structure"` + + // The AWS account ID number of the account that owns or contains the calling + // entity. + Account *string `type:"string"` + + // The AWS ARN associated with the calling entity. + Arn *string `min:"20" type:"string"` + + // The unique identifier of the calling entity. The exact value depends on the + // type of entity that is making the call. The values returned are those listed + // in the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // found on the Policy Variables reference page in the IAM User Guide. + UserId *string `type:"string"` +} + +// String returns the string representation +func (s GetCallerIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput { + s.Account = &v + return s +} + +// SetArn sets the Arn field's value. +func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput { + s.Arn = &v + return s +} + +// SetUserId sets the UserId field's value. 
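GetCallerIdentity is the simplest operation in this file: it takes no parameters, and the response identifies whoever signed the request. A sketch (not vendored code) of a typical "who am I" check:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))
	out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("account:", aws.StringValue(out.Account))
	fmt.Println("arn:", aws.StringValue(out.Arn))
	fmt.Println("user id:", aws.StringValue(out.UserId))
}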
+func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { + s.UserId = &v + return s +} + +type GetFederationTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the session should last. Acceptable durations + // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds + // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained + // using AWS account root user credentials are restricted to a maximum of 3,600 + // seconds (one hour). If the specified duration is longer than one hour, the + // session obtained by using root user credentials defaults to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The name of the federated user. The name is used as an identifier for the + // temporary security credentials (such as Bob). For example, you can reference + // the federated user name in a resource-based policy, such as in an Amazon + // S3 bucket policy. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // Name is a required field + Name *string `min:"2" type:"string" required:"true"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policies to + // use as managed session policies. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. + // + // When you pass session policies, the session permissions are the intersection + // of the IAM user policies and the session policies that you pass. This gives + // you a way to further restrict the permissions for a federated user. You cannot + // use session policies to grant more permissions than those that are defined + // in the permissions policy of the IAM user. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The resulting credentials can be used to access a resource that has a resource-based + // policy. If that policy specifically references the federated user session + // in the Principal element of the policy, the session has the permissions allowed + // by the policy. These permissions are granted in addition to the permissions + // that are granted by the session policies. + // + // The plain text that you use for both inline and managed session policies + // can't exceed 2,048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. 
The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as a managed session policy. The policies must exist in the same account + // as the IAM user that is requesting federated access. + // + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policies to + // use as managed session policies. The plain text that you use for both inline + // and managed session policies can't exceed 2,048 characters. You can provide + // up to 10 managed policy ARNs. For more information about ARNs, see Amazon + // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. + // + // When you pass session policies, the session permissions are the intersection + // of the IAM user policies and the session policies that you pass. This gives + // you a way to further restrict the permissions for a federated user. You cannot + // use session policies to grant more permissions than those that are defined + // in the permissions policy of the IAM user. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The resulting credentials can be used to access a resource that has a resource-based + // policy. If that policy specifically references the federated user session + // in the Principal element of the policy, the session has the permissions allowed + // by the policy. These permissions are granted in addition to the permissions + // that are granted by the session policies. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // A list of session tags. Each session tag consists of a key name and an associated + // value. For more information about session tags, see Passing Session Tags + // in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plain + // text session tag keys can’t exceed 128 characters and the values can’t + // exceed 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. 
Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the user you are federating. When you do, session tags override a user + // tag with the same key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This + // means that you cannot have separate Department and department tag keys. Assume + // that the role has the Department=Marketing tag and you pass the department=engineering + // session tag. Department and department are not saved as separate tags, and + // the session tag passed in the request takes precedence over the role tag. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s GetFederationTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetFederationTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 2 { + invalidParams.Add(request.NewErrParamMinLen("Name", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput { + s.DurationSeconds = &v + return s +} + +// SetName sets the Name field's value. +func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput { + s.Name = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetFederationTokenInput { + s.PolicyArns = v + return s +} + +// SetTags sets the Tags field's value. +func (s *GetFederationTokenInput) SetTags(v []*Tag) *GetFederationTokenInput { + s.Tags = v + return s +} + +// Contains the response to a successful GetFederationToken request, including +// temporary AWS credentials that can be used to make AWS requests. +type GetFederationTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. 
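A sketch (not vendored code) of a GetFederationToken request that exercises the fields documented above; the user name, policy ARN, and tag values are illustrative:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

func federationInput() *sts.GetFederationTokenInput {
	return &sts.GetFederationTokenInput{
		Name:            aws.String("Bob"),
		DurationSeconds: aws.Int64(3600),
		// Without at least one inline or managed session policy the
		// resulting federated session has no permissions.
		PolicyArns: []*sts.PolicyDescriptorType{
			{Arn: aws.String("arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess")},
		},
		Tags: []*sts.Tag{
			{Key: aws.String("Department"), Value: aws.String("Engineering")},
		},
	}
}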
+ // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // Identifiers for the federated user associated with the credentials (such + // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You + // can use the federated user's ARN in your resource-based policies, such as + // an Amazon S3 bucket policy. + FederatedUser *FederatedUser `type:"structure"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s GetFederationTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. +func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput { + s.Credentials = v + return s +} + +// SetFederatedUser sets the FederatedUser field's value. +func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput { + s.FederatedUser = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput { + s.PackedPolicySize = &v + return s +} + +type GetSessionTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the credentials should remain valid. Acceptable + // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600 + // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions + // for AWS account owners are restricted to a maximum of 3,600 seconds (one + // hour). If the duration is longer than one hour, the session for AWS account + // owners defaults to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The identification number of the MFA device that is associated with the IAM + // user who is making the GetSessionToken call. Specify this value if the IAM + // user has a policy that requires MFA authentication. The value is either the + // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource + // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // You can find the device for an IAM user by going to the AWS Management Console + // and viewing the user's security credentials. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@:/- + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if MFA is required. If any policy requires + // the IAM user to submit an MFA code, specify this value. If MFA authentication + // is required, the user must provide a code when requesting a set of temporary + // security credentials. A user who fails to provide the code receives an "access + // denied" response when requesting resources that require MFA authentication. 
+ // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s GetSessionTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSessionTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSessionTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput { + s.DurationSeconds = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput { + s.SerialNumber = &v + return s +} + +// SetTokenCode sets the TokenCode field's value. +func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput { + s.TokenCode = &v + return s +} + +// Contains the response to a successful GetSessionToken request, including +// temporary AWS credentials that can be used to make AWS requests. +type GetSessionTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` +} + +// String returns the string representation +func (s GetSessionTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSessionTokenOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. +func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput { + s.Credentials = v + return s +} + +// A reference to the IAM managed policy that is passed as a session policy +// for a role session or a federated user session. +type PolicyDescriptorType struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session + // policy for the role. For more information about ARNs, see Amazon Resource + // Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `locationName:"arn" min:"20" type:"string"` +} + +// String returns the string representation +func (s PolicyDescriptorType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyDescriptorType) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
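A sketch (not vendored code) of a GetSessionToken request for an IAM user whose policies require MFA; the serial number and token code are hypothetical values matching the formats described above:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

func mfaSessionInput() *sts.GetSessionTokenInput {
	return &sts.GetSessionTokenInput{
		DurationSeconds: aws.Int64(3600),
		// SerialNumber and TokenCode are only needed when a policy
		// requires MFA for the calling IAM user.
		SerialNumber: aws.String("arn:aws:iam::123456789012:mfa/alice"),
		TokenCode:    aws.String("123456"),
	}
}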
+func (s *PolicyDescriptorType) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PolicyDescriptorType"} + if s.Arn != nil && len(*s.Arn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { + s.Arn = &v + return s +} + +// You can pass custom key-value pair attributes when you assume a role or federate +// a user. These are called session tags. You can then use the session tags +// to control access to resources. For more information, see Tagging AWS STS +// Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +type Tag struct { + _ struct{} `type:"structure"` + + // The key for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag keys can’t + // exceed 128 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The value for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag values can’t + // exceed 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. 
+func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/extender/code/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go new file mode 100644 index 000000000..d5307fcaa --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go @@ -0,0 +1,11 @@ +package sts + +import "github.com/aws/aws-sdk-go/aws/request" + +func init() { + initRequest = customizeRequest +} + +func customizeRequest(r *request.Request) { + r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException) +} diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/extender/code/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go new file mode 100644 index 000000000..fcb720dca --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -0,0 +1,108 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package sts provides the client and types for making API +// requests to AWS Security Token Service. +// +// The AWS Security Token Service (STS) is a web service that enables you to +// request temporary, limited-privilege credentials for AWS Identity and Access +// Management (IAM) users or for users that you authenticate (federated users). +// This guide provides descriptions of the STS API. For more detailed information +// about using this service, go to Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// +// For information about setting up signatures and authorization through the +// API, go to Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) +// in the AWS General Reference. For general information about the Query API, +// go to Making Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in Using IAM. For information about using security tokens with other AWS +// products, go to AWS Services That Work with IAM (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) +// in the IAM User Guide. +// +// If you're new to AWS and need additional technical information about a specific +// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/ +// (http://aws.amazon.com/documentation/). +// +// Endpoints +// +// By default, AWS Security Token Service (STS) is available as a global service, +// and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com. +// Global requests map to the US East (N. Virginia) region. AWS recommends using +// Regional AWS STS endpoints instead of the global endpoint to reduce latency, +// build in redundancy, and increase session token validity. For more information, +// see Managing AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// Most AWS Regions are enabled for operations in all AWS services by default. +// Those Regions are automatically activated for use with AWS STS. Some Regions, +// such as Asia Pacific (Hong Kong), must be manually enabled. To learn more +// about enabling and disabling AWS Regions, see Managing AWS Regions (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html) +// in the AWS General Reference. 
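customizations.go above uses the package-level initRequest hook to mark IDPCommunicationError as retryable on every request. The same per-request extension point is open to callers through the client's handler lists; a sketch, not vendored code, and the extra error code here is hypothetical:

package main

import (
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

// newRetryTunedSTS marks one additional (hypothetical) error code as
// retryable on every request this client builds, mirroring what
// customizations.go does for ErrCodeIDPCommunicationErrorException.
func newRetryTunedSTS(sess *session.Session) *sts.STS {
	svc := sts.New(sess)
	svc.Handlers.Build.PushBack(func(r *request.Request) {
		r.RetryErrorCodes = append(r.RetryErrorCodes, "Throttling")
	})
	return svc
}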
When you enable these AWS Regions, they are +// automatically activated for use with AWS STS. You cannot activate the STS +// endpoint for a Region that is disabled. Tokens that are valid in all AWS +// Regions are longer than tokens that are valid in Regions that are enabled +// by default. Changing this setting might affect existing systems where you +// temporarily store tokens. For more information, see Managing Global Endpoint +// Session Tokens (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#sts-regions-manage-tokens) +// in the IAM User Guide. +// +// After you activate a Region for use with AWS STS, you can direct AWS STS +// API calls to that Region. AWS STS recommends that you provide both the Region +// and endpoint when you make calls to a Regional endpoint. You can provide +// the Region alone for manually enabled Regions, such as Asia Pacific (Hong +// Kong). In this case, the calls are directed to the STS Regional endpoint. +// However, if you provide the Region alone for Regions enabled by default, +// the calls are directed to the global endpoint of https://sts.amazonaws.com. +// +// To view the list of AWS STS endpoints and whether they are active by default, +// see Writing Code to Use AWS STS Regions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#id_credentials_temp_enable-regions_writing_code) +// in the IAM User Guide. +// +// Recording API requests +// +// STS supports AWS CloudTrail, which is a service that records AWS calls for +// your AWS account and delivers log files to an Amazon S3 bucket. By using +// information collected by CloudTrail, you can determine what requests were +// successfully made to STS, who made the request, when it was made, and so +// on. +// +// If you activate AWS STS endpoints in Regions other than the default global +// endpoint, then you must also turn on CloudTrail logging in those Regions. +// This is necessary to record any AWS STS API calls that are made in those +// Regions. For more information, see Turning On CloudTrail in Additional Regions +// (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_turn_on_ct.html) +// in the AWS CloudTrail User Guide. +// +// AWS Security Token Service (STS) is a global service with a single endpoint +// at https://sts.amazonaws.com. Calls to this endpoint are logged as calls +// to a global service. However, because this endpoint is physically located +// in the US East (N. Virginia) Region, your logs list us-east-1 as the event +// Region. CloudTrail does not write these logs to the US East (Ohio) Region +// unless you choose to include global service logs in that Region. CloudTrail +// writes calls to all Regional endpoints to their respective Regions. For example, +// calls to sts.us-east-2.amazonaws.com are published to the US East (Ohio) +// Region and calls to sts.eu-central-1.amazonaws.com are published to the EU +// (Frankfurt) Region. +// +// To learn more about CloudTrail, including how to turn it on and find your +// log files, see the AWS CloudTrail User Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). +// +// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. +// +// See sts package documentation for more information. 
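Following the endpoint guidance in the package documentation above, a sketch (not vendored code) of pinning the client to one Regional endpoint instead of the global https://sts.amazonaws.com; the Region and URL are illustrative:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func regionalSTS() *sts.STS {
	sess := session.Must(session.NewSession())
	return sts.New(sess, aws.NewConfig().
		WithRegion("us-east-2").
		WithEndpoint("https://sts.us-east-2.amazonaws.com"))
}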
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/ +// +// Using the Client +// +// To contact AWS Security Token Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS Security Token Service client STS for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New +package sts diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/extender/code/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go new file mode 100644 index 000000000..a233f542e --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -0,0 +1,82 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +const ( + + // ErrCodeExpiredTokenException for service response error code + // "ExpiredTokenException". + // + // The web identity token that was passed is expired or is not valid. Get a + // new identity token from the identity provider and then retry the request. + ErrCodeExpiredTokenException = "ExpiredTokenException" + + // ErrCodeIDPCommunicationErrorException for service response error code + // "IDPCommunicationError". + // + // The request could not be fulfilled because the identity provider (IDP) that + // was asked to verify the incoming identity token could not be reached. This + // is often a transient error caused by network conditions. Retry the request + // a limited number of times so that you don't exceed the request rate. If the + // error persists, the identity provider might be down or not responding. + ErrCodeIDPCommunicationErrorException = "IDPCommunicationError" + + // ErrCodeIDPRejectedClaimException for service response error code + // "IDPRejectedClaim". + // + // The identity provider (IdP) reported that authentication failed. This might + // be because the claim is invalid. + // + // If this error is returned for the AssumeRoleWithWebIdentity operation, it + // can also mean that the claim has expired or has been explicitly revoked. + ErrCodeIDPRejectedClaimException = "IDPRejectedClaim" + + // ErrCodeInvalidAuthorizationMessageException for service response error code + // "InvalidAuthorizationMessageException". + // + // The error returned if the message passed to DecodeAuthorizationMessage was + // invalid. This can happen if the token contains invalid characters, such as + // linebreaks. + ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException" + + // ErrCodeInvalidIdentityTokenException for service response error code + // "InvalidIdentityToken". + // + // The web identity token that was passed could not be validated by AWS. Get + // a new identity token from the identity provider and then retry the request. + ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken" + + // ErrCodeMalformedPolicyDocumentException for service response error code + // "MalformedPolicyDocument". + // + // The request was rejected because the policy document was malformed. The error + // message describes the specific error. 
+ ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument" + + // ErrCodePackedPolicyTooLargeException for service response error code + // "PackedPolicyTooLarge". + // + // The request was rejected because the total packed size of the session policies + // and session tags combined was too large. An AWS conversion compresses the + // session policy document, session policy ARNs, and session tags into a packed + // binary format that has a separate limit. The error message indicates by percentage + // how close the policies and tags are to the upper size limit. For more information, + // see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // You could receive this error even though you meet other defined session policy + // and session tag limits. For more information, see IAM and STS Entity Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge" + + // ErrCodeRegionDisabledException for service response error code + // "RegionDisabledException". + // + // STS is not activated in the requested region for the account that is being + // asked to generate credentials. The account administrator must use the IAM + // console to activate STS in that region. For more information, see Activating + // and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) + // in the IAM User Guide. + ErrCodeRegionDisabledException = "RegionDisabledException" +) diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/extender/code/vendor/github.com/aws/aws-sdk-go/service/sts/service.go new file mode 100644 index 000000000..d34a68553 --- /dev/null +++ b/extender/code/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -0,0 +1,98 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +// STS provides the API operation methods for making requests to +// AWS Security Token Service. See this package's package overview docs +// for details on the service. +// +// STS methods are safe to use concurrently. It is not safe to +// mutate any of the struct's properties though. +type STS struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "sts" // Name of service. + EndpointsID = ServiceName // ID to lookup a service endpoint with. + ServiceID = "STS" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the STS client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// mySession := session.Must(session.NewSession()) +// +// // Create an STS client from just a session.
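A sketch (not vendored code) of how callers typically branch on the error codes defined in errors.go above, via the awserr.Error interface:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/sts"
)

// classifySTSError shows one common way to react to STS failure modes.
func classifySTSError(err error) {
	aerr, ok := err.(awserr.Error)
	if !ok {
		log.Println("non-AWS error:", err)
		return
	}
	switch aerr.Code() {
	case sts.ErrCodeIDPCommunicationErrorException:
		log.Println("transient IdP failure; the client retries this code (see customizations.go)")
	case sts.ErrCodeExpiredTokenException, sts.ErrCodeInvalidIdentityTokenException:
		log.Println("get a fresh token from the identity provider, then retry")
	case sts.ErrCodePackedPolicyTooLargeException:
		log.Println("trim session policies/tags; check PackedPolicySize in responses")
	case sts.ErrCodeRegionDisabledException:
		log.Println("activate STS in this Region or target another endpoint")
	default:
		log.Println("unhandled STS error:", aerr.Code(), aerr.Message())
	}
}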
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/extender/code/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
new file mode 100644
index 000000000..d34a68553
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
@@ -0,0 +1,98 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// STS provides the API operation methods for making requests to
+// AWS Security Token Service. See this package's package overview docs
+// for details on the service.
+//
+// STS methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties, though.
+type STS struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "sts"       // Name of service.
+	EndpointsID = ServiceName // ID to lookup a service endpoint with.
+	ServiceID   = "STS"       // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the STS client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     mySession := session.Must(session.NewSession())
+//
+//     // Create an STS client from just a session.
+//     svc := sts.New(mySession)
+//
+//     // Create an STS client with additional configuration
+//     svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
+	c := p.ClientConfig(EndpointsID, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *STS {
+	svc := &STS{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				ServiceID:     ServiceID,
+				SigningName:   signingName,
+				SigningRegion: signingRegion,
+				PartitionID:   partitionID,
+				Endpoint:      endpoint,
+				APIVersion:    "2011-06-15",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(query.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for an STS operation and runs any
+// custom request initialization.
+func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
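The stsiface package that follows exists so that callers can depend on an interface rather than the concrete *sts.STS. A sketch of the embedding pattern its docs describe (the mock type, fixture ARN, and package name here are illustrative):

	package myapp

	import (
		"github.com/aws/aws-sdk-go/service/sts"
		"github.com/aws/aws-sdk-go/service/sts/stsiface"
	)

	// mockSTS embeds the interface, so any method that is not
	// explicitly overridden panics if called in a test.
	type mockSTS struct {
		stsiface.STSAPI
	}

	func (m *mockSTS) GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error) {
		arn := "arn:aws:iam::123456789012:user/test" // hypothetical fixture
		return &sts.GetCallerIdentityOutput{Arn: &arn}, nil
	}

	// whoAmI accepts the interface, so tests can pass &mockSTS{}
	// while production code passes a real *sts.STS.
	func whoAmI(svc stsiface.STSAPI) (string, error) {
		out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
		if err != nil {
			return "", err
		}
		return *out.Arn, nil
	}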
diff --git a/extender/code/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/extender/code/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
new file mode 100644
index 000000000..e2e1d6efe
--- /dev/null
+++ b/extender/code/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
@@ -0,0 +1,96 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package stsiface provides an interface to enable mocking the AWS Security Token Service service client
+// for testing your code.
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
+package stsiface
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/sts"
+)
+
+// STSAPI provides an interface to enable mocking the
+// sts.STS service client's API operations,
+// paginators, and waiters. This makes unit testing your code that
+// calls out to the SDK's service client easier.
+//
+// The best way to use this interface is so the SDK's service client's calls
+// can be stubbed out for unit testing your code with the SDK without needing
+// to inject custom request handlers into the SDK's request pipeline.
+//
+//    // myFunc uses an SDK service client to make a request to
+//    // AWS Security Token Service.
+//    func myFunc(svc stsiface.STSAPI) bool {
+//        // Make svc.AssumeRole request
+//    }
+//
+//    func main() {
+//        sess := session.New()
+//        svc := sts.New(sess)
+//
+//        myFunc(svc)
+//    }
+//
+// In your _test.go file:
+//
+//    // Define a mock struct to be used in your unit tests of myFunc.
+//    type mockSTSClient struct {
+//        stsiface.STSAPI
+//    }
+//    func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) {
+//        // mock response/functionality
+//    }
+//
+//    func TestMyFunc(t *testing.T) {
+//        // Setup Test
+//        mockSvc := &mockSTSClient{}
+//
+//        myFunc(mockSvc)
+//
+//        // Verify myFunc's functionality
+//    }
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters. It's suggested to use the pattern above for testing, or to use
+// tooling to generate mocks that satisfy the interfaces.
+type STSAPI interface {
+	AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+	AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error)
+	AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput)
+
+	AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error)
+	AssumeRoleWithSAMLWithContext(aws.Context, *sts.AssumeRoleWithSAMLInput, ...request.Option) (*sts.AssumeRoleWithSAMLOutput, error)
+	AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput)
+
+	AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error)
+	AssumeRoleWithWebIdentityWithContext(aws.Context, *sts.AssumeRoleWithWebIdentityInput, ...request.Option) (*sts.AssumeRoleWithWebIdentityOutput, error)
+	AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput)
+
+	DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error)
+	DecodeAuthorizationMessageWithContext(aws.Context, *sts.DecodeAuthorizationMessageInput, ...request.Option) (*sts.DecodeAuthorizationMessageOutput, error)
+	DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput)
+
+	GetAccessKeyInfo(*sts.GetAccessKeyInfoInput) (*sts.GetAccessKeyInfoOutput, error)
+	GetAccessKeyInfoWithContext(aws.Context, *sts.GetAccessKeyInfoInput, ...request.Option) (*sts.GetAccessKeyInfoOutput, error)
+	GetAccessKeyInfoRequest(*sts.GetAccessKeyInfoInput) (*request.Request, *sts.GetAccessKeyInfoOutput)
+
+	GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error)
+	GetCallerIdentityWithContext(aws.Context, *sts.GetCallerIdentityInput, ...request.Option) (*sts.GetCallerIdentityOutput, error)
+	GetCallerIdentityRequest(*sts.GetCallerIdentityInput) (*request.Request, *sts.GetCallerIdentityOutput)
+
+	GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error)
+	GetFederationTokenWithContext(aws.Context, *sts.GetFederationTokenInput, ...request.Option) (*sts.GetFederationTokenOutput, error)
+	GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput)
+
+	GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error)
+	GetSessionTokenWithContext(aws.Context, *sts.GetSessionTokenInput, ...request.Option) (*sts.GetSessionTokenOutput, error)
+	GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput)
+}
+
+var _ STSAPI = (*sts.STS)(nil)
diff --git a/extender/code/vendor/github.com/beorn7/perks/LICENSE b/extender/code/vendor/github.com/beorn7/perks/LICENSE
new file mode 100644
index 000000000..339177be6
--- /dev/null +++ b/extender/code/vendor/github.com/beorn7/perks/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/extender/code/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/extender/code/vendor/github.com/beorn7/perks/quantile/exampledata.txt new file mode 100644 index 000000000..1602287d7 --- /dev/null +++ b/extender/code/vendor/github.com/beorn7/perks/quantile/exampledata.txt @@ -0,0 +1,2388 @@ +8 +5 +26 +12 +5 +235 +13 +6 +28 +30 +3 +3 +3 +3 +5 +2 +33 +7 +2 +4 +7 +12 +14 +5 +8 +3 +10 +4 +5 +3 +6 +6 +209 +20 +3 +10 +14 +3 +4 +6 +8 +5 +11 +7 +3 +2 +3 +3 +212 +5 +222 +4 +10 +10 +5 +6 +3 +8 +3 +10 +254 +220 +2 +3 +5 +24 +5 +4 +222 +7 +3 +3 +223 +8 +15 +12 +14 +14 +3 +2 +2 +3 +13 +3 +11 +4 +4 +6 +5 +7 +13 +5 +3 +5 +2 +5 +3 +5 +2 +7 +15 +17 +14 +3 +6 +6 +3 +17 +5 +4 +7 +6 +4 +4 +8 +6 +8 +3 +9 +3 +6 +3 +4 +5 +3 +3 +660 +4 +6 +10 +3 +6 +3 +2 +5 +13 +2 +4 +4 +10 +4 +8 +4 +3 +7 +9 +9 +3 +10 +37 +3 +13 +4 +12 +3 +6 +10 +8 +5 +21 +2 +3 +8 +3 +2 +3 +3 +4 +12 +2 +4 +8 +8 +4 +3 +2 +20 +1 +6 +32 +2 +11 +6 +18 +3 +8 +11 +3 +212 +3 +4 +2 +6 +7 +12 +11 +3 +2 +16 +10 +6 +4 +6 +3 +2 +7 +3 +2 +2 +2 +2 +5 +6 +4 +3 +10 +3 +4 +6 +5 +3 +4 +4 +5 +6 +4 +3 +4 +4 +5 +7 +5 +5 +3 +2 +7 +2 +4 +12 +4 +5 +6 +2 +4 +4 +8 +4 +15 +13 +7 +16 +5 +3 +23 +5 +5 +7 +3 +2 +9 +8 +7 +5 +8 +11 +4 +10 +76 +4 +47 +4 +3 +2 +7 +4 +2 +3 +37 +10 +4 +2 +20 +5 +4 +4 +10 +10 +4 +3 +7 +23 +240 +7 +13 +5 +5 +3 +3 +2 +5 +4 +2 +8 +7 +19 +2 +23 +8 +7 +2 +5 +3 +8 +3 +8 +13 +5 +5 +5 +2 +3 +23 +4 +9 +8 +4 +3 +3 +5 +220 +2 +3 +4 +6 +14 +3 +53 +6 +2 +5 +18 +6 +3 +219 +6 +5 +2 +5 +3 +6 +5 +15 +4 +3 +17 +3 +2 +4 +7 +2 +3 +3 +4 +4 +3 +2 +664 +6 +3 +23 +5 +5 +16 +5 +8 +2 +4 +2 +24 +12 +3 +2 +3 +5 +8 +3 +5 +4 +3 +14 +3 +5 +8 +2 +3 +7 +9 +4 +2 +3 +6 +8 +4 +3 +4 +6 +5 +3 +3 +6 +3 +19 +4 +4 +6 +3 +6 +3 +5 +22 +5 +4 +4 +3 +8 +11 +4 +9 +7 +6 +13 +4 +4 +4 +6 +17 +9 +3 +3 +3 +4 +3 +221 +5 +11 +3 +4 +2 +12 +6 +3 +5 +7 +5 +7 +4 +9 +7 +14 +37 +19 +217 +16 +3 +5 +2 +2 +7 +19 +7 +6 +7 +4 +24 +5 +11 +4 +7 +7 +9 +13 +3 +4 +3 +6 +28 +4 +4 +5 +5 +2 +5 +6 +4 +4 +6 +10 +5 +4 +3 +2 +3 +3 +6 +5 +5 +4 +3 +2 +3 +7 +4 +6 +18 +16 +8 +16 +4 +5 +8 +6 +9 +13 +1545 +6 +215 +6 +5 +6 +3 +45 +31 +5 +2 +2 +4 +3 +3 +2 +5 +4 +3 +5 +7 +7 +4 +5 +8 +5 +4 +749 +2 +31 +9 +11 +2 +11 +5 +4 +4 +7 +9 +11 +4 +5 +4 +7 +3 +4 +6 +2 +15 +3 +4 +3 +4 +3 +5 +2 +13 +5 +5 +3 +3 +23 +4 +4 +5 +7 +4 +13 +2 +4 +3 +4 +2 +6 +2 +7 +3 +5 +5 +3 +29 +5 +4 +4 +3 +10 +2 +3 +79 +16 +6 +6 +7 +7 +3 +5 +5 +7 +4 +3 +7 +9 +5 +6 +5 +9 +6 +3 +6 +4 +17 +2 +10 +9 +3 +6 +2 
+3 +21 +22 +5 +11 +4 +2 +17 +2 +224 +2 +14 +3 +4 +4 +2 +4 +4 +4 +4 +5 +3 +4 +4 +10 +2 +6 +3 +3 +5 +7 +2 +7 +5 +6 +3 +218 +2 +2 +5 +2 +6 +3 +5 +222 +14 +6 +33 +3 +2 +5 +3 +3 +3 +9 +5 +3 +3 +2 +7 +4 +3 +4 +3 +5 +6 +5 +26 +4 +13 +9 +7 +3 +221 +3 +3 +4 +4 +4 +4 +2 +18 +5 +3 +7 +9 +6 +8 +3 +10 +3 +11 +9 +5 +4 +17 +5 +5 +6 +6 +3 +2 +4 +12 +17 +6 +7 +218 +4 +2 +4 +10 +3 +5 +15 +3 +9 +4 +3 +3 +6 +29 +3 +3 +4 +5 +5 +3 +8 +5 +6 +6 +7 +5 +3 +5 +3 +29 +2 +31 +5 +15 +24 +16 +5 +207 +4 +3 +3 +2 +15 +4 +4 +13 +5 +5 +4 +6 +10 +2 +7 +8 +4 +6 +20 +5 +3 +4 +3 +12 +12 +5 +17 +7 +3 +3 +3 +6 +10 +3 +5 +25 +80 +4 +9 +3 +2 +11 +3 +3 +2 +3 +8 +7 +5 +5 +19 +5 +3 +3 +12 +11 +2 +6 +5 +5 +5 +3 +3 +3 +4 +209 +14 +3 +2 +5 +19 +4 +4 +3 +4 +14 +5 +6 +4 +13 +9 +7 +4 +7 +10 +2 +9 +5 +7 +2 +8 +4 +6 +5 +5 +222 +8 +7 +12 +5 +216 +3 +4 +4 +6 +3 +14 +8 +7 +13 +4 +3 +3 +3 +3 +17 +5 +4 +3 +33 +6 +6 +33 +7 +5 +3 +8 +7 +5 +2 +9 +4 +2 +233 +24 +7 +4 +8 +10 +3 +4 +15 +2 +16 +3 +3 +13 +12 +7 +5 +4 +207 +4 +2 +4 +27 +15 +2 +5 +2 +25 +6 +5 +5 +6 +13 +6 +18 +6 +4 +12 +225 +10 +7 +5 +2 +2 +11 +4 +14 +21 +8 +10 +3 +5 +4 +232 +2 +5 +5 +3 +7 +17 +11 +6 +6 +23 +4 +6 +3 +5 +4 +2 +17 +3 +6 +5 +8 +3 +2 +2 +14 +9 +4 +4 +2 +5 +5 +3 +7 +6 +12 +6 +10 +3 +6 +2 +2 +19 +5 +4 +4 +9 +2 +4 +13 +3 +5 +6 +3 +6 +5 +4 +9 +6 +3 +5 +7 +3 +6 +6 +4 +3 +10 +6 +3 +221 +3 +5 +3 +6 +4 +8 +5 +3 +6 +4 +4 +2 +54 +5 +6 +11 +3 +3 +4 +4 +4 +3 +7 +3 +11 +11 +7 +10 +6 +13 +223 +213 +15 +231 +7 +3 +7 +228 +2 +3 +4 +4 +5 +6 +7 +4 +13 +3 +4 +5 +3 +6 +4 +6 +7 +2 +4 +3 +4 +3 +3 +6 +3 +7 +3 +5 +18 +5 +6 +8 +10 +3 +3 +3 +2 +4 +2 +4 +4 +5 +6 +6 +4 +10 +13 +3 +12 +5 +12 +16 +8 +4 +19 +11 +2 +4 +5 +6 +8 +5 +6 +4 +18 +10 +4 +2 +216 +6 +6 +6 +2 +4 +12 +8 +3 +11 +5 +6 +14 +5 +3 +13 +4 +5 +4 +5 +3 +28 +6 +3 +7 +219 +3 +9 +7 +3 +10 +6 +3 +4 +19 +5 +7 +11 +6 +15 +19 +4 +13 +11 +3 +7 +5 +10 +2 +8 +11 +2 +6 +4 +6 +24 +6 +3 +3 +3 +3 +6 +18 +4 +11 +4 +2 +5 +10 +8 +3 +9 +5 +3 +4 +5 +6 +2 +5 +7 +4 +4 +14 +6 +4 +4 +5 +5 +7 +2 +4 +3 +7 +3 +3 +6 +4 +5 +4 +4 +4 +3 +3 +3 +3 +8 +14 +2 +3 +5 +3 +2 +4 +5 +3 +7 +3 +3 +18 +3 +4 +4 +5 +7 +3 +3 +3 +13 +5 +4 +8 +211 +5 +5 +3 +5 +2 +5 +4 +2 +655 +6 +3 +5 +11 +2 +5 +3 +12 +9 +15 +11 +5 +12 +217 +2 +6 +17 +3 +3 +207 +5 +5 +4 +5 +9 +3 +2 +8 +5 +4 +3 +2 +5 +12 +4 +14 +5 +4 +2 +13 +5 +8 +4 +225 +4 +3 +4 +5 +4 +3 +3 +6 +23 +9 +2 +6 +7 +233 +4 +4 +6 +18 +3 +4 +6 +3 +4 +4 +2 +3 +7 +4 +13 +227 +4 +3 +5 +4 +2 +12 +9 +17 +3 +7 +14 +6 +4 +5 +21 +4 +8 +9 +2 +9 +25 +16 +3 +6 +4 +7 +8 +5 +2 +3 +5 +4 +3 +3 +5 +3 +3 +3 +2 +3 +19 +2 +4 +3 +4 +2 +3 +4 +4 +2 +4 +3 +3 +3 +2 +6 +3 +17 +5 +6 +4 +3 +13 +5 +3 +3 +3 +4 +9 +4 +2 +14 +12 +4 +5 +24 +4 +3 +37 +12 +11 +21 +3 +4 +3 +13 +4 +2 +3 +15 +4 +11 +4 +4 +3 +8 +3 +4 +4 +12 +8 +5 +3 +3 +4 +2 +220 +3 +5 +223 +3 +3 +3 +10 +3 +15 +4 +241 +9 +7 +3 +6 +6 +23 +4 +13 +7 +3 +4 +7 +4 +9 +3 +3 +4 +10 +5 +5 +1 +5 +24 +2 +4 +5 +5 +6 +14 +3 +8 +2 +3 +5 +13 +13 +3 +5 +2 +3 +15 +3 +4 +2 +10 +4 +4 +4 +5 +5 +3 +5 +3 +4 +7 +4 +27 +3 +6 +4 +15 +3 +5 +6 +6 +5 +4 +8 +3 +9 +2 +6 +3 +4 +3 +7 +4 +18 +3 +11 +3 +3 +8 +9 +7 +24 +3 +219 +7 +10 +4 +5 +9 +12 +2 +5 +4 +4 +4 +3 +3 +19 +5 +8 +16 +8 +6 +22 +3 +23 +3 +242 +9 +4 +3 +3 +5 +7 +3 +3 +5 +8 +3 +7 +5 +14 +8 +10 +3 +4 +3 +7 +4 +6 +7 +4 +10 +4 +3 +11 +3 +7 +10 +3 +13 +6 +8 +12 +10 +5 +7 +9 +3 +4 +7 +7 +10 +8 +30 +9 +19 +4 +3 +19 +15 +4 +13 +3 +215 +223 +4 +7 +4 +8 +17 +16 +3 +7 +6 +5 +5 +4 +12 +3 +7 +4 +4 +13 +4 +5 +2 +5 +6 +5 +6 +6 +7 +10 +18 +23 +9 +3 +3 +6 +5 +2 +4 +2 +7 +3 +3 +2 +5 +5 +14 +10 +224 +6 +3 +4 +3 +7 +5 +9 +3 +6 +4 +2 +5 +11 +4 +3 +3 +2 +8 +4 +7 +4 +10 +7 +3 +3 +18 +18 +17 +3 +3 +3 +4 +5 +3 +3 +4 
+12 +7 +3 +11 +13 +5 +4 +7 +13 +5 +4 +11 +3 +12 +3 +6 +4 +4 +21 +4 +6 +9 +5 +3 +10 +8 +4 +6 +4 +4 +6 +5 +4 +8 +6 +4 +6 +4 +4 +5 +9 +6 +3 +4 +2 +9 +3 +18 +2 +4 +3 +13 +3 +6 +6 +8 +7 +9 +3 +2 +16 +3 +4 +6 +3 +2 +33 +22 +14 +4 +9 +12 +4 +5 +6 +3 +23 +9 +4 +3 +5 +5 +3 +4 +5 +3 +5 +3 +10 +4 +5 +5 +8 +4 +4 +6 +8 +5 +4 +3 +4 +6 +3 +3 +3 +5 +9 +12 +6 +5 +9 +3 +5 +3 +2 +2 +2 +18 +3 +2 +21 +2 +5 +4 +6 +4 +5 +10 +3 +9 +3 +2 +10 +7 +3 +6 +6 +4 +4 +8 +12 +7 +3 +7 +3 +3 +9 +3 +4 +5 +4 +4 +5 +5 +10 +15 +4 +4 +14 +6 +227 +3 +14 +5 +216 +22 +5 +4 +2 +2 +6 +3 +4 +2 +9 +9 +4 +3 +28 +13 +11 +4 +5 +3 +3 +2 +3 +3 +5 +3 +4 +3 +5 +23 +26 +3 +4 +5 +6 +4 +6 +3 +5 +5 +3 +4 +3 +2 +2 +2 +7 +14 +3 +6 +7 +17 +2 +2 +15 +14 +16 +4 +6 +7 +13 +6 +4 +5 +6 +16 +3 +3 +28 +3 +6 +15 +3 +9 +2 +4 +6 +3 +3 +22 +4 +12 +6 +7 +2 +5 +4 +10 +3 +16 +6 +9 +2 +5 +12 +7 +5 +5 +5 +5 +2 +11 +9 +17 +4 +3 +11 +7 +3 +5 +15 +4 +3 +4 +211 +8 +7 +5 +4 +7 +6 +7 +6 +3 +6 +5 +6 +5 +3 +4 +4 +26 +4 +6 +10 +4 +4 +3 +2 +3 +3 +4 +5 +9 +3 +9 +4 +4 +5 +5 +8 +2 +4 +2 +3 +8 +4 +11 +19 +5 +8 +6 +3 +5 +6 +12 +3 +2 +4 +16 +12 +3 +4 +4 +8 +6 +5 +6 +6 +219 +8 +222 +6 +16 +3 +13 +19 +5 +4 +3 +11 +6 +10 +4 +7 +7 +12 +5 +3 +3 +5 +6 +10 +3 +8 +2 +5 +4 +7 +2 +4 +4 +2 +12 +9 +6 +4 +2 +40 +2 +4 +10 +4 +223 +4 +2 +20 +6 +7 +24 +5 +4 +5 +2 +20 +16 +6 +5 +13 +2 +3 +3 +19 +3 +2 +4 +5 +6 +7 +11 +12 +5 +6 +7 +7 +3 +5 +3 +5 +3 +14 +3 +4 +4 +2 +11 +1 +7 +3 +9 +6 +11 +12 +5 +8 +6 +221 +4 +2 +12 +4 +3 +15 +4 +5 +226 +7 +218 +7 +5 +4 +5 +18 +4 +5 +9 +4 +4 +2 +9 +18 +18 +9 +5 +6 +6 +3 +3 +7 +3 +5 +4 +4 +4 +12 +3 +6 +31 +5 +4 +7 +3 +6 +5 +6 +5 +11 +2 +2 +11 +11 +6 +7 +5 +8 +7 +10 +5 +23 +7 +4 +3 +5 +34 +2 +5 +23 +7 +3 +6 +8 +4 +4 +4 +2 +5 +3 +8 +5 +4 +8 +25 +2 +3 +17 +8 +3 +4 +8 +7 +3 +15 +6 +5 +7 +21 +9 +5 +6 +6 +5 +3 +2 +3 +10 +3 +6 +3 +14 +7 +4 +4 +8 +7 +8 +2 +6 +12 +4 +213 +6 +5 +21 +8 +2 +5 +23 +3 +11 +2 +3 +6 +25 +2 +3 +6 +7 +6 +6 +4 +4 +6 +3 +17 +9 +7 +6 +4 +3 +10 +7 +2 +3 +3 +3 +11 +8 +3 +7 +6 +4 +14 +36 +3 +4 +3 +3 +22 +13 +21 +4 +2 +7 +4 +4 +17 +15 +3 +7 +11 +2 +4 +7 +6 +209 +6 +3 +2 +2 +24 +4 +9 +4 +3 +3 +3 +29 +2 +2 +4 +3 +3 +5 +4 +6 +3 +3 +2 +4 diff --git a/extender/code/vendor/github.com/beorn7/perks/quantile/stream.go b/extender/code/vendor/github.com/beorn7/perks/quantile/stream.go new file mode 100644 index 000000000..d7d14f8eb --- /dev/null +++ b/extender/code/vendor/github.com/beorn7/perks/quantile/stream.go @@ -0,0 +1,316 @@ +// Package quantile computes approximate quantiles over an unbounded data +// stream within low memory and CPU bounds. +// +// A small amount of accuracy is traded to achieve the above properties. +// +// Multiple streams can be merged before calling Query to generate a single set +// of results. This is meaningful when the streams represent the same type of +// data. See Merge and Samples. +// +// For more detailed information about the algorithm used, see: +// +// Effective Computation of Biased Quantiles over Data Streams +// +// http://www.cs.rutgers.edu/~muthu/bquant.pdf +package quantile + +import ( + "math" + "sort" +) + +// Sample holds an observed value and meta information for compression. JSON +// tags have been added for convenience. +type Sample struct { + Value float64 `json:",string"` + Width float64 `json:",string"` + Delta float64 `json:",string"` +} + +// Samples represents a slice of samples. It implements sort.Interface. 
+type Samples []Sample + +func (a Samples) Len() int { return len(a) } +func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } +func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type invariant func(s *stream, r float64) float64 + +// NewLowBiased returns an initialized Stream for low-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the lower ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewLowBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * r + } + return newStream(ƒ) +} + +// NewHighBiased returns an initialized Stream for high-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the higher ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewHighBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * (s.n - r) + } + return newStream(ƒ) +} + +// NewTargeted returns an initialized Stream concerned with a particular set of +// quantile values that are supplied a priori. Knowing these a priori reduces +// space and computation time. The targets map maps the desired quantiles to +// their absolute errors, i.e. the true quantile of a value returned by a query +// is guaranteed to be within (Quantile±Epsilon). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. +func NewTargeted(targetMap map[float64]float64) *Stream { + // Convert map to slice to avoid slow iterations on a map. + // ƒ is called on the hot path, so converting the map to a slice + // beforehand results in significant CPU savings. + targets := targetMapToSlice(targetMap) + + ƒ := func(s *stream, r float64) float64 { + var m = math.MaxFloat64 + var f float64 + for _, t := range targets { + if t.quantile*s.n <= r { + f = (2 * t.epsilon * r) / t.quantile + } else { + f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) + } + if f < m { + m = f + } + } + return m + } + return newStream(ƒ) +} + +type target struct { + quantile float64 + epsilon float64 +} + +func targetMapToSlice(targetMap map[float64]float64) []target { + targets := make([]target, 0, len(targetMap)) + + for quantile, epsilon := range targetMap { + t := target{ + quantile: quantile, + epsilon: epsilon, + } + targets = append(targets, t) + } + + return targets +} + +// Stream computes quantiles for a stream of float64s. It is not thread-safe by +// design. Take care when using across multiple goroutines. +type Stream struct { + *stream + b Samples + sorted bool +} + +func newStream(ƒ invariant) *Stream { + x := &stream{ƒ: ƒ} + return &Stream{x, make(Samples, 0, 500), true} +} + +// Insert inserts v into the stream. 
+func (s *Stream) Insert(v float64) {
+	s.insert(Sample{Value: v, Width: 1})
+}
+
+func (s *Stream) insert(sample Sample) {
+	s.b = append(s.b, sample)
+	s.sorted = false
+	if len(s.b) == cap(s.b) {
+		s.flush()
+	}
+}
+
+// Query returns the computed value for the qth quantile. If s was created with
+// NewTargeted, and q is not in the set of quantiles provided a priori, Query
+// will return an unspecified result.
+func (s *Stream) Query(q float64) float64 {
+	if !s.flushed() {
+		// Fast path when there hasn't been enough data for a flush;
+		// this also yields better accuracy for small sets of data.
+		l := len(s.b)
+		if l == 0 {
+			return 0
+		}
+		i := int(math.Ceil(float64(l) * q))
+		if i > 0 {
+			i -= 1
+		}
+		s.maybeSort()
+		return s.b[i].Value
+	}
+	s.flush()
+	return s.stream.query(q)
+}
+
+// Merge merges samples into the underlying stream's samples. This is handy when
+// merging multiple streams from separate threads, database shards, etc.
+//
+// ATTENTION: This method is broken and does not yield correct results. The
+// underlying algorithm is not capable of merging streams correctly.
+func (s *Stream) Merge(samples Samples) {
+	sort.Sort(samples)
+	s.stream.merge(samples)
+}
+
+// Reset reinitializes and clears the list, reusing the samples buffer memory.
+func (s *Stream) Reset() {
+	s.stream.reset()
+	s.b = s.b[:0]
+}
+
+// Samples returns stream samples held by s.
+func (s *Stream) Samples() Samples {
+	if !s.flushed() {
+		return s.b
+	}
+	s.flush()
+	return s.stream.samples()
+}
+
+// Count returns the total number of samples observed in the stream
+// since initialization.
+func (s *Stream) Count() int {
+	return len(s.b) + s.stream.count()
+}
+
+func (s *Stream) flush() {
+	s.maybeSort()
+	s.stream.merge(s.b)
+	s.b = s.b[:0]
+}
+
+func (s *Stream) maybeSort() {
+	if !s.sorted {
+		s.sorted = true
+		sort.Sort(s.b)
+	}
+}
+
+func (s *Stream) flushed() bool {
+	return len(s.stream.l) > 0
+}
+
+type stream struct {
+	n float64
+	l []Sample
+	ƒ invariant
+}
+
+func (s *stream) reset() {
+	s.l = s.l[:0]
+	s.n = 0
+}
+
+func (s *stream) insert(v float64) {
+	s.merge(Samples{{v, 1, 0}})
+}
+
+func (s *stream) merge(samples Samples) {
+	// TODO(beorn7): This tries to merge not only individual samples, but
+	// whole summaries. The paper doesn't mention merging summaries at
+	// all. Unit tests show that the merging is inaccurate. Find out how to
+	// do merges properly.
+	var r float64
+	i := 0
+	for _, sample := range samples {
+		for ; i < len(s.l); i++ {
+			c := s.l[i]
+			if c.Value > sample.Value {
+				// Insert at position i.
+				s.l = append(s.l, Sample{})
+				copy(s.l[i+1:], s.l[i:])
+				s.l[i] = Sample{
+					sample.Value,
+					sample.Width,
+					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
+					// TODO(beorn7): How to calculate delta correctly?
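+					// (Assumed context, per the CKMS biased-quantiles paper
+					// cited in the package doc: ƒ(s, r) bounds the allowed
+					// rank error at rank r, so Floor(ƒ(s, r))-1 is the
+					// largest delta a sample inserted at rank r may carry.)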
+ } + i++ + goto inserted + } + r += c.Width + } + s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) + i++ + inserted: + s.n += sample.Width + r += sample.Width + } + s.compress() +} + +func (s *stream) count() int { + return int(s.n) +} + +func (s *stream) query(q float64) float64 { + t := math.Ceil(q * s.n) + t += math.Ceil(s.ƒ(s, t) / 2) + p := s.l[0] + var r float64 + for _, c := range s.l[1:] { + r += p.Width + if r+c.Width+c.Delta > t { + return p.Value + } + p = c + } + return p.Value +} + +func (s *stream) compress() { + if len(s.l) < 2 { + return + } + x := s.l[len(s.l)-1] + xi := len(s.l) - 1 + r := s.n - 1 - x.Width + + for i := len(s.l) - 2; i >= 0; i-- { + c := s.l[i] + if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { + x.Width += c.Width + s.l[xi] = x + // Remove element at i. + copy(s.l[i:], s.l[i+1:]) + s.l = s.l[:len(s.l)-1] + xi -= 1 + } else { + x = c + xi = i + } + r -= c.Width + } +} + +func (s *stream) samples() Samples { + samples := make(Samples, len(s.l)) + copy(samples, s.l) + return samples +} diff --git a/extender/code/vendor/github.com/cespare/xxhash/v2/.travis.yml b/extender/code/vendor/github.com/cespare/xxhash/v2/.travis.yml new file mode 100644 index 000000000..c516ea88d --- /dev/null +++ b/extender/code/vendor/github.com/cespare/xxhash/v2/.travis.yml @@ -0,0 +1,8 @@ +language: go +go: + - "1.x" + - master +env: + - TAGS="" + - TAGS="-tags purego" +script: go test $TAGS -v ./... diff --git a/extender/code/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/extender/code/vendor/github.com/cespare/xxhash/v2/LICENSE.txt new file mode 100644 index 000000000..24b53065f --- /dev/null +++ b/extender/code/vendor/github.com/cespare/xxhash/v2/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/extender/code/vendor/github.com/cespare/xxhash/v2/README.md b/extender/code/vendor/github.com/cespare/xxhash/v2/README.md new file mode 100644 index 000000000..2fd8693c2 --- /dev/null +++ b/extender/code/vendor/github.com/cespare/xxhash/v2/README.md @@ -0,0 +1,67 @@ +# xxhash + +[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) +[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) + +xxhash is a Go implementation of the 64-bit +[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. 
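+
+A minimal usage sketch (illustrative; note how the streaming `Digest` agrees
+with the one-shot function):
+
+```
+package main
+
+import (
+	"fmt"
+
+	"github.com/cespare/xxhash/v2"
+)
+
+func main() {
+	fmt.Printf("%x\n", xxhash.Sum64String("hello, world"))
+
+	d := xxhash.New()
+	d.WriteString("hello, ")
+	d.WriteString("world")
+	fmt.Printf("%x\n", d.Sum64()) // same value as above
+}
+```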
+ +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +This implementation provides a fast pure-Go implementation and an even faster +assembly implementation for amd64. + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. +You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. + +| input size | purego | asm | +| --- | --- | --- | +| 5 B | 979.66 MB/s | 1291.17 MB/s | +| 100 B | 7475.26 MB/s | 7973.40 MB/s | +| 4 KB | 17573.46 MB/s | 17602.65 MB/s | +| 10 MB | 17131.46 MB/s | 17142.16 MB/s | + +These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using +the following commands under Go 1.11.2: + +``` +$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' +$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [FreeCache](https://github.com/coocood/freecache) diff --git a/extender/code/vendor/github.com/cespare/xxhash/v2/go.mod b/extender/code/vendor/github.com/cespare/xxhash/v2/go.mod new file mode 100644 index 000000000..49f67608b --- /dev/null +++ b/extender/code/vendor/github.com/cespare/xxhash/v2/go.mod @@ -0,0 +1,3 @@ +module github.com/cespare/xxhash/v2 + +go 1.11 diff --git a/extender/code/vendor/github.com/cespare/xxhash/v2/go.sum b/extender/code/vendor/github.com/cespare/xxhash/v2/go.sum new file mode 100644 index 000000000..e69de29bb diff --git a/extender/code/vendor/github.com/cespare/xxhash/v2/xxhash.go b/extender/code/vendor/github.com/cespare/xxhash/v2/xxhash.go new file mode 100644 index 000000000..db0b35fbe --- /dev/null +++ b/extender/code/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -0,0 +1,236 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where +// possible in the Go code is worth a small (but measurable) performance boost +// by avoiding some MOVQs. Vars are needed for the asm and also are useful for +// convenience in the Go code in a few places where we need to intentionally +// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the +// result overflows a uint64). +var ( + prime1v = prime1 + prime2v = prime2 + prime3v = prime3 + prime4v = prime4 + prime5v = prime5 +) + +// Digest implements hash.Hash64. 
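+// Write folds full 32-byte blocks into the running state (v1..v4) and
+// buffers any remainder in mem; Sum64 finalizes from a copy of that state,
+// so a Digest can keep accepting writes after Sum64 has been called.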
+type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = prime1v + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -prime1v + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(d.mem[d.n:], b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. + copy(d.mem[d.n:], b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[32-d.n:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + i, end := 0, d.n + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(d.mem[i:i+8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(d.mem[i:i+4])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for i < end { + h ^= uint64(d.mem[i]) * prime5 + h = rol11(h) * prime1 + i++ + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. 
+func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + b = b[len(d.mem):] + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) +} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/extender/code/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/extender/code/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go new file mode 100644 index 000000000..ad14b807f --- /dev/null +++ b/extender/code/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go @@ -0,0 +1,13 @@ +// +build !appengine +// +build gc +// +build !purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(d *Digest, b []byte) int diff --git a/extender/code/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/extender/code/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s new file mode 100644 index 000000000..d580e32ae --- /dev/null +++ b/extender/code/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -0,0 +1,215 @@ +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Register allocation: +// AX h +// CX pointer to advance through b +// DX n +// BX loop end +// R8 v1, k1 +// R9 v2 +// R10 v3 +// R11 v4 +// R12 tmp +// R13 prime1v +// R14 prime2v +// R15 prime4v + +// round reads from and advances the buffer pointer in CX. +// It assumes that R13 has prime1v and R14 has prime2v. +#define round(r) \ + MOVQ (CX), R12 \ + ADDQ $8, CX \ + IMULQ R14, R12 \ + ADDQ R12, r \ + ROLQ $31, r \ + IMULQ R13, r + +// mergeRound applies a merge round on the two registers acc and val. +// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. +#define mergeRound(acc, val) \ + IMULQ R14, val \ + ROLQ $31, val \ + IMULQ R13, val \ + XORQ val, acc \ + IMULQ R13, acc \ + ADDQ R15, acc + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT, $0-32 + // Load fixed primes. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + MOVQ ·prime4v(SB), R15 + + // Load slice. 
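+	// (A slice argument occupies three words in the frame: the base
+	// pointer at b_base+0(FP), the length at b_len+8(FP), and the
+	// capacity at b_cap+16(FP); the capacity is unused here.)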
+ MOVQ b_base+0(FP), CX + MOVQ b_len+8(FP), DX + LEAQ (CX)(DX*1), BX + + // The first loop limit will be len(b)-32. + SUBQ $32, BX + + // Check whether we have at least one block. + CMPQ DX, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). + MOVQ R13, R8 + ADDQ R14, R8 + MOVQ R14, R9 + XORQ R10, R10 + XORQ R11, R11 + SUBQ R13, R11 + + // Loop until CX > BX. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + MOVQ R8, AX + ROLQ $1, AX + MOVQ R9, R12 + ROLQ $7, R12 + ADDQ R12, AX + MOVQ R10, R12 + ROLQ $12, R12 + ADDQ R12, AX + MOVQ R11, R12 + ROLQ $18, R12 + ADDQ R12, AX + + mergeRound(AX, R8) + mergeRound(AX, R9) + mergeRound(AX, R10) + mergeRound(AX, R11) + + JMP afterBlocks + +noBlocks: + MOVQ ·prime5v(SB), AX + +afterBlocks: + ADDQ DX, AX + + // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. + ADDQ $24, BX + + CMPQ CX, BX + JG fourByte + +wordLoop: + // Calculate k1. + MOVQ (CX), R8 + ADDQ $8, CX + IMULQ R14, R8 + ROLQ $31, R8 + IMULQ R13, R8 + + XORQ R8, AX + ROLQ $27, AX + IMULQ R13, AX + ADDQ R15, AX + + CMPQ CX, BX + JLE wordLoop + +fourByte: + ADDQ $4, BX + CMPQ CX, BX + JG singles + + MOVL (CX), R8 + ADDQ $4, CX + IMULQ R13, R8 + XORQ R8, AX + + ROLQ $23, AX + IMULQ R14, AX + ADDQ ·prime3v(SB), AX + +singles: + ADDQ $4, BX + CMPQ CX, BX + JGE finalize + +singlesLoop: + MOVBQZX (CX), R12 + ADDQ $1, CX + IMULQ ·prime5v(SB), R12 + XORQ R12, AX + + ROLQ $11, AX + IMULQ R13, AX + + CMPQ CX, BX + JL singlesLoop + +finalize: + MOVQ AX, R12 + SHRQ $33, R12 + XORQ R12, AX + IMULQ R14, AX + MOVQ AX, R12 + SHRQ $29, R12 + XORQ R12, AX + IMULQ ·prime3v(SB), AX + MOVQ AX, R12 + SHRQ $32, R12 + XORQ R12, AX + + MOVQ AX, ret+24(FP) + RET + +// writeBlocks uses the same registers as above except that it uses AX to store +// the d pointer. + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT, $0-40 + // Load fixed primes needed for round. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + + // Load slice. + MOVQ b_base+8(FP), CX + MOVQ b_len+16(FP), DX + LEAQ (CX)(DX*1), BX + SUBQ $32, BX + + // Load vN from d. + MOVQ d+0(FP), AX + MOVQ 0(AX), R8 // v1 + MOVQ 8(AX), R9 // v2 + MOVQ 16(AX), R10 // v3 + MOVQ 24(AX), R11 // v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + // Copy vN back to d. + MOVQ R8, 0(AX) + MOVQ R9, 8(AX) + MOVQ R10, 16(AX) + MOVQ R11, 24(AX) + + // The number of bytes written is CX minus the old base pointer. + SUBQ b_base+8(FP), CX + MOVQ CX, ret+32(FP) + + RET diff --git a/extender/code/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/extender/code/vendor/github.com/cespare/xxhash/v2/xxhash_other.go new file mode 100644 index 000000000..4a5a82160 --- /dev/null +++ b/extender/code/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -0,0 +1,76 @@ +// +build !amd64 appengine !gc purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. 
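+	// (It consumes b directly in 32-byte blocks, skipping the copy
+	// into a Digest's internal mem buffer.)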
+ + n := len(b) + var h uint64 + + if n >= 32 { + v1 := prime1v + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -prime1v + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + i, end := 0, len(b) + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(b[i:i+8:len(b)])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for ; i < end; i++ { + h ^= uint64(b[i]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/extender/code/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/extender/code/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go new file mode 100644 index 000000000..fc9bea7a3 --- /dev/null +++ b/extender/code/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -0,0 +1,15 @@ +// +build appengine + +// This file contains the safe implementations of otherwise unsafe-using code. + +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/extender/code/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/extender/code/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go new file mode 100644 index 000000000..53bf76efb --- /dev/null +++ b/extender/code/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -0,0 +1,46 @@ +// +build !appengine + +// This file encapsulates usage of unsafe. +// xxhash_safe.go contains the safe implementations. + +package xxhash + +import ( + "reflect" + "unsafe" +) + +// Notes: +// +// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ +// for some discussion about these unsafe conversions. +// +// In the future it's possible that compiler optimizations will make these +// unsafe operations unnecessary: https://golang.org/issue/2205. +// +// Both of these wrapper functions still incur function call overhead since they +// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write +// for strings to squeeze out a bit more speed. Mid-stack inlining should +// eventually fix this. + +// Sum64String computes the 64-bit xxHash digest of s. +// It may be faster than Sum64([]byte(s)) by avoiding a copy. +func Sum64String(s string) uint64 { + var b []byte + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data + bh.Len = len(s) + bh.Cap = len(s) + return Sum64(b) +} + +// WriteString adds more data to d. It always returns len(s), nil. +// It may be faster than Write([]byte(s)) by avoiding a copy. 
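+// The temporary slice header only aliases s for the duration of the call;
+// Write copies any partial block into d.mem, so s is not retained.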
+func (d *Digest) WriteString(s string) (n int, err error) { + var b []byte + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data + bh.Len = len(s) + bh.Cap = len(s) + return d.Write(b) +} diff --git a/extender/code/vendor/github.com/containerd/containerd/LICENSE b/extender/code/vendor/github.com/containerd/containerd/LICENSE new file mode 100644 index 000000000..584149b6e --- /dev/null +++ b/extender/code/vendor/github.com/containerd/containerd/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright The containerd Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/extender/code/vendor/github.com/containerd/containerd/NOTICE b/extender/code/vendor/github.com/containerd/containerd/NOTICE new file mode 100644 index 000000000..8915f0277 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/containerd/NOTICE @@ -0,0 +1,16 @@ +Docker +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/extender/code/vendor/github.com/containerd/containerd/cio/io.go b/extender/code/vendor/github.com/containerd/containerd/cio/io.go new file mode 100644 index 000000000..c7cf4f0bc --- /dev/null +++ b/extender/code/vendor/github.com/containerd/containerd/cio/io.go @@ -0,0 +1,326 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cio + +import ( + "context" + "errors" + "fmt" + "io" + "net/url" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/containerd/containerd/defaults" +) + +var bufPool = sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 32<<10) + return &buffer + }, +} + +// Config holds the IO configurations. +type Config struct { + // Terminal is true if one has been allocated + Terminal bool + // Stdin path + Stdin string + // Stdout path + Stdout string + // Stderr path + Stderr string +} + +// IO holds the io information for a task or process +type IO interface { + // Config returns the IO configuration. + Config() Config + // Cancel aborts all current io operations. + Cancel() + // Wait blocks until all io copy operations have completed. + Wait() + // Close cleans up all open io resources. 
Cancel() is always called before
+	// Close()
+	Close() error
+}
+
+// Creator creates new IO sets for a task
+type Creator func(id string) (IO, error)
+
+// Attach allows callers to reattach to running tasks
+//
+// There should only be one reader for a task's IO set
+// because FIFOs can only be read by one reader, or the output
+// will be sent only to the first reader
+type Attach func(*FIFOSet) (IO, error)
+
+// FIFOSet is a set of file paths to FIFOs for a task's standard IO streams
+type FIFOSet struct {
+	Config
+	close func() error
+}
+
+// Close the FIFOSet
+func (f *FIFOSet) Close() error {
+	if f.close != nil {
+		return f.close()
+	}
+	return nil
+}
+
+// NewFIFOSet returns a new FIFOSet from a Config and a close function
+func NewFIFOSet(config Config, close func() error) *FIFOSet {
+	return &FIFOSet{Config: config, close: close}
+}
+
+// Streams used to configure a Creator or Attach
+type Streams struct {
+	Stdin    io.Reader
+	Stdout   io.Writer
+	Stderr   io.Writer
+	Terminal bool
+	FIFODir  string
+}
+
+// Opt customizes options for creating a Creator or Attach
+type Opt func(*Streams)
+
+// WithStdio sets stream options to the standard input/output streams
+func WithStdio(opt *Streams) {
+	WithStreams(os.Stdin, os.Stdout, os.Stderr)(opt)
+}
+
+// WithTerminal sets the terminal option
+func WithTerminal(opt *Streams) {
+	opt.Terminal = true
+}
+
+// WithStreams sets the stream options to the specified Reader and Writers
+func WithStreams(stdin io.Reader, stdout, stderr io.Writer) Opt {
+	return func(opt *Streams) {
+		opt.Stdin = stdin
+		opt.Stdout = stdout
+		opt.Stderr = stderr
+	}
+}
+
+// WithFIFODir sets the fifo directory.
+// e.g. "/run/containerd/fifo", "/run/users/1001/containerd/fifo"
+func WithFIFODir(dir string) Opt {
+	return func(opt *Streams) {
+		opt.FIFODir = dir
+	}
+}
+
+// NewCreator returns an IO creator from the options
+func NewCreator(opts ...Opt) Creator {
+	streams := &Streams{}
+	for _, opt := range opts {
+		opt(streams)
+	}
+	if streams.FIFODir == "" {
+		streams.FIFODir = defaults.DefaultFIFODir
+	}
+	return func(id string) (IO, error) {
+		fifos, err := NewFIFOSetInDir(streams.FIFODir, id, streams.Terminal)
+		if err != nil {
+			return nil, err
+		}
+		if streams.Stdin == nil {
+			fifos.Stdin = ""
+		}
+		if streams.Stdout == nil {
+			fifos.Stdout = ""
+		}
+		if streams.Stderr == nil {
+			fifos.Stderr = ""
+		}
+		return copyIO(fifos, streams)
+	}
+}
+
+// NewAttach attaches the existing io for a task to the provided io.Reader/Writers
+func NewAttach(opts ...Opt) Attach {
+	streams := &Streams{}
+	for _, opt := range opts {
+		opt(streams)
+	}
+	return func(fifos *FIFOSet) (IO, error) {
+		if fifos == nil {
+			return nil, fmt.Errorf("cannot attach, missing fifos")
+		}
+		return copyIO(fifos, streams)
+	}
+}
+
+// NullIO redirects the container's IO into /dev/null
+func NullIO(_ string) (IO, error) {
+	return &cio{}, nil
+}
+
+// cio is a basic container IO implementation.
+type cio struct { + config Config + wg *sync.WaitGroup + closers []io.Closer + cancel context.CancelFunc +} + +func (c *cio) Config() Config { + return c.config +} + +func (c *cio) Wait() { + if c.wg != nil { + c.wg.Wait() + } +} + +func (c *cio) Close() error { + var lastErr error + for _, closer := range c.closers { + if closer == nil { + continue + } + if err := closer.Close(); err != nil { + lastErr = err + } + } + return lastErr +} + +func (c *cio) Cancel() { + if c.cancel != nil { + c.cancel() + } +} + +type pipes struct { + Stdin io.WriteCloser + Stdout io.ReadCloser + Stderr io.ReadCloser +} + +// DirectIO allows task IO to be handled externally by the caller +type DirectIO struct { + pipes + cio +} + +var ( + _ IO = &DirectIO{} + _ IO = &logURI{} +) + +// LogURI provides the raw logging URI +func LogURI(uri *url.URL) Creator { + return func(_ string) (IO, error) { + return &logURI{ + config: Config{ + Stdout: uri.String(), + Stderr: uri.String(), + }, + }, nil + } +} + +// BinaryIO forwards container STDOUT|STDERR directly to a logging binary +func BinaryIO(binary string, args map[string]string) Creator { + return func(_ string) (IO, error) { + binary = filepath.Clean(binary) + if !strings.HasPrefix(binary, "/") { + return nil, errors.New("absolute path needed") + } + uri := &url.URL{ + Scheme: "binary", + Path: binary, + } + q := uri.Query() + for k, v := range args { + q.Set(k, v) + } + uri.RawQuery = q.Encode() + res := uri.String() + return &logURI{ + config: Config{ + Stdout: res, + Stderr: res, + }, + }, nil + } +} + +// LogFile creates a file on disk that logs the task's STDOUT,STDERR. +// If the log file already exists, the logs will be appended to the file. +func LogFile(path string) Creator { + return func(_ string) (IO, error) { + path = filepath.Clean(path) + if !strings.HasPrefix(path, "/") { + return nil, errors.New("absolute path needed") + } + uri := &url.URL{ + Scheme: "file", + Path: path, + } + res := uri.String() + return &logURI{ + config: Config{ + Stdout: res, + Stderr: res, + }, + }, nil + } +} + +type logURI struct { + config Config +} + +func (l *logURI) Config() Config { + return l.config +} + +func (l *logURI) Cancel() { + +} + +func (l *logURI) Wait() { + +} + +func (l *logURI) Close() error { + return nil +} + +// Load the io for a container but do not attach +// +// Allows io to be loaded on the task for deletion without +// starting copy routines +func Load(set *FIFOSet) (IO, error) { + return &cio{ + config: set.Config, + closers: []io.Closer{set}, + }, nil +} + +func (p *pipes) closers() []io.Closer { + return []io.Closer{p.Stdin, p.Stdout, p.Stderr} +} diff --git a/extender/code/vendor/github.com/containerd/containerd/cio/io_unix.go b/extender/code/vendor/github.com/containerd/containerd/cio/io_unix.go new file mode 100644 index 000000000..42d320933 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/containerd/cio/io_unix.go @@ -0,0 +1,156 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cio + +import ( + "context" + "io" + "io/ioutil" + "os" + "path/filepath" + "sync" + "syscall" + + "github.com/containerd/fifo" + "github.com/pkg/errors" +) + +// NewFIFOSetInDir returns a new FIFOSet with paths in a temporary directory under root +func NewFIFOSetInDir(root, id string, terminal bool) (*FIFOSet, error) { + if root != "" { + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + } + dir, err := ioutil.TempDir(root, "") + if err != nil { + return nil, err + } + closer := func() error { + return os.RemoveAll(dir) + } + return NewFIFOSet(Config{ + Stdin: filepath.Join(dir, id+"-stdin"), + Stdout: filepath.Join(dir, id+"-stdout"), + Stderr: filepath.Join(dir, id+"-stderr"), + Terminal: terminal, + }, closer), nil +} + +func copyIO(fifos *FIFOSet, ioset *Streams) (*cio, error) { + var ctx, cancel = context.WithCancel(context.Background()) + pipes, err := openFifos(ctx, fifos) + if err != nil { + cancel() + return nil, err + } + + if fifos.Stdin != "" { + go func() { + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + + io.CopyBuffer(pipes.Stdin, ioset.Stdin, *p) + pipes.Stdin.Close() + }() + } + + var wg = &sync.WaitGroup{} + if fifos.Stdout != "" { + wg.Add(1) + go func() { + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + + io.CopyBuffer(ioset.Stdout, pipes.Stdout, *p) + pipes.Stdout.Close() + wg.Done() + }() + } + + if !fifos.Terminal && fifos.Stderr != "" { + wg.Add(1) + go func() { + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + + io.CopyBuffer(ioset.Stderr, pipes.Stderr, *p) + pipes.Stderr.Close() + wg.Done() + }() + } + return &cio{ + config: fifos.Config, + wg: wg, + closers: append(pipes.closers(), fifos), + cancel: cancel, + }, nil +} + +func openFifos(ctx context.Context, fifos *FIFOSet) (pipes, error) { + var err error + defer func() { + if err != nil { + fifos.Close() + } + }() + + var f pipes + if fifos.Stdin != "" { + if f.Stdin, err = fifo.OpenFifo(ctx, fifos.Stdin, syscall.O_WRONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); err != nil { + return f, errors.Wrapf(err, "failed to open stdin fifo") + } + defer func() { + if err != nil && f.Stdin != nil { + f.Stdin.Close() + } + }() + } + if fifos.Stdout != "" { + if f.Stdout, err = fifo.OpenFifo(ctx, fifos.Stdout, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); err != nil { + return f, errors.Wrapf(err, "failed to open stdout fifo") + } + defer func() { + if err != nil && f.Stdout != nil { + f.Stdout.Close() + } + }() + } + if fifos.Stderr != "" { + if f.Stderr, err = fifo.OpenFifo(ctx, fifos.Stderr, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); err != nil { + return f, errors.Wrapf(err, "failed to open stderr fifo") + } + } + return f, nil +} + +// NewDirectIO returns an IO implementation that exposes the IO streams as io.ReadCloser +// and io.WriteCloser. 
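+//
+// A minimal usage sketch (assuming a ctx context.Context is in scope and
+// eliding error handling):
+//
+//	fifos, _ := NewFIFOSetInDir("", "task-id", false)
+//	dio, _ := NewDirectIO(ctx, fifos)
+//	go io.Copy(os.Stdout, dio.Stdout) // dio.Stdout is an io.ReadCloser backed by the fifo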
+func NewDirectIO(ctx context.Context, fifos *FIFOSet) (*DirectIO, error) { + ctx, cancel := context.WithCancel(ctx) + pipes, err := openFifos(ctx, fifos) + return &DirectIO{ + pipes: pipes, + cio: cio{ + config: fifos.Config, + closers: append(pipes.closers(), fifos), + cancel: cancel, + }, + }, err +} diff --git a/extender/code/vendor/github.com/containerd/containerd/cio/io_windows.go b/extender/code/vendor/github.com/containerd/containerd/cio/io_windows.go new file mode 100644 index 000000000..ca6cd54a4 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/containerd/cio/io_windows.go @@ -0,0 +1,170 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cio + +import ( + "context" + "fmt" + "io" + "net" + + winio "github.com/Microsoft/go-winio" + "github.com/containerd/containerd/log" + "github.com/pkg/errors" +) + +const pipeRoot = `\\.\pipe` + +// NewFIFOSetInDir returns a new set of fifos for the task +func NewFIFOSetInDir(_, id string, terminal bool) (*FIFOSet, error) { + stderrPipe := "" + if !terminal { + stderrPipe = fmt.Sprintf(`%s\ctr-%s-stderr`, pipeRoot, id) + } + return NewFIFOSet(Config{ + Terminal: terminal, + Stdin: fmt.Sprintf(`%s\ctr-%s-stdin`, pipeRoot, id), + Stdout: fmt.Sprintf(`%s\ctr-%s-stdout`, pipeRoot, id), + Stderr: stderrPipe, + }, nil), nil +} + +func copyIO(fifos *FIFOSet, ioset *Streams) (*cio, error) { + var ( + set []io.Closer + ) + + if fifos.Stdin != "" { + l, err := winio.ListenPipe(fifos.Stdin, nil) + if err != nil { + return nil, errors.Wrapf(err, "failed to create stdin pipe %s", fifos.Stdin) + } + defer func(l net.Listener) { + if err != nil { + l.Close() + } + }(l) + set = append(set, l) + + go func() { + c, err := l.Accept() + if err != nil { + log.L.WithError(err).Errorf("failed to accept stdin connection on %s", fifos.Stdin) + return + } + + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + + io.CopyBuffer(c, ioset.Stdin, *p) + c.Close() + l.Close() + }() + } + + if fifos.Stdout != "" { + l, err := winio.ListenPipe(fifos.Stdout, nil) + if err != nil { + return nil, errors.Wrapf(err, "failed to create stdout pipe %s", fifos.Stdout) + } + defer func(l net.Listener) { + if err != nil { + l.Close() + } + }(l) + set = append(set, l) + + go func() { + c, err := l.Accept() + if err != nil { + log.L.WithError(err).Errorf("failed to accept stdout connection on %s", fifos.Stdout) + return + } + + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + + io.CopyBuffer(ioset.Stdout, c, *p) + c.Close() + l.Close() + }() + } + + if fifos.Stderr != "" { + l, err := winio.ListenPipe(fifos.Stderr, nil) + if err != nil { + return nil, errors.Wrapf(err, "failed to create stderr pipe %s", fifos.Stderr) + } + defer func(l net.Listener) { + if err != nil { + l.Close() + } + }(l) + set = append(set, l) + + go func() { + c, err := l.Accept() + if err != nil { + log.L.WithError(err).Errorf("failed to accept stderr connection on %s", fifos.Stderr) + return + } + + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + + 
io.CopyBuffer(ioset.Stderr, c, *p) + c.Close() + l.Close() + }() + } + + return &cio{config: fifos.Config, closers: set}, nil +} + +// NewDirectIO returns an IO implementation that exposes the IO streams as io.ReadCloser +// and io.WriteCloser. +func NewDirectIO(stdin io.WriteCloser, stdout, stderr io.ReadCloser, terminal bool) *DirectIO { + return &DirectIO{ + pipes: pipes{ + Stdin: stdin, + Stdout: stdout, + Stderr: stderr, + }, + cio: cio{ + config: Config{Terminal: terminal}, + }, + } +} + +// NewDirectIOFromFIFOSet returns an IO implementation that exposes the IO streams as io.ReadCloser +// and io.WriteCloser. +func NewDirectIOFromFIFOSet(ctx context.Context, stdin io.WriteCloser, stdout, stderr io.ReadCloser, fifos *FIFOSet) *DirectIO { + _, cancel := context.WithCancel(ctx) + pipes := pipes{ + Stdin: stdin, + Stdout: stdout, + Stderr: stderr, + } + return &DirectIO{ + pipes: pipes, + cio: cio{ + config: fifos.Config, + closers: append(pipes.closers(), fifos), + cancel: cancel, + }, + } +} diff --git a/extender/code/vendor/github.com/containerd/containerd/defaults/defaults.go b/extender/code/vendor/github.com/containerd/containerd/defaults/defaults.go new file mode 100644 index 000000000..6f5b122ec --- /dev/null +++ b/extender/code/vendor/github.com/containerd/containerd/defaults/defaults.go @@ -0,0 +1,32 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package defaults + +const ( + // DefaultMaxRecvMsgSize defines the default maximum message size for + // receiving protobufs passed over the GRPC API. + DefaultMaxRecvMsgSize = 16 << 20 + // DefaultMaxSendMsgSize defines the default maximum message size for + // sending protobufs passed over the GRPC API. + DefaultMaxSendMsgSize = 16 << 20 + // DefaultRuntimeNSLabel defines the namespace label to check for the + // default runtime + DefaultRuntimeNSLabel = "containerd.io/defaults/runtime" + // DefaultSnapshotterNSLabel defines the namespace label to check for the + // default snapshotter + DefaultSnapshotterNSLabel = "containerd.io/defaults/snapshotter" +) diff --git a/extender/code/vendor/github.com/containerd/containerd/defaults/defaults_unix.go b/extender/code/vendor/github.com/containerd/containerd/defaults/defaults_unix.go new file mode 100644 index 000000000..319e8777b --- /dev/null +++ b/extender/code/vendor/github.com/containerd/containerd/defaults/defaults_unix.go @@ -0,0 +1,37 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package defaults + +const ( + // DefaultRootDir is the default location used by containerd to store + // persistent data + DefaultRootDir = "/var/lib/containerd" + // DefaultStateDir is the default location used by containerd to store + // transient data + DefaultStateDir = "/run/containerd" + // DefaultAddress is the default unix socket address + DefaultAddress = "/run/containerd/containerd.sock" + // DefaultDebugAddress is the default unix socket address for pprof data + DefaultDebugAddress = "/run/containerd/debug.sock" + // DefaultFIFODir is the default location used by client-side cio library + // to store FIFOs. + DefaultFIFODir = "/run/containerd/fifo" + // DefaultRuntime is the default linux runtime + DefaultRuntime = "io.containerd.runc.v2" +) diff --git a/extender/code/vendor/github.com/containerd/containerd/defaults/defaults_windows.go b/extender/code/vendor/github.com/containerd/containerd/defaults/defaults_windows.go new file mode 100644 index 000000000..5eede8de8 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/containerd/defaults/defaults_windows.go @@ -0,0 +1,45 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package defaults + +import ( + "os" + "path/filepath" +) + +var ( + // DefaultRootDir is the default location used by containerd to store + // persistent data + DefaultRootDir = filepath.Join(os.Getenv("ProgramData"), "containerd", "root") + // DefaultStateDir is the default location used by containerd to store + // transient data + DefaultStateDir = filepath.Join(os.Getenv("ProgramData"), "containerd", "state") +) + +const ( + // DefaultAddress is the default winpipe address + DefaultAddress = `\\.\pipe\containerd-containerd` + // DefaultDebugAddress is the default winpipe address for pprof data + DefaultDebugAddress = `\\.\pipe\containerd-debug` + // DefaultFIFODir is the default location used by client-side cio library + // to store FIFOs. Unused on Windows. + DefaultFIFODir = "" + // DefaultRuntime is the default windows runtime + DefaultRuntime = "io.containerd.runhcs.v1" +) diff --git a/extender/code/vendor/github.com/containerd/containerd/defaults/doc.go b/extender/code/vendor/github.com/containerd/containerd/defaults/doc.go new file mode 100644 index 000000000..6da863ce2 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/containerd/defaults/doc.go @@ -0,0 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+// Package defaults provides several common defaults for interacting with
+// containerd. These can be used on the client-side or server-side.
+package defaults
diff --git a/extender/code/vendor/github.com/containerd/containerd/errdefs/errors.go b/extender/code/vendor/github.com/containerd/containerd/errdefs/errors.go
new file mode 100644
index 000000000..b5200afc0
--- /dev/null
+++ b/extender/code/vendor/github.com/containerd/containerd/errdefs/errors.go
@@ -0,0 +1,93 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+// Package errdefs defines the common errors used throughout containerd
+// packages.
+//
+// Use with errors.Wrap and errors.Wrapf to add context to an error.
+//
+// To detect an error class, use the IsXXX functions to tell whether an error
+// is of a certain type.
+//
+// The functions ToGRPC and FromGRPC can be used to map server-side and
+// client-side errors to the correct types.
+package errdefs
+
+import (
+	"context"
+
+	"github.com/pkg/errors"
+)
+
+// Definitions of common error types used throughout containerd. All containerd
+// errors returned by most packages will map into one of these error classes.
+// Packages should return errors of these types when they want to instruct a
+// client to take a particular action.
+//
+// For the most part, we just try to provide local grpc errors. Most conditions
+// map very well to those defined by grpc.
+var (
+	ErrUnknown            = errors.New("unknown") // used internally to represent a missed mapping.
+	ErrInvalidArgument    = errors.New("invalid argument")
+	ErrNotFound           = errors.New("not found")
+	ErrAlreadyExists      = errors.New("already exists")
+	ErrFailedPrecondition = errors.New("failed precondition")
+	ErrUnavailable        = errors.New("unavailable")
+	ErrNotImplemented     = errors.New("not implemented") // represents not supported and unimplemented
+)
+
+// IsInvalidArgument returns true if the error is due to an invalid argument
+func IsInvalidArgument(err error) bool {
+	return errors.Cause(err) == ErrInvalidArgument
+}
+
+// IsNotFound returns true if the error is due to a missing object
+func IsNotFound(err error) bool {
+	return errors.Cause(err) == ErrNotFound
+}
+
+// IsAlreadyExists returns true if the error is due to an already existing
+// metadata item
+func IsAlreadyExists(err error) bool {
+	return errors.Cause(err) == ErrAlreadyExists
+}
+
+// IsFailedPrecondition returns true if an operation could not proceed due to
+// the lack of a particular condition
+func IsFailedPrecondition(err error) bool {
+	return errors.Cause(err) == ErrFailedPrecondition
+}
+
+// IsUnavailable returns true if the error is due to a resource being unavailable
+func IsUnavailable(err error) bool {
+	return errors.Cause(err) == ErrUnavailable
+}
+
+// IsNotImplemented returns true if the error is due to not being implemented
+func IsNotImplemented(err error) bool {
+	return errors.Cause(err) == ErrNotImplemented
+}
+
+// IsCanceled returns true if the error is due to `context.Canceled`.
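+//
+// Like the other predicates in this package, it unwraps with errors.Cause,
+// so a wrapped error still classifies; a small illustrative sketch:
+//
+//	err := errors.Wrap(context.Canceled, "pull aborted")
+//	IsCanceled(err) // true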
+func IsCanceled(err error) bool { + return errors.Cause(err) == context.Canceled +} + +// IsDeadlineExceeded returns true if the error is due to +// `context.DeadlineExceeded`. +func IsDeadlineExceeded(err error) bool { + return errors.Cause(err) == context.DeadlineExceeded +} diff --git a/extender/code/vendor/github.com/containerd/containerd/errdefs/grpc.go b/extender/code/vendor/github.com/containerd/containerd/errdefs/grpc.go new file mode 100644 index 000000000..209f63bd0 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/containerd/errdefs/grpc.go @@ -0,0 +1,147 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package errdefs + +import ( + "context" + "strings" + + "github.com/pkg/errors" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// ToGRPC will attempt to map the backend containerd error into a grpc error, +// using the original error message as a description. +// +// Further information may be extracted from certain errors depending on their +// type. +// +// If the error is unmapped, the original error will be returned to be handled +// by the regular grpc error handling stack. +func ToGRPC(err error) error { + if err == nil { + return nil + } + + if isGRPCError(err) { + // error has already been mapped to grpc + return err + } + + switch { + case IsInvalidArgument(err): + return status.Errorf(codes.InvalidArgument, err.Error()) + case IsNotFound(err): + return status.Errorf(codes.NotFound, err.Error()) + case IsAlreadyExists(err): + return status.Errorf(codes.AlreadyExists, err.Error()) + case IsFailedPrecondition(err): + return status.Errorf(codes.FailedPrecondition, err.Error()) + case IsUnavailable(err): + return status.Errorf(codes.Unavailable, err.Error()) + case IsNotImplemented(err): + return status.Errorf(codes.Unimplemented, err.Error()) + case IsCanceled(err): + return status.Errorf(codes.Canceled, err.Error()) + case IsDeadlineExceeded(err): + return status.Errorf(codes.DeadlineExceeded, err.Error()) + } + + return err +} + +// ToGRPCf maps the error to grpc error codes, assembling the formatting string +// and combining it with the target error string. 
+// +// This is equivalent to errors.ToGRPC(errors.Wrapf(err, format, args...)) +func ToGRPCf(err error, format string, args ...interface{}) error { + return ToGRPC(errors.Wrapf(err, format, args...)) +} + +// FromGRPC returns the underlying error from a grpc service based on the grpc error code +func FromGRPC(err error) error { + if err == nil { + return nil + } + + var cls error // divide these into error classes, becomes the cause + + switch code(err) { + case codes.InvalidArgument: + cls = ErrInvalidArgument + case codes.AlreadyExists: + cls = ErrAlreadyExists + case codes.NotFound: + cls = ErrNotFound + case codes.Unavailable: + cls = ErrUnavailable + case codes.FailedPrecondition: + cls = ErrFailedPrecondition + case codes.Unimplemented: + cls = ErrNotImplemented + case codes.Canceled: + cls = context.Canceled + case codes.DeadlineExceeded: + cls = context.DeadlineExceeded + default: + cls = ErrUnknown + } + + msg := rebaseMessage(cls, err) + if msg != "" { + err = errors.Wrap(cls, msg) + } else { + err = errors.WithStack(cls) + } + + return err +} + +// rebaseMessage removes the repeats for an error at the end of an error +// string. This will happen when taking an error over grpc then remapping it. +// +// Effectively, we just remove the string of cls from the end of err if it +// appears there. +func rebaseMessage(cls error, err error) string { + desc := errDesc(err) + clss := cls.Error() + if desc == clss { + return "" + } + + return strings.TrimSuffix(desc, ": "+clss) +} + +func isGRPCError(err error) bool { + _, ok := status.FromError(err) + return ok +} + +func code(err error) codes.Code { + if s, ok := status.FromError(err); ok { + return s.Code() + } + return codes.Unknown +} + +func errDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} diff --git a/extender/code/vendor/github.com/containerd/containerd/log/context.go b/extender/code/vendor/github.com/containerd/containerd/log/context.go new file mode 100644 index 000000000..31f1a3ac0 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/containerd/log/context.go @@ -0,0 +1,90 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package log + +import ( + "context" + "sync/atomic" + + "github.com/sirupsen/logrus" +) + +var ( + // G is an alias for GetLogger. + // + // We may want to define this locally to a package to get package tagged log + // messages. + G = GetLogger + + // L is an alias for the standard logger. + L = logrus.NewEntry(logrus.StandardLogger()) +) + +type ( + loggerKey struct{} +) + +// TraceLevel is the log level for tracing. Trace level is lower than debug level, +// and is usually used to trace detailed behavior of the program. +const TraceLevel = logrus.Level(uint32(logrus.DebugLevel + 1)) + +// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to +// ensure the formatted time is always the same number of characters. 
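+//
+// For example (illustrative, assuming the time package is imported):
+//
+//	time.Now().UTC().Format(RFC3339NanoFixed)
+//	// always emits nine fractional digits, e.g. "...T03:04:05.000000000Z"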
+const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" + +// ParseLevel takes a string level and returns the Logrus log level constant. +// It supports trace level. +func ParseLevel(lvl string) (logrus.Level, error) { + if lvl == "trace" { + return TraceLevel, nil + } + return logrus.ParseLevel(lvl) +} + +// WithLogger returns a new context with the provided logger. Use in +// combination with logger.WithField(s) for great effect. +func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context { + return context.WithValue(ctx, loggerKey{}, logger) +} + +// GetLogger retrieves the current logger from the context. If no logger is +// available, the default logger is returned. +func GetLogger(ctx context.Context) *logrus.Entry { + logger := ctx.Value(loggerKey{}) + + if logger == nil { + return L + } + + return logger.(*logrus.Entry) +} + +// Trace logs a message at level Trace with the log entry passed-in. +func Trace(e *logrus.Entry, args ...interface{}) { + level := logrus.Level(atomic.LoadUint32((*uint32)(&e.Logger.Level))) + if level >= TraceLevel { + e.Debug(args...) + } +} + +// Tracef logs a message at level Trace with the log entry passed-in. +func Tracef(e *logrus.Entry, format string, args ...interface{}) { + level := logrus.Level(atomic.LoadUint32((*uint32)(&e.Logger.Level))) + if level >= TraceLevel { + e.Debugf(format, args...) + } +} diff --git a/extender/code/vendor/github.com/containerd/containerd/platforms/compare.go b/extender/code/vendor/github.com/containerd/containerd/platforms/compare.go new file mode 100644 index 000000000..3ad22a10d --- /dev/null +++ b/extender/code/vendor/github.com/containerd/containerd/platforms/compare.go @@ -0,0 +1,229 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import specs "github.com/opencontainers/image-spec/specs-go/v1" + +// MatchComparer is able to match and compare platforms to +// filter and sort platforms. +type MatchComparer interface { + Matcher + + Less(specs.Platform, specs.Platform) bool +} + +// Only returns a match comparer for a single platform +// using default resolution logic for the platform. 
+// +// For ARMv8, will also match ARMv7, ARMv6 and ARMv5 (for 32bit runtimes) +// For ARMv7, will also match ARMv6 and ARMv5 +// For ARMv6, will also match ARMv5 +func Only(platform specs.Platform) MatchComparer { + platform = Normalize(platform) + if platform.Architecture == "arm" { + if platform.Variant == "v8" { + return orderedPlatformComparer{ + matchers: []Matcher{ + &matcher{ + Platform: platform, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v7", + }, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v6", + }, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v5", + }, + }, + }, + } + } + if platform.Variant == "v7" { + return orderedPlatformComparer{ + matchers: []Matcher{ + &matcher{ + Platform: platform, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v6", + }, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v5", + }, + }, + }, + } + } + if platform.Variant == "v6" { + return orderedPlatformComparer{ + matchers: []Matcher{ + &matcher{ + Platform: platform, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v5", + }, + }, + }, + } + } + } + + return singlePlatformComparer{ + Matcher: &matcher{ + Platform: platform, + }, + } +} + +// Ordered returns a platform MatchComparer which matches any of the platforms +// but orders them in order they are provided. +func Ordered(platforms ...specs.Platform) MatchComparer { + matchers := make([]Matcher, len(platforms)) + for i := range platforms { + matchers[i] = NewMatcher(platforms[i]) + } + return orderedPlatformComparer{ + matchers: matchers, + } +} + +// Any returns a platform MatchComparer which matches any of the platforms +// with no preference for ordering. +func Any(platforms ...specs.Platform) MatchComparer { + matchers := make([]Matcher, len(platforms)) + for i := range platforms { + matchers[i] = NewMatcher(platforms[i]) + } + return anyPlatformComparer{ + matchers: matchers, + } +} + +// All is a platform MatchComparer which matches all platforms +// with preference for ordering. 
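+//
+// Sketch: All matches everything and imposes no ordering, e.g.
+//
+//	All.Match(specs.Platform{OS: "linux", Architecture: "amd64"}) // true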
+var All MatchComparer = allPlatformComparer{}
+
+type singlePlatformComparer struct {
+	Matcher
+}
+
+func (c singlePlatformComparer) Less(p1, p2 specs.Platform) bool {
+	return c.Match(p1) && !c.Match(p2)
+}
+
+type orderedPlatformComparer struct {
+	matchers []Matcher
+}
+
+func (c orderedPlatformComparer) Match(platform specs.Platform) bool {
+	for _, m := range c.matchers {
+		if m.Match(platform) {
+			return true
+		}
+	}
+	return false
+}
+
+func (c orderedPlatformComparer) Less(p1 specs.Platform, p2 specs.Platform) bool {
+	for _, m := range c.matchers {
+		p1m := m.Match(p1)
+		p2m := m.Match(p2)
+		if p1m && !p2m {
+			return true
+		}
+		if p1m || p2m {
+			return false
+		}
+	}
+	return false
+}
+
+type anyPlatformComparer struct {
+	matchers []Matcher
+}
+
+func (c anyPlatformComparer) Match(platform specs.Platform) bool {
+	for _, m := range c.matchers {
+		if m.Match(platform) {
+			return true
+		}
+	}
+	return false
+}
+
+func (c anyPlatformComparer) Less(p1, p2 specs.Platform) bool {
+	var p1m, p2m bool
+	for _, m := range c.matchers {
+		if !p1m && m.Match(p1) {
+			p1m = true
+		}
+		if !p2m && m.Match(p2) {
+			p2m = true
+		}
+		if p1m && p2m {
+			return false
+		}
+	}
+	// If one matches and the other does not, sort the match first
+	return p1m && !p2m
+}
+
+type allPlatformComparer struct{}
+
+func (allPlatformComparer) Match(specs.Platform) bool {
+	return true
+}
+
+func (allPlatformComparer) Less(specs.Platform, specs.Platform) bool {
+	return false
+}
diff --git a/extender/code/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/extender/code/vendor/github.com/containerd/containerd/platforms/cpuinfo.go
new file mode 100644
index 000000000..69b336d67
--- /dev/null
+++ b/extender/code/vendor/github.com/containerd/containerd/platforms/cpuinfo.go
@@ -0,0 +1,117 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package platforms
+
+import (
+	"bufio"
+	"os"
+	"runtime"
+	"strings"
+
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/log"
+	"github.com/pkg/errors"
+)
+
+// cpuVariant holds the ARM instruction set architecture variant, e.g. v7, v8
+var cpuVariant string
+
+func init() {
+	if isArmArch(runtime.GOARCH) {
+		cpuVariant = getCPUVariant()
+	} else {
+		cpuVariant = ""
+	}
+}
+
+// For Linux, the kernel has already detected the ABI, ISA and features.
+// So we don't need to access the ARM registers to detect platform information
+// by ourselves. We can just parse this information from /proc/cpuinfo.
+func getCPUInfo(pattern string) (info string, err error) {
+	if !isLinuxOS(runtime.GOOS) {
+		return "", errors.Wrapf(errdefs.ErrNotImplemented, "getCPUInfo for OS %s", runtime.GOOS)
+	}
+
+	cpuinfo, err := os.Open("/proc/cpuinfo")
+	if err != nil {
+		return "", err
+	}
+	defer cpuinfo.Close()
+
+	// Start to parse the cpuinfo line by line. For SMP SoCs, parsing
+	// the first core is enough.
+	scanner := bufio.NewScanner(cpuinfo)
+	for scanner.Scan() {
+		newline := scanner.Text()
+		list := strings.Split(newline, ":")
+
+		if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) {
+			return strings.TrimSpace(list[1]), nil
+		}
+	}
+
+	// Check whether the scanner encountered errors
+	err = scanner.Err()
+	if err != nil {
+		return "", err
+	}
+
+	return "", errors.Wrapf(errdefs.ErrNotFound, "getCPUInfo for pattern: %s", pattern)
+}
+
+func getCPUVariant() string {
+	if runtime.GOOS == "windows" {
+		// Windows only supports v7 for ARM32 and v8 for ARM64, so we can use
+		// runtime.GOARCH to determine the variants
+		var variant string
+		switch runtime.GOARCH {
+		case "arm64":
+			variant = "v8"
+		case "arm":
+			variant = "v7"
+		default:
+			variant = "unknown"
+		}
+
+		return variant
+	}
+
+	variant, err := getCPUInfo("Cpu architecture")
+	if err != nil {
+		log.L.WithError(err).Error("failure getting variant")
+		return ""
+	}
+
+	switch variant {
+	case "8", "AArch64":
+		variant = "v8"
+	case "7", "7M", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)":
+		variant = "v7"
+	case "6", "6TEJ":
+		variant = "v6"
+	case "5", "5T", "5TE", "5TEJ":
+		variant = "v5"
+	case "4", "4T":
+		variant = "v4"
+	case "3":
+		variant = "v3"
+	default:
+		variant = "unknown"
+	}
+
+	return variant
+}
diff --git a/extender/code/vendor/github.com/containerd/containerd/platforms/database.go b/extender/code/vendor/github.com/containerd/containerd/platforms/database.go
new file mode 100644
index 000000000..6ede94061
--- /dev/null
+++ b/extender/code/vendor/github.com/containerd/containerd/platforms/database.go
@@ -0,0 +1,114 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package platforms
+
+import (
+	"runtime"
+	"strings"
+)
+
+// isLinuxOS returns true if the operating system is Linux.
+//
+// The OS value should be normalized before calling this function.
+func isLinuxOS(os string) bool {
+	return os == "linux"
+}
+
+// These functions are generated from https://golang.org/src/go/build/syslist.go.
+//
+// We use switch statements because they are slightly faster than map lookups
+// and use a little less memory.
+
+// isKnownOS returns true if we know about the operating system.
+//
+// The OS value should be normalized before calling this function.
+func isKnownOS(os string) bool {
+	switch os {
+	case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos":
+		return true
+	}
+	return false
+}
+
+// isArmArch returns true if the architecture is ARM.
+//
+// The arch value should be normalized before being passed to this function.
+func isArmArch(arch string) bool {
+	switch arch {
+	case "arm", "arm64":
+		return true
+	}
+	return false
+}
+
+// isKnownArch returns true if we know about the architecture.
+//
+// The arch value should be normalized before being passed to this function.
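+//
+// For example:
+//
+//	isKnownArch("amd64") // true
+//	isKnownArch("pdp11") // false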
+func isKnownArch(arch string) bool { + switch arch { + case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm": + return true + } + return false +} + +func normalizeOS(os string) string { + if os == "" { + return runtime.GOOS + } + os = strings.ToLower(os) + + switch os { + case "macos": + os = "darwin" + } + return os +} + +// normalizeArch normalizes the architecture. +func normalizeArch(arch, variant string) (string, string) { + arch, variant = strings.ToLower(arch), strings.ToLower(variant) + switch arch { + case "i386": + arch = "386" + variant = "" + case "x86_64", "x86-64": + arch = "amd64" + variant = "" + case "aarch64", "arm64": + arch = "arm64" + switch variant { + case "8", "v8": + variant = "" + } + case "armhf": + arch = "arm" + variant = "v7" + case "armel": + arch = "arm" + variant = "v6" + case "arm": + switch variant { + case "", "7": + variant = "v7" + case "5", "6", "8": + variant = "v" + variant + } + } + + return arch, variant +} diff --git a/extender/code/vendor/github.com/containerd/containerd/platforms/defaults.go b/extender/code/vendor/github.com/containerd/containerd/platforms/defaults.go new file mode 100644 index 000000000..a14d80e58 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/containerd/platforms/defaults.go @@ -0,0 +1,38 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultString returns the default string specifier for the platform. +func DefaultString() string { + return Format(DefaultSpec()) +} + +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant, + } +} diff --git a/extender/code/vendor/github.com/containerd/containerd/platforms/defaults_unix.go b/extender/code/vendor/github.com/containerd/containerd/platforms/defaults_unix.go new file mode 100644 index 000000000..e8a7d5ffa --- /dev/null +++ b/extender/code/vendor/github.com/containerd/containerd/platforms/defaults_unix.go @@ -0,0 +1,24 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package platforms
+
+// Default returns the default matcher for the platform.
+func Default() MatchComparer {
+	return Only(DefaultSpec())
+}
diff --git a/extender/code/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/extender/code/vendor/github.com/containerd/containerd/platforms/defaults_windows.go
new file mode 100644
index 000000000..0defbd36c
--- /dev/null
+++ b/extender/code/vendor/github.com/containerd/containerd/platforms/defaults_windows.go
@@ -0,0 +1,31 @@
+// +build windows
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package platforms
+
+import (
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// Default returns the default matcher for the platform.
+func Default() MatchComparer {
+	return Ordered(DefaultSpec(), specs.Platform{
+		OS:           "linux",
+		Architecture: "amd64",
+	})
+}
diff --git a/extender/code/vendor/github.com/containerd/containerd/platforms/platforms.go b/extender/code/vendor/github.com/containerd/containerd/platforms/platforms.go
new file mode 100644
index 000000000..d2b73ac3d
--- /dev/null
+++ b/extender/code/vendor/github.com/containerd/containerd/platforms/platforms.go
@@ -0,0 +1,279 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+// Package platforms provides a toolkit for normalizing, matching and
+// specifying container platforms.
+//
+// Centered around OCI platform specifications, we define a string-based
+// specifier syntax that can be used for user input. With a specifier, users
+// only need to specify the parts of the platform that are relevant to their
+// context, providing an operating system or architecture or both.
+//
+// How do I use this package?
+//
+// The vast majority of use cases should simply use the match function with
+// user input. The first step is to parse a specifier into a matcher:
+//
+//	m, err := Parse("linux")
+//	if err != nil { ... }
+//
+// Once you have a matcher, use it to match against the platform declared by a
+// component, typically from an image or runtime. Since extracting an image's
+// platform is a little more involved, we'll use an example against the
+// platform default:
+//
+//	if ok := m.Match(DefaultSpec()); !ok { /* doesn't match */ }
+//
+// This can be composed in loops for resolving runtimes or used as a filter for
+// fetching and selecting images.
+//
+// More details of the specifier syntax and platform spec follow.
+//
+// Declaring Platform Support
+//
+// Components that have strict platform requirements should use the OCI
+// platform specification to declare their support. Typically, this will be
+// images and runtimes that should declare specifically which platforms they
+// support. This looks roughly as follows:
+//
+//	type Platform struct {
+//		Architecture string
+//		OS           string
+//		Variant      string
+//	}
+//
+// Most images and runtimes should at least set Architecture and OS, according
+// to their GOARCH and GOOS values, respectively (follow the OCI image
+// specification when in doubt). ARM should set the variant in certain cases,
+// which are outlined below.
+//
+// Platform Specifiers
+//
+// While the OCI platform specifications provide a tool for components to
+// specify structured information, user input typically doesn't need the full
+// context and much can be inferred. To solve this problem, we introduced
+// "specifiers". A specifier has the format
+// `<os>|<arch>|<os>/<arch>[/<variant>]`. The user can provide either the
+// operating system or the architecture or both.
+//
+// An example of a common specifier is `linux/amd64`. If the host has a default
+// runtime that matches this, the user can simply provide the component that
+// matters. For example, if an image provides amd64 and arm64 support, the
+// operating system, `linux`, can be inferred, so they only have to provide
+// `arm64` or `amd64`. Similar behavior is implemented for operating systems,
+// where the architecture may be known but a runtime may support images from
+// different operating systems.
+//
+// Normalization
+//
+// Because not all users are familiar with the way the Go runtime represents
+// platforms, several normalizations have been provided to make this package
+// easier to use.
+//
+// The following are performed for architectures:
+//
+//	Value    Normalized
+//	aarch64  arm64
+//	armhf    arm
+//	armel    arm/v6
+//	i386     386
+//	x86_64   amd64
+//	x86-64   amd64
+//
+// We also normalize the operating system `macos` to `darwin`.
+//
+// ARM Support
+//
+// To qualify ARM architecture, the Variant field is used to qualify the arm
+// version. The most common arm version, v7, is represented without the variant
+// unless it is explicitly provided. This is treated as equivalent to armhf. A
+// previous architecture, armel, will be normalized to arm/v6.
+//
+// While these normalizations are provided, their support on arm platforms has
+// not yet been fully implemented and tested.
+package platforms
+
+import (
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+
+	"github.com/containerd/containerd/errdefs"
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+var (
+	specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`)
+)
+
+// Matcher matches platform specifications, provided by an image or runtime.
+type Matcher interface {
+	Match(platform specs.Platform) bool
+}
+
+// NewMatcher returns a simple matcher based on the provided platform
+// specification. The returned matcher only looks for equality based on os,
+// architecture and variant.
+//
+// One may implement their own matcher if this doesn't provide the required
+// functionality.
+//
+// Applications should opt to use `Match` over directly parsing specifiers.
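+//
+// A hedged sketch showing that inputs are normalized before comparison:
+//
+//	m := NewMatcher(specs.Platform{OS: "macos", Architecture: "x86_64"})
+//	m.Match(specs.Platform{OS: "darwin", Architecture: "amd64"}) // true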
+func NewMatcher(platform specs.Platform) Matcher {
+	return &matcher{
+		Platform: Normalize(platform),
+	}
+}
+
+type matcher struct {
+	specs.Platform
+}
+
+func (m *matcher) Match(platform specs.Platform) bool {
+	normalized := Normalize(platform)
+	return m.OS == normalized.OS &&
+		m.Architecture == normalized.Architecture &&
+		m.Variant == normalized.Variant
+}
+
+func (m *matcher) String() string {
+	return Format(m.Platform)
+}
+
+// Parse parses the platform specifier syntax into a platform declaration.
+//
+// Platform specifiers are in the format `<os>|<arch>|<os>/<arch>[/<variant>]`.
+// The minimum required information for a platform specifier is the operating
+// system or architecture. If there is only a single string (no slashes), the
+// value will be matched against the known set of operating systems, then fall
+// back to the known set of architectures. The missing component will be
+// inferred based on the local environment.
+func Parse(specifier string) (specs.Platform, error) {
+	if strings.Contains(specifier, "*") {
+		// TODO(stevvooe): need to work out exact wildcard handling
+		return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: wildcards not yet supported", specifier)
+	}
+
+	parts := strings.Split(specifier, "/")
+
+	for _, part := range parts {
+		if !specifierRe.MatchString(part) {
+			return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q is an invalid component of %q: platform specifier component must match %q", part, specifier, specifierRe.String())
+		}
+	}
+
+	var p specs.Platform
+	switch len(parts) {
+	case 1:
+		// in this case, we will test that the value might be an OS, then look
+		// it up. If it is not known, we'll treat it as an architecture. Since
+		// we have very little information about the platform here, we are
+		// going to be a little more strict if we don't know about the argument
+		// value.
+		p.OS = normalizeOS(parts[0])
+		if isKnownOS(p.OS) {
+			// picks a default architecture
+			p.Architecture = runtime.GOARCH
+			if p.Architecture == "arm" {
+				// TODO(stevvooe): Resolve arm variant, if not v6 (default)
+				return specs.Platform{}, errors.Wrapf(errdefs.ErrNotImplemented, "arm support not fully implemented")
+			}
+
+			return p, nil
+		}
+
+		p.Architecture, p.Variant = normalizeArch(parts[0], "")
+		if p.Architecture == "arm" && p.Variant == "v7" {
+			p.Variant = ""
+		}
+		if isKnownArch(p.Architecture) {
+			p.OS = runtime.GOOS
+			return p, nil
+		}
+
+		return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: unknown operating system or architecture", specifier)
+	case 2:
+		// In this case, we treat as a regular os/arch pair. We don't care
+		// about whether or not we know of the platform.
+		p.OS = normalizeOS(parts[0])
+		p.Architecture, p.Variant = normalizeArch(parts[1], "")
+		if p.Architecture == "arm" && p.Variant == "v7" {
+			p.Variant = ""
+		}
+
+		return p, nil
+	case 3:
+		// we have a fully specified variant, this is rare
+		p.OS = normalizeOS(parts[0])
+		p.Architecture, p.Variant = normalizeArch(parts[1], parts[2])
+		if p.Architecture == "arm64" && p.Variant == "" {
+			p.Variant = "v8"
+		}
+
+		return p, nil
+	}
+
+	return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: cannot parse platform specifier", specifier)
+}
+
+// MustParse is like Parse but panics if the specifier cannot be parsed.
+// Simplifies initialization of global variables.
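+//
+// Typical use (sketch):
+//
+//	var linuxAmd64 = MustParse("linux/amd64")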
+func MustParse(specifier string) specs.Platform {
+	p, err := Parse(specifier)
+	if err != nil {
+		panic("platform: Parse(" + strconv.Quote(specifier) + "): " + err.Error())
+	}
+	return p
+}
+
+// Format returns a string specifier from the provided platform specification.
+func Format(platform specs.Platform) string {
+	if platform.OS == "" {
+		return "unknown"
+	}
+
+	return joinNotEmpty(platform.OS, platform.Architecture, platform.Variant)
+}
+
+func joinNotEmpty(s ...string) string {
+	var ss []string
+	for _, s := range s {
+		if s == "" {
+			continue
+		}
+
+		ss = append(ss, s)
+	}
+
+	return strings.Join(ss, "/")
+}
+
+// Normalize validates and translates the platform to the canonical value.
+//
+// For example, if "Aarch64" is encountered, we change it to "arm64" or if
+// "x86_64" is encountered, it becomes "amd64".
+func Normalize(platform specs.Platform) specs.Platform {
+	platform.OS = normalizeOS(platform.OS)
+	platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant)
+
+	// these fields are deprecated, remove them
+	platform.OSFeatures = nil
+	platform.OSVersion = ""
+
+	return platform
+}
diff --git a/extender/code/vendor/github.com/containerd/continuity/AUTHORS b/extender/code/vendor/github.com/containerd/continuity/AUTHORS
new file mode 100644
index 000000000..4043394cc
--- /dev/null
+++ b/extender/code/vendor/github.com/containerd/continuity/AUTHORS
@@ -0,0 +1,16 @@
+Aaron Lehmann
+Akash Gupta
+Akihiro Suda
+Andrew Pennebaker
+Brandon Philips
+Christopher Jones
+Daniel, Dao Quang Minh
+Derek McGowan
+Edward Pilatowicz
+Ian Campbell
+Justin Cormack
+Justin Cummins
+Phil Estes
+Stephen J Day
+Tobias Klauser
+Tonis Tiigi
diff --git a/extender/code/vendor/github.com/containerd/continuity/LICENSE b/extender/code/vendor/github.com/containerd/continuity/LICENSE
new file mode 100644
index 000000000..584149b6e
--- /dev/null
+++ b/extender/code/vendor/github.com/containerd/continuity/LICENSE
@@ -0,0 +1,191 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        https://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright The containerd Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/extender/code/vendor/github.com/containerd/continuity/devices/devices.go b/extender/code/vendor/github.com/containerd/continuity/devices/devices.go new file mode 100644 index 000000000..e4d4a0370 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/devices/devices.go @@ -0,0 +1,21 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package devices + +import "fmt" + +var ErrNotSupported = fmt.Errorf("not supported") diff --git a/extender/code/vendor/github.com/containerd/continuity/devices/devices_unix.go b/extender/code/vendor/github.com/containerd/continuity/devices/devices_unix.go new file mode 100644 index 000000000..520a5a6f3 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/devices/devices_unix.go @@ -0,0 +1,74 @@ +// +build linux darwin freebsd solaris + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package devices + +import ( + "fmt" + "os" + "syscall" + + "golang.org/x/sys/unix" +) + +func DeviceInfo(fi os.FileInfo) (uint64, uint64, error) { + sys, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return 0, 0, fmt.Errorf("cannot extract device from os.FileInfo") + } + + dev := uint64(sys.Rdev) + return uint64(unix.Major(dev)), uint64(unix.Minor(dev)), nil +} + +// mknod provides a shortcut for syscall.Mknod +func Mknod(p string, mode os.FileMode, maj, min int) error { + var ( + m = syscallMode(mode.Perm()) + dev uint64 + ) + + if mode&os.ModeDevice != 0 { + dev = unix.Mkdev(uint32(maj), uint32(min)) + + if mode&os.ModeCharDevice != 0 { + m |= unix.S_IFCHR + } else { + m |= unix.S_IFBLK + } + } else if mode&os.ModeNamedPipe != 0 { + m |= unix.S_IFIFO + } + + return unix.Mknod(p, m, int(dev)) +} + +// syscallMode returns the syscall-specific mode bits from Go's portable mode bits. +func syscallMode(i os.FileMode) (o uint32) { + o |= uint32(i.Perm()) + if i&os.ModeSetuid != 0 { + o |= unix.S_ISUID + } + if i&os.ModeSetgid != 0 { + o |= unix.S_ISGID + } + if i&os.ModeSticky != 0 { + o |= unix.S_ISVTX + } + return +} diff --git a/extender/code/vendor/github.com/containerd/continuity/devices/devices_windows.go b/extender/code/vendor/github.com/containerd/continuity/devices/devices_windows.go new file mode 100644 index 000000000..04627c805 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/devices/devices_windows.go @@ -0,0 +1,27 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package devices + +import ( + "os" + + "github.com/pkg/errors" +) + +func DeviceInfo(fi os.FileInfo) (uint64, uint64, error) { + return 0, 0, errors.Wrap(ErrNotSupported, "cannot get device info on windows") +} diff --git a/extender/code/vendor/github.com/containerd/continuity/driver/driver.go b/extender/code/vendor/github.com/containerd/continuity/driver/driver.go new file mode 100644 index 000000000..327e96af1 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/driver/driver.go @@ -0,0 +1,174 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package driver + +import ( + "fmt" + "io" + "os" +) + +var ErrNotSupported = fmt.Errorf("not supported") + +// Driver provides all of the system-level functions in a common interface. +// The context should call these with full paths and should never use the `os` +// package or any other package to access resources on the filesystem. This +// mechanism lets us carefully control access to the context and maintain +// path and resource integrity. It also gives us an interface to reason about +// direct resource access. +// +// Implementations don't need to do much other than meet the interface. For +// example, it is not required to wrap os.FileInfo to return correct paths for +// the call to Name(). +type Driver interface { + // Note that Open() returns a File interface instead of *os.File. This + // is because os.File is a struct, so if Open were to return *os.File, + // the only way to fulfill the interface would be to call os.Open() + Open(path string) (File, error) + OpenFile(path string, flag int, perm os.FileMode) (File, error) + + Stat(path string) (os.FileInfo, error) + Lstat(path string) (os.FileInfo, error) + Readlink(p string) (string, error) + Mkdir(path string, mode os.FileMode) error + Remove(path string) error + + Link(oldname, newname string) error + Lchmod(path string, mode os.FileMode) error + Lchown(path string, uid, gid int64) error + Symlink(oldname, newname string) error + + MkdirAll(path string, perm os.FileMode) error + RemoveAll(path string) error + + // TODO(aaronl): These methods might move outside the main Driver + // interface in the future as more platforms are added. + Mknod(path string, mode os.FileMode, major int, minor int) error + Mkfifo(path string, mode os.FileMode) error +} + +// File is the interface for interacting with files returned by continuity's Open. +// This is needed since os.File is a struct, instead of an interface, so it can't +// be used. +type File interface { + io.ReadWriteCloser + io.Seeker + Readdir(n int) ([]os.FileInfo, error) +} + +func NewSystemDriver() (Driver, error) { + // TODO(stevvooe): Consider having this take a "hint" path argument, which + // would be the context root. The hint could be used to resolve required + // filesystem support when assembling the driver to use. + return &driver{}, nil +} + +// XAttrDriver should be implemented on operating systems and filesystems that +// have xattr support for regular files and directories. +type XAttrDriver interface { + // Getxattr returns all of the extended attributes for the file at path. + // Typically, this takes a syscall call to Listxattr and Getxattr. + Getxattr(path string) (map[string][]byte, error) + + // Setxattr sets all of the extended attributes on file at path, following + // any symbolic links, if necessary. All attributes on the target are + // replaced by the values from attr. If the operation fails to set any + // attribute, those already applied will not be rolled back. + Setxattr(path string, attr map[string][]byte) error +} + +// LXAttrDriver should be implemented by drivers on operating systems and +// filesystems that support setting and getting extended attributes on +// symbolic links. If this is not implemented, extended attributes will be +// ignored on symbolic links. +type LXAttrDriver interface { + // LGetxattr returns all of the extended attributes for the file at path + // and does not follow symlinks.
Typically, this takes a syscall call to + // Llistxattr and Lgetxattr. + LGetxattr(path string) (map[string][]byte, error) + + // LSetxattr sets all of the extended attributes on file at path, without + // following symbolic links. All attributes on the target are replaced by + // the values from attr. If the operation fails to set any attribute, + // those already applied will not be rolled back. + LSetxattr(path string, attr map[string][]byte) error +} + +type DeviceInfoDriver interface { + DeviceInfo(fi os.FileInfo) (maj uint64, min uint64, err error) +} + +// driver is a simple default implementation that sends calls out to the "os" +// package. Extend the "driver" type in system-specific files to add support, +// such as xattrs, at compile time. +type driver struct{} + +var _ File = &os.File{} + +// LocalDriver is the exported Driver struct for convenience. +var LocalDriver Driver = &driver{} + +func (d *driver) Open(p string) (File, error) { + return os.Open(p) +} + +func (d *driver) OpenFile(path string, flag int, perm os.FileMode) (File, error) { + return os.OpenFile(path, flag, perm) +} + +func (d *driver) Stat(p string) (os.FileInfo, error) { + return os.Stat(p) +} + +func (d *driver) Lstat(p string) (os.FileInfo, error) { + return os.Lstat(p) +} + +func (d *driver) Mkdir(p string, mode os.FileMode) error { + return os.Mkdir(p, mode) +} + +// Remove is used to unlink files and remove directories. +// This is following the golang os package api which +// combines the operations into a higher level Remove +// function. If explicit unlinking or directory removal +// to mirror system call is required, they should be +// split up at that time. +func (d *driver) Remove(path string) error { + return os.Remove(path) +} + +func (d *driver) Link(oldname, newname string) error { + return os.Link(oldname, newname) +} + +func (d *driver) Lchown(name string, uid, gid int64) error { + // TODO: error out if uid exceeds int bit width? + return os.Lchown(name, int(uid), int(gid)) +} + +func (d *driver) Symlink(oldname, newname string) error { + return os.Symlink(oldname, newname) +} + +func (d *driver) MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +func (d *driver) RemoveAll(path string) error { + return os.RemoveAll(path) +} diff --git a/extender/code/vendor/github.com/containerd/continuity/driver/driver_unix.go b/extender/code/vendor/github.com/containerd/continuity/driver/driver_unix.go new file mode 100644 index 000000000..6cb5d10fb --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/driver/driver_unix.go @@ -0,0 +1,138 @@ +// +build linux darwin freebsd solaris + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
+*/ + +package driver + +import ( + "errors" + "fmt" + "os" + "sort" + + "github.com/containerd/continuity/devices" + "github.com/containerd/continuity/sysx" +) + +func (d *driver) Mknod(path string, mode os.FileMode, major, minor int) error { + err := devices.Mknod(path, mode, major, minor) + if err != nil { + err = &os.PathError{Op: "mknod", Path: path, Err: err} + } + return err +} + +func (d *driver) Mkfifo(path string, mode os.FileMode) error { + if mode&os.ModeNamedPipe == 0 { + return errors.New("mode passed to Mkfifo does not have the named pipe bit set") + } + // mknod with a mode that has ModeNamedPipe set creates a fifo, not a + // device. + err := devices.Mknod(path, mode, 0, 0) + if err != nil { + err = &os.PathError{Op: "mkfifo", Path: path, Err: err} + } + return err +} + +// Getxattr returns all of the extended attributes for the file at path p. +func (d *driver) Getxattr(p string) (map[string][]byte, error) { + xattrs, err := sysx.Listxattr(p) + if err != nil { + return nil, fmt.Errorf("listing %s xattrs: %v", p, err) + } + + sort.Strings(xattrs) + m := make(map[string][]byte, len(xattrs)) + + for _, attr := range xattrs { + value, err := sysx.Getxattr(p, attr) + if err != nil { + return nil, fmt.Errorf("getting %q xattr on %s: %v", attr, p, err) + } + + // NOTE(stevvooe): This append/copy trick relies on unique + // xattrs. Break this out into an alloc/copy if xattrs are no + // longer unique. + m[attr] = append(m[attr], value...) + } + + return m, nil +} + +// Setxattr sets all of the extended attributes on file at path, following +// any symbolic links, if necessary. All attributes on the target are +// replaced by the values from attr. If the operation fails to set any +// attribute, those already applied will not be rolled back. +func (d *driver) Setxattr(path string, attrMap map[string][]byte) error { + for attr, value := range attrMap { + if err := sysx.Setxattr(path, attr, value, 0); err != nil { + return fmt.Errorf("error setting xattr %q on %s: %v", attr, path, err) + } + } + + return nil +} + +// LGetxattr returns all of the extended attributes for the file at path p +// not following symbolic links. +func (d *driver) LGetxattr(p string) (map[string][]byte, error) { + xattrs, err := sysx.LListxattr(p) + if err != nil { + return nil, fmt.Errorf("listing %s xattrs: %v", p, err) + } + + sort.Strings(xattrs) + m := make(map[string][]byte, len(xattrs)) + + for _, attr := range xattrs { + value, err := sysx.LGetxattr(p, attr) + if err != nil { + return nil, fmt.Errorf("getting %q xattr on %s: %v", attr, p, err) + } + + // NOTE(stevvooe): This append/copy trick relies on unique + // xattrs. Break this out into an alloc/copy if xattrs are no + // longer unique. + m[attr] = append(m[attr], value...) + } + + return m, nil +} + +// LSetxattr sets all of the extended attributes on file at path, not +// following any symbolic links. All attributes on the target are +// replaced by the values from attr. If the operation fails to set any +// attribute, those already applied will not be rolled back.
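+//
+// Editorial usage sketch (not upstream text; the path and attribute name are
+// hypothetical). Callers typically hold the Driver interface, so LSetxattr is
+// reached through an LXAttrDriver type assertion:
+//
+//	if x, ok := LocalDriver.(LXAttrDriver); ok {
+//		_ = x.LSetxattr("/layer/link", map[string][]byte{"user.origin": []byte("layer-1")})
+//	}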
+func (d *driver) LSetxattr(path string, attrMap map[string][]byte) error { + for attr, value := range attrMap { + if err := sysx.LSetxattr(path, attr, value, 0); err != nil { + return fmt.Errorf("error setting xattr %q on %s: %v", attr, path, err) + } + } + + return nil +} + +func (d *driver) DeviceInfo(fi os.FileInfo) (maj uint64, min uint64, err error) { + return devices.DeviceInfo(fi) +} + +// Readlink was forked on Windows to fix a Golang bug, use the "os" package here +func (d *driver) Readlink(p string) (string, error) { + return os.Readlink(p) +} diff --git a/extender/code/vendor/github.com/containerd/continuity/driver/driver_windows.go b/extender/code/vendor/github.com/containerd/continuity/driver/driver_windows.go new file mode 100644 index 000000000..f1dcea32a --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/driver/driver_windows.go @@ -0,0 +1,43 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package driver + +import ( + "os" + + "github.com/containerd/continuity/sysx" +) + +func (d *driver) Mknod(path string, mode os.FileMode, major, minor int) error { + return &os.PathError{Op: "mknod", Path: path, Err: ErrNotSupported} +} + +func (d *driver) Mkfifo(path string, mode os.FileMode) error { + return &os.PathError{Op: "mkfifo", Path: path, Err: ErrNotSupported} +} + +// Lchmod changes the mode of a file not following symlinks. +func (d *driver) Lchmod(path string, mode os.FileMode) (err error) { + // TODO: Use the Windows equivalent + return os.Chmod(path, mode) +} + +// Readlink is forked in order to support Volume paths which are used +// in container layers. +func (d *driver) Readlink(p string) (string, error) { + return sysx.Readlink(p) +} diff --git a/extender/code/vendor/github.com/containerd/continuity/driver/lchmod_linux.go b/extender/code/vendor/github.com/containerd/continuity/driver/lchmod_linux.go new file mode 100644 index 000000000..06be28527 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/driver/lchmod_linux.go @@ -0,0 +1,39 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package driver + +import ( + "os" + + "golang.org/x/sys/unix" +) + +// Lchmod changes the mode of a file not following symlinks. +func (d *driver) Lchmod(path string, mode os.FileMode) error { + // On Linux, file mode is not supported for symlinks, + // and fchmodat() does not support AT_SYMLINK_NOFOLLOW, + // so symlinks need to be skipped entirely.
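+	// Editorial note (not upstream text): Lstat, not Stat, is needed for this
+	// check; Stat follows the link, so ModeSymlink would never be set on its
+	// result and the skip below would never fire.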
+ if st, err := os.Lstat(path); err == nil && st.Mode()&os.ModeSymlink != 0 { + return nil + } + + err := unix.Fchmodat(unix.AT_FDCWD, path, uint32(mode), 0) + if err != nil { + err = &os.PathError{Op: "lchmod", Path: path, Err: err} + } + return err +} diff --git a/extender/code/vendor/github.com/containerd/continuity/driver/lchmod_unix.go b/extender/code/vendor/github.com/containerd/continuity/driver/lchmod_unix.go new file mode 100644 index 000000000..b8877a8ae --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/driver/lchmod_unix.go @@ -0,0 +1,34 @@ +// +build darwin freebsd solaris + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package driver + +import ( + "os" + + "golang.org/x/sys/unix" +) + +// Lchmod changes the mode of a file not following symlinks. +func (d *driver) Lchmod(path string, mode os.FileMode) error { + err := unix.Fchmodat(unix.AT_FDCWD, path, uint32(mode), unix.AT_SYMLINK_NOFOLLOW) + if err != nil { + err = &os.PathError{Op: "lchmod", Path: path, Err: err} + } + return err +} diff --git a/extender/code/vendor/github.com/containerd/continuity/driver/utils.go b/extender/code/vendor/github.com/containerd/continuity/driver/utils.go new file mode 100644 index 000000000..0c688d158 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/driver/utils.go @@ -0,0 +1,90 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
+*/ + +package driver + +import ( + "io" + "io/ioutil" + "os" + "sort" +) + +// ReadFile works the same as ioutil.ReadFile with the Driver abstraction +func ReadFile(r Driver, filename string) ([]byte, error) { + f, err := r.Open(filename) + if err != nil { + return nil, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + return data, nil +} + +// WriteFile works the same as ioutil.WriteFile with the Driver abstraction +func WriteFile(r Driver, filename string, data []byte, perm os.FileMode) error { + f, err := r.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + defer f.Close() + + n, err := f.Write(data) + if err != nil { + return err + } else if n != len(data) { + return io.ErrShortWrite + } + + return nil +} + +// ReadDir works the same as ioutil.ReadDir with the Driver abstraction +func ReadDir(r Driver, dirname string) ([]os.FileInfo, error) { + f, err := r.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + dirs, err := f.Readdir(-1) + if err != nil { + return nil, err + } + + sort.Sort(fileInfos(dirs)) + return dirs, nil +} + +// Simple implementation of the sort.Interface for os.FileInfo +type fileInfos []os.FileInfo + +func (fis fileInfos) Len() int { + return len(fis) +} + +func (fis fileInfos) Less(i, j int) bool { + return fis[i].Name() < fis[j].Name() +} + +func (fis fileInfos) Swap(i, j int) { + fis[i], fis[j] = fis[j], fis[i] +} diff --git a/extender/code/vendor/github.com/containerd/continuity/fs/copy.go b/extender/code/vendor/github.com/containerd/continuity/fs/copy.go new file mode 100644 index 000000000..97053d7e5 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/fs/copy.go @@ -0,0 +1,172 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/pkg/errors" +) + +var bufferPool = &sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 32*1024) + return &buffer + }, +} + +// XAttrErrorHandlers transform a non-nil xattr error. +// Return nil to ignore an error. +// xattrKey can be empty for listxattr operation. +type XAttrErrorHandler func(dst, src, xattrKey string, err error) error + +type copyDirOpts struct { + xeh XAttrErrorHandler +} + +type CopyDirOpt func(*copyDirOpts) error + +// WithXAttrErrorHandler allows specifying XAttrErrorHandler +// If nil XAttrErrorHandler is specified (default), CopyDir stops +// on a non-nil xattr error. +func WithXAttrErrorHandler(xeh XAttrErrorHandler) CopyDirOpt { + return func(o *copyDirOpts) error { + o.xeh = xeh + return nil + } +} + +// WithAllowXAttrErrors allows ignoring xattr errors. +func WithAllowXAttrErrors() CopyDirOpt { + xeh := func(dst, src, xattrKey string, err error) error { + return nil + } + return WithXAttrErrorHandler(xeh) +} + +// CopyDir copies the directory from src to dst. +// Most efficient copy of files is attempted. 
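+//
+// Editorial usage sketch (not upstream text; paths are hypothetical). Callers
+// that must tolerate filesystems without xattr support can pass the option
+// defined above:
+//
+//	if err := CopyDir("/dst/layer", "/src/layer", WithAllowXAttrErrors()); err != nil {
+//		return err
+//	}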
+func CopyDir(dst, src string, opts ...CopyDirOpt) error { + var o copyDirOpts + for _, opt := range opts { + if err := opt(&o); err != nil { + return err + } + } + inodes := map[uint64]string{} + return copyDirectory(dst, src, inodes, &o) +} + +func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) error { + stat, err := os.Stat(src) + if err != nil { + return errors.Wrapf(err, "failed to stat %s", src) + } + if !stat.IsDir() { + return errors.Errorf("source %s is not directory", src) + } + + if st, err := os.Stat(dst); err != nil { + if err := os.Mkdir(dst, stat.Mode()); err != nil { + return errors.Wrapf(err, "failed to mkdir %s", dst) + } + } else if !st.IsDir() { + return errors.Errorf("cannot copy to non-directory: %s", dst) + } else { + if err := os.Chmod(dst, stat.Mode()); err != nil { + return errors.Wrapf(err, "failed to chmod on %s", dst) + } + } + + fis, err := ioutil.ReadDir(src) + if err != nil { + return errors.Wrapf(err, "failed to read %s", src) + } + + if err := copyFileInfo(stat, dst); err != nil { + return errors.Wrapf(err, "failed to copy file info for %s", dst) + } + + for _, fi := range fis { + source := filepath.Join(src, fi.Name()) + target := filepath.Join(dst, fi.Name()) + + switch { + case fi.IsDir(): + if err := copyDirectory(target, source, inodes, o); err != nil { + return err + } + continue + case (fi.Mode() & os.ModeType) == 0: + link, err := getLinkSource(target, fi, inodes) + if err != nil { + return errors.Wrap(err, "failed to get hardlink") + } + if link != "" { + if err := os.Link(link, target); err != nil { + return errors.Wrap(err, "failed to create hard link") + } + } else if err := CopyFile(target, source); err != nil { + return errors.Wrap(err, "failed to copy files") + } + case (fi.Mode() & os.ModeSymlink) == os.ModeSymlink: + link, err := os.Readlink(source) + if err != nil { + return errors.Wrapf(err, "failed to read link: %s", source) + } + if err := os.Symlink(link, target); err != nil { + return errors.Wrapf(err, "failed to create symlink: %s", target) + } + case (fi.Mode() & os.ModeDevice) == os.ModeDevice: + if err := copyDevice(target, fi); err != nil { + return errors.Wrapf(err, "failed to create device") + } + default: + // TODO: Support pipes and sockets + // (editorial fix: err is nil on this path, so the previous + // errors.Wrapf(err, ...) returned nil and silently dropped the failure) + return errors.Errorf("unsupported mode %s", fi.Mode()) + } + if err := copyFileInfo(fi, target); err != nil { + return errors.Wrap(err, "failed to copy file info") + } + + if err := copyXAttrs(target, source, o.xeh); err != nil { + return errors.Wrap(err, "failed to copy xattrs") + } + } + + return nil +} + +// CopyFile copies the source file to the target. +// The most efficient means of copying is used for the platform. +func CopyFile(target, source string) error { + src, err := os.Open(source) + if err != nil { + return errors.Wrapf(err, "failed to open source %s", source) + } + defer src.Close() + tgt, err := os.Create(target) + if err != nil { + return errors.Wrapf(err, "failed to open target %s", target) + } + defer tgt.Close() + + return copyFileContent(tgt, src) +} diff --git a/extender/code/vendor/github.com/containerd/continuity/fs/copy_linux.go b/extender/code/vendor/github.com/containerd/continuity/fs/copy_linux.go new file mode 100644 index 000000000..81c71522a --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/fs/copy_linux.go @@ -0,0 +1,144 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "io" + "os" + "syscall" + + "github.com/containerd/continuity/sysx" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func copyFileInfo(fi os.FileInfo, name string) error { + st := fi.Sys().(*syscall.Stat_t) + if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil { + if os.IsPermission(err) { + // Normally if uid/gid are the same this would be a no-op, but some + // filesystems may still return EPERM... for instance NFS does this. + // In such a case, this is not an error. + if dstStat, err2 := os.Lstat(name); err2 == nil { + st2 := dstStat.Sys().(*syscall.Stat_t) + if st.Uid == st2.Uid && st.Gid == st2.Gid { + err = nil + } + } + } + if err != nil { + return errors.Wrapf(err, "failed to chown %s", name) + } + } + + if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink { + if err := os.Chmod(name, fi.Mode()); err != nil { + return errors.Wrapf(err, "failed to chmod %s", name) + } + } + + timespec := []unix.Timespec{unix.Timespec(StatAtime(st)), unix.Timespec(StatMtime(st))} + if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil { + return errors.Wrapf(err, "failed to utime %s", name) + } + + return nil +} + +const maxSSizeT = int64(^uint(0) >> 1) + +func copyFileContent(dst, src *os.File) error { + st, err := src.Stat() + if err != nil { + return errors.Wrap(err, "unable to stat source") + } + + size := st.Size() + first := true + srcFd := int(src.Fd()) + dstFd := int(dst.Fd()) + + for size > 0 { + // Ensure that we are never trying to copy more than SSIZE_MAX at a + // time and at the same time avoids overflows when the file is larger + // than 4GB on 32-bit systems. 
+ var copySize int + if size > maxSSizeT { + copySize = int(maxSSizeT) + } else { + copySize = int(size) + } + n, err := unix.CopyFileRange(srcFd, nil, dstFd, nil, copySize, 0) + if err != nil { + if (err != unix.ENOSYS && err != unix.EXDEV) || !first { + return errors.Wrap(err, "copy file range failed") + } + + buf := bufferPool.Get().(*[]byte) + _, err = io.CopyBuffer(dst, src, *buf) + bufferPool.Put(buf) + return errors.Wrap(err, "userspace copy failed") + } + + first = false + size -= int64(n) + } + + return nil +} + +func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error { + xattrKeys, err := sysx.LListxattr(src) + if err != nil { + e := errors.Wrapf(err, "failed to list xattrs on %s", src) + if xeh != nil { + e = xeh(dst, src, "", e) + } + return e + } + for _, xattr := range xattrKeys { + data, err := sysx.LGetxattr(src, xattr) + if err != nil { + e := errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src) + if xeh != nil { + if e = xeh(dst, src, xattr, e); e == nil { + continue + } + } + return e + } + if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil { + e := errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst) + if xeh != nil { + if e = xeh(dst, src, xattr, e); e == nil { + continue + } + } + return e + } + } + + return nil +} + +func copyDevice(dst string, fi os.FileInfo) error { + st, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return errors.New("unsupported stat type") + } + return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev)) +} diff --git a/extender/code/vendor/github.com/containerd/continuity/fs/copy_unix.go b/extender/code/vendor/github.com/containerd/continuity/fs/copy_unix.go new file mode 100644 index 000000000..73c01a46d --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/fs/copy_unix.go @@ -0,0 +1,112 @@ +// +build solaris darwin freebsd + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "io" + "os" + "syscall" + + "github.com/containerd/continuity/sysx" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func copyFileInfo(fi os.FileInfo, name string) error { + st := fi.Sys().(*syscall.Stat_t) + if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil { + if os.IsPermission(err) { + // Normally if uid/gid are the same this would be a no-op, but some + // filesystems may still return EPERM... for instance NFS does this. + // In such a case, this is not an error. 
+ if dstStat, err2 := os.Lstat(name); err2 == nil { + st2 := dstStat.Sys().(*syscall.Stat_t) + if st.Uid == st2.Uid && st.Gid == st2.Gid { + err = nil + } + } + } + if err != nil { + return errors.Wrapf(err, "failed to chown %s", name) + } + } + + if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink { + if err := os.Chmod(name, fi.Mode()); err != nil { + return errors.Wrapf(err, "failed to chmod %s", name) + } + } + + timespec := []syscall.Timespec{StatAtime(st), StatMtime(st)} + if err := syscall.UtimesNano(name, timespec); err != nil { + return errors.Wrapf(err, "failed to utime %s", name) + } + + return nil +} + +func copyFileContent(dst, src *os.File) error { + buf := bufferPool.Get().(*[]byte) + _, err := io.CopyBuffer(dst, src, *buf) + bufferPool.Put(buf) + + return err +} + +func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error { + xattrKeys, err := sysx.LListxattr(src) + if err != nil { + e := errors.Wrapf(err, "failed to list xattrs on %s", src) + if xeh != nil { + e = xeh(dst, src, "", e) + } + return e + } + for _, xattr := range xattrKeys { + data, err := sysx.LGetxattr(src, xattr) + if err != nil { + e := errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src) + if xeh != nil { + if e = xeh(dst, src, xattr, e); e == nil { + continue + } + } + return e + } + if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil { + e := errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst) + if xeh != nil { + if e = xeh(dst, src, xattr, e); e == nil { + continue + } + } + return e + } + } + + return nil +} + +func copyDevice(dst string, fi os.FileInfo) error { + st, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return errors.New("unsupported stat type") + } + return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev)) +} diff --git a/extender/code/vendor/github.com/containerd/continuity/fs/copy_windows.go b/extender/code/vendor/github.com/containerd/continuity/fs/copy_windows.go new file mode 100644 index 000000000..27c7d7dbb --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/fs/copy_windows.go @@ -0,0 +1,49 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package fs + +import ( + "io" + "os" + + "github.com/pkg/errors" +) + +func copyFileInfo(fi os.FileInfo, name string) error { + if err := os.Chmod(name, fi.Mode()); err != nil { + return errors.Wrapf(err, "failed to chmod %s", name) + } + + // TODO: copy windows specific metadata + + return nil +} + +func copyFileContent(dst, src *os.File) error { + buf := bufferPool.Get().(*[]byte) + _, err := io.CopyBuffer(dst, src, *buf) + bufferPool.Put(buf) + return err +} + +func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error { + return nil +} + +func copyDevice(dst string, fi os.FileInfo) error { + return errors.New("device copy not supported") +} diff --git a/extender/code/vendor/github.com/containerd/continuity/fs/diff.go b/extender/code/vendor/github.com/containerd/continuity/fs/diff.go new file mode 100644 index 000000000..e64f9e73d --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/fs/diff.go @@ -0,0 +1,326 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "context" + "os" + "path/filepath" + "strings" + + "golang.org/x/sync/errgroup" + + "github.com/sirupsen/logrus" +) + +// ChangeKind is the type of modification that +// a change is making. +type ChangeKind int + +const ( + // ChangeKindUnmodified represents an unmodified + // file + ChangeKindUnmodified = iota + + // ChangeKindAdd represents an addition of + // a file + ChangeKindAdd + + // ChangeKindModify represents a change to + // an existing file + ChangeKindModify + + // ChangeKindDelete represents a delete of + // a file + ChangeKindDelete +) + +func (k ChangeKind) String() string { + switch k { + case ChangeKindUnmodified: + return "unmodified" + case ChangeKindAdd: + return "add" + case ChangeKindModify: + return "modify" + case ChangeKindDelete: + return "delete" + default: + return "" + } +} + +// Change represents a single change between a diff and its parent. +type Change struct { + Kind ChangeKind + Path string +} + +// ChangeFunc is the type of function called for each change +// computed during a directory changes calculation. +type ChangeFunc func(ChangeKind, string, os.FileInfo, error) error + +// Changes computes changes between two directories calling the +// given change function for each computed change. The first +// directory is intended to be the base directory and the second +// directory the changed directory. +// +// The change callback is called in the order of path names, and +// changes should be applied in that order. +// Due to this apply ordering, the following is true +// - Removed directory trees only create a single change for the root +// directory removed. Remaining changes are implied. +// - A directory which is modified to become a file will not have +// delete entries for sub-path items, their removal is implied +// by the removal of the parent directory. +// +// Opaque directories will not be treated specially and each file +// removed from the base directory will show up as a removal.
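+//
+// Editorial illustration (not upstream text): removing the tree /foo, which
+// contained /foo/a and /foo/b, yields a single ChangeKindDelete for "/foo";
+// the deletes for "/foo/a" and "/foo/b" are implied by the parent's removal.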
+// +// File content comparisons will be done on files which have timestamps +// which may have been truncated. If either of the files being compared +// has a zero value nanosecond value, each byte will be compared for +// differences. If 2 files have the same seconds value but different +// nanosecond values where one of those values is zero, the files will +// be considered unchanged if the content is the same. This behavior +// is to account for timestamp truncation during archiving. +func Changes(ctx context.Context, a, b string, changeFn ChangeFunc) error { + if a == "" { + logrus.Debugf("Using single walk diff for %s", b) + return addDirChanges(ctx, changeFn, b) + } else if diffOptions := detectDirDiff(b, a); diffOptions != nil { + logrus.Debugf("Using single walk diff for %s from %s", diffOptions.diffDir, a) + return diffDirChanges(ctx, changeFn, a, diffOptions) + } + + logrus.Debugf("Using double walk diff for %s from %s", b, a) + return doubleWalkDiff(ctx, changeFn, a, b) +} + +func addDirChanges(ctx context.Context, changeFn ChangeFunc, root string) error { + return filepath.Walk(root, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(root, path) + if err != nil { + return err + } + + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + return changeFn(ChangeKindAdd, path, f, nil) + }) +} + +// diffDirOptions is used when the diff can be directly calculated from +// a diff directory to its base, without walking both trees. +type diffDirOptions struct { + diffDir string + skipChange func(string) (bool, error) + deleteChange func(string, string, os.FileInfo) (string, error) +} + +// diffDirChanges walks the diff directory and compares changes against the base. +func diffDirChanges(ctx context.Context, changeFn ChangeFunc, base string, o *diffDirOptions) error { + changedDirs := make(map[string]struct{}) + return filepath.Walk(o.diffDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(o.diffDir, path) + if err != nil { + return err + } + + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + // TODO: handle opaqueness, start new double walker at this + // location to get deletes, and skip tree in single walker + + if o.skipChange != nil { + if skip, err := o.skipChange(path); skip { + return err + } + } + + var kind ChangeKind + + deletedFile, err := o.deleteChange(o.diffDir, path, f) + if err != nil { + return err + } + + // Find out what kind of modification happened + if deletedFile != "" { + path = deletedFile + kind = ChangeKindDelete + f = nil + } else { + // Otherwise, the file was added + kind = ChangeKindAdd + + // ...Unless it already existed in a base, in which case, it's a modification + stat, err := os.Stat(filepath.Join(base, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the base, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. 
+ // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + kind = ChangeKindModify + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. + if f.IsDir() { + changedDirs[path] = struct{}{} + } + if kind == ChangeKindAdd || kind == ChangeKindDelete { + parent := filepath.Dir(path) + if _, ok := changedDirs[parent]; !ok && parent != "/" { + pi, err := os.Stat(filepath.Join(o.diffDir, parent)) + if err := changeFn(ChangeKindModify, parent, pi, err); err != nil { + return err + } + changedDirs[parent] = struct{}{} + } + } + + return changeFn(kind, path, f, nil) + }) +} + +// doubleWalkDiff walks both directories to create a diff +func doubleWalkDiff(ctx context.Context, changeFn ChangeFunc, a, b string) (err error) { + g, ctx := errgroup.WithContext(ctx) + + var ( + c1 = make(chan *currentPath) + c2 = make(chan *currentPath) + + f1, f2 *currentPath + rmdir string + ) + g.Go(func() error { + defer close(c1) + return pathWalk(ctx, a, c1) + }) + g.Go(func() error { + defer close(c2) + return pathWalk(ctx, b, c2) + }) + g.Go(func() error { + for c1 != nil || c2 != nil { + if f1 == nil && c1 != nil { + f1, err = nextPath(ctx, c1) + if err != nil { + return err + } + if f1 == nil { + c1 = nil + } + } + + if f2 == nil && c2 != nil { + f2, err = nextPath(ctx, c2) + if err != nil { + return err + } + if f2 == nil { + c2 = nil + } + } + if f1 == nil && f2 == nil { + continue + } + + var f os.FileInfo + k, p := pathChange(f1, f2) + switch k { + case ChangeKindAdd: + if rmdir != "" { + rmdir = "" + } + f = f2.f + f2 = nil + case ChangeKindDelete: + // Check if this file is already removed by being + // under of a removed directory + if rmdir != "" && strings.HasPrefix(f1.path, rmdir) { + f1 = nil + continue + } else if f1.f.IsDir() { + rmdir = f1.path + string(os.PathSeparator) + } else if rmdir != "" { + rmdir = "" + } + f1 = nil + case ChangeKindModify: + same, err := sameFile(f1, f2) + if err != nil { + return err + } + if f1.f.IsDir() && !f2.f.IsDir() { + rmdir = f1.path + string(os.PathSeparator) + } else if rmdir != "" { + rmdir = "" + } + f = f2.f + f1 = nil + f2 = nil + if same { + if !isLinked(f) { + continue + } + k = ChangeKindUnmodified + } + } + if err := changeFn(k, p, f, nil); err != nil { + return err + } + } + return nil + }) + + return g.Wait() +} diff --git a/extender/code/vendor/github.com/containerd/continuity/fs/diff_unix.go b/extender/code/vendor/github.com/containerd/continuity/fs/diff_unix.go new file mode 100644 index 000000000..7913af27d --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/fs/diff_unix.go @@ -0,0 +1,74 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "bytes" + "os" + "syscall" + + "github.com/containerd/continuity/sysx" + "github.com/pkg/errors" +) + +// detectDirDiff returns diff dir options if a directory could +// be found in the mount info for upper which is the direct +// diff with the provided lower directory +func detectDirDiff(upper, lower string) *diffDirOptions { + // TODO: get mount options for upper + // TODO: detect AUFS + // TODO: detect overlay + return nil +} + +// compareSysStat returns whether the stats are equivalent (that is, whether +// the files can be considered the same file) and an error. +func compareSysStat(s1, s2 interface{}) (bool, error) { + ls1, ok := s1.(*syscall.Stat_t) + if !ok { + return false, nil + } + ls2, ok := s2.(*syscall.Stat_t) + if !ok { + return false, nil + } + + return ls1.Mode == ls2.Mode && ls1.Uid == ls2.Uid && ls1.Gid == ls2.Gid && ls1.Rdev == ls2.Rdev, nil +} + +func compareCapabilities(p1, p2 string) (bool, error) { + c1, err := sysx.LGetxattr(p1, "security.capability") + if err != nil && err != sysx.ENODATA { + return false, errors.Wrapf(err, "failed to get xattr for %s", p1) + } + c2, err := sysx.LGetxattr(p2, "security.capability") + if err != nil && err != sysx.ENODATA { + return false, errors.Wrapf(err, "failed to get xattr for %s", p2) + } + return bytes.Equal(c1, c2), nil +} + +func isLinked(f os.FileInfo) bool { + s, ok := f.Sys().(*syscall.Stat_t) + if !ok { + return false + } + return !f.IsDir() && s.Nlink > 1 +} diff --git a/extender/code/vendor/github.com/containerd/continuity/fs/diff_windows.go b/extender/code/vendor/github.com/containerd/continuity/fs/diff_windows.go new file mode 100644 index 000000000..4bfa72d3a --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/fs/diff_windows.go @@ -0,0 +1,48 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
+*/ + +package fs + +import ( + "os" + + "golang.org/x/sys/windows" +) + +func detectDirDiff(upper, lower string) *diffDirOptions { + return nil +} + +func compareSysStat(s1, s2 interface{}) (bool, error) { + f1, ok := s1.(windows.Win32FileAttributeData) + if !ok { + return false, nil + } + f2, ok := s2.(windows.Win32FileAttributeData) + if !ok { + return false, nil + } + return f1.FileAttributes == f2.FileAttributes, nil +} + +func compareCapabilities(p1, p2 string) (bool, error) { + // TODO: Use windows equivalent + return true, nil +} + +func isLinked(os.FileInfo) bool { + return false +} diff --git a/extender/code/vendor/github.com/containerd/continuity/fs/dtype_linux.go b/extender/code/vendor/github.com/containerd/continuity/fs/dtype_linux.go new file mode 100644 index 000000000..10510d8de --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/fs/dtype_linux.go @@ -0,0 +1,103 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "fmt" + "io/ioutil" + "os" + "syscall" + "unsafe" +) + +func locateDummyIfEmpty(path string) (string, error) { + children, err := ioutil.ReadDir(path) + if err != nil { + return "", err + } + if len(children) != 0 { + return "", nil + } + dummyFile, err := ioutil.TempFile(path, "fsutils-dummy") + if err != nil { + return "", err + } + name := dummyFile.Name() + err = dummyFile.Close() + return name, err +} + +// SupportsDType returns whether the filesystem mounted on path supports d_type +func SupportsDType(path string) (bool, error) { + // locate dummy so that we have at least one dirent + dummy, err := locateDummyIfEmpty(path) + if err != nil { + return false, err + } + if dummy != "" { + defer os.Remove(dummy) + } + + visited := 0 + supportsDType := true + fn := func(ent *syscall.Dirent) bool { + visited++ + if ent.Type == syscall.DT_UNKNOWN { + supportsDType = false + // stop iteration + return true + } + // continue iteration + return false + } + if err = iterateReadDir(path, fn); err != nil { + return false, err + } + if visited == 0 { + return false, fmt.Errorf("did not hit any dirent during iteration %s", path) + } + return supportsDType, nil +} + +func iterateReadDir(path string, fn func(*syscall.Dirent) bool) error { + d, err := os.Open(path) + if err != nil { + return err + } + defer d.Close() + fd := int(d.Fd()) + buf := make([]byte, 4096) + for { + nbytes, err := syscall.ReadDirent(fd, buf) + if err != nil { + return err + } + if nbytes == 0 { + break + } + for off := 0; off < nbytes; { + ent := (*syscall.Dirent)(unsafe.Pointer(&buf[off])) + if stop := fn(ent); stop { + return nil + } + off += int(ent.Reclen) + } + } + return nil +} diff --git a/extender/code/vendor/github.com/containerd/continuity/fs/du.go b/extender/code/vendor/github.com/containerd/continuity/fs/du.go new file mode 100644 index 000000000..fccc985dc --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/fs/du.go @@ -0,0 +1,38 @@ +/* + Copyright The containerd Authors. 
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fs
+
+import "context"
+
+// Usage of disk information
+type Usage struct {
+ Inodes int64
+ Size int64
+}
+
+// DiskUsage counts the number of inodes and disk usage for the resources under
+// the given roots.
+func DiskUsage(ctx context.Context, roots ...string) (Usage, error) {
+ return diskUsage(ctx, roots...)
+}
+
+// DiffUsage counts the number of inodes and disk usage in the
+// diff between the 2 directories. The first path is intended
+// as the base directory and the second as the changed directory.
+func DiffUsage(ctx context.Context, a, b string) (Usage, error) {
+ return diffUsage(ctx, a, b)
+}
diff --git a/extender/code/vendor/github.com/containerd/continuity/fs/du_unix.go b/extender/code/vendor/github.com/containerd/continuity/fs/du_unix.go
new file mode 100644
index 000000000..e22ffbea3
--- /dev/null
+++ b/extender/code/vendor/github.com/containerd/continuity/fs/du_unix.go
@@ -0,0 +1,110 @@
+// +build !windows
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fs
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "syscall"
+)
+
+type inode struct {
+ // TODO(stevvooe): Can probably reduce memory usage by not tracking
+ // device, but we can leave this as is for now.
+ dev, ino uint64
+}
+
+func newInode(stat *syscall.Stat_t) inode {
+ return inode{
+ // Dev is uint32 on darwin/bsd, uint64 on linux/solaris
+ dev: uint64(stat.Dev), // nolint: unconvert
+ // Ino is uint32 on bsd, uint64 on darwin/linux/solaris
+ ino: uint64(stat.Ino), // nolint: unconvert
+ }
+}
+
+func diskUsage(ctx context.Context, roots ...string) (Usage, error) {
+
+ var (
+ size int64
+ inodes = map[inode]struct{}{} // expensive!
+ )
+
+ for _, root := range roots {
+ if err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ inoKey := newInode(fi.Sys().(*syscall.Stat_t))
+ if _, ok := inodes[inoKey]; !ok {
+ inodes[inoKey] = struct{}{}
+ size += fi.Size()
+ }
+
+ return nil
+ }); err != nil {
+ return Usage{}, err
+ }
+ }
+
+ return Usage{
+ Inodes: int64(len(inodes)),
+ Size: size,
+ }, nil
+}
+
+func diffUsage(ctx context.Context, a, b string) (Usage, error) {
+ var (
+ size int64
+ inodes = map[inode]struct{}{} // expensive!
+ ) + + if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + if kind == ChangeKindAdd || kind == ChangeKindModify { + inoKey := newInode(fi.Sys().(*syscall.Stat_t)) + if _, ok := inodes[inoKey]; !ok { + inodes[inoKey] = struct{}{} + size += fi.Size() + } + + return nil + + } + return nil + }); err != nil { + return Usage{}, err + } + + return Usage{ + Inodes: int64(len(inodes)), + Size: size, + }, nil +} diff --git a/extender/code/vendor/github.com/containerd/continuity/fs/du_windows.go b/extender/code/vendor/github.com/containerd/continuity/fs/du_windows.go new file mode 100644 index 000000000..8f25ec59c --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/fs/du_windows.go @@ -0,0 +1,82 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "context" + "os" + "path/filepath" +) + +func diskUsage(ctx context.Context, roots ...string) (Usage, error) { + var ( + size int64 + ) + + // TODO(stevvooe): Support inodes (or equivalent) for windows. + + for _, root := range roots { + if err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + size += fi.Size() + return nil + }); err != nil { + return Usage{}, err + } + } + + return Usage{ + Size: size, + }, nil +} + +func diffUsage(ctx context.Context, a, b string) (Usage, error) { + var ( + size int64 + ) + + if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + if kind == ChangeKindAdd || kind == ChangeKindModify { + size += fi.Size() + + return nil + + } + return nil + }); err != nil { + return Usage{}, err + } + + return Usage{ + Size: size, + }, nil +} diff --git a/extender/code/vendor/github.com/containerd/continuity/fs/hardlink.go b/extender/code/vendor/github.com/containerd/continuity/fs/hardlink.go new file mode 100644 index 000000000..762aa45e6 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/fs/hardlink.go @@ -0,0 +1,43 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import "os" + +// GetLinkInfo returns an identifier representing the node a hardlink is pointing +// to. If the file is not hard linked then 0 will be returned. 
+func GetLinkInfo(fi os.FileInfo) (uint64, bool) { + return getLinkInfo(fi) +} + +// getLinkSource returns a path for the given name and +// file info to its link source in the provided inode +// map. If the given file name is not in the map and +// has other links, it is added to the inode map +// to be a source for other link locations. +func getLinkSource(name string, fi os.FileInfo, inodes map[uint64]string) (string, error) { + inode, isHardlink := getLinkInfo(fi) + if !isHardlink { + return "", nil + } + + path, ok := inodes[inode] + if !ok { + inodes[inode] = name + } + return path, nil +} diff --git a/extender/code/vendor/github.com/containerd/continuity/fs/hardlink_unix.go b/extender/code/vendor/github.com/containerd/continuity/fs/hardlink_unix.go new file mode 100644 index 000000000..f95f0904c --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/fs/hardlink_unix.go @@ -0,0 +1,34 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "os" + "syscall" +) + +func getLinkInfo(fi os.FileInfo) (uint64, bool) { + s, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return 0, false + } + + // Ino is uint32 on bsd, uint64 on darwin/linux/solaris + return uint64(s.Ino), !fi.IsDir() && s.Nlink > 1 // nolint: unconvert +} diff --git a/extender/code/vendor/github.com/containerd/continuity/fs/hardlink_windows.go b/extender/code/vendor/github.com/containerd/continuity/fs/hardlink_windows.go new file mode 100644 index 000000000..748554714 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/fs/hardlink_windows.go @@ -0,0 +1,23 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import "os" + +func getLinkInfo(fi os.FileInfo) (uint64, bool) { + return 0, false +} diff --git a/extender/code/vendor/github.com/containerd/continuity/fs/path.go b/extender/code/vendor/github.com/containerd/continuity/fs/path.go new file mode 100644 index 000000000..8863caa9d --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/fs/path.go @@ -0,0 +1,313 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
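The inode identity that getLinkInfo exposes is the same identity diskUsage (earlier in this diff) uses to avoid double-counting hard-linked files. A small sketch (hypothetical paths, vendored import path assumed) of using the exported GetLinkInfo to test whether two names are hard links to one file:

```go
package main

import (
	"fmt"
	"os"

	"github.com/containerd/continuity/fs"
)

// Sketch: decide whether two names are hard links to the same file by
// comparing the link identifiers returned by GetLinkInfo.
func main() {
	fi1, err := os.Lstat("/tmp/a") // hypothetical paths
	if err != nil {
		panic(err)
	}
	fi2, err := os.Lstat("/tmp/b")
	if err != nil {
		panic(err)
	}
	ino1, linked1 := fs.GetLinkInfo(fi1)
	ino2, linked2 := fs.GetLinkInfo(fi2)
	fmt.Println("hard links to one file:", linked1 && linked2 && ino1 == ino2)
}
```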
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "bytes" + "context" + "io" + "os" + "path/filepath" + + "github.com/pkg/errors" +) + +var ( + errTooManyLinks = errors.New("too many links") +) + +type currentPath struct { + path string + f os.FileInfo + fullPath string +} + +func pathChange(lower, upper *currentPath) (ChangeKind, string) { + if lower == nil { + if upper == nil { + panic("cannot compare nil paths") + } + return ChangeKindAdd, upper.path + } + if upper == nil { + return ChangeKindDelete, lower.path + } + + switch i := directoryCompare(lower.path, upper.path); { + case i < 0: + // File in lower that is not in upper + return ChangeKindDelete, lower.path + case i > 0: + // File in upper that is not in lower + return ChangeKindAdd, upper.path + default: + return ChangeKindModify, upper.path + } +} + +func directoryCompare(a, b string) int { + l := len(a) + if len(b) < l { + l = len(b) + } + for i := 0; i < l; i++ { + c1, c2 := a[i], b[i] + if c1 == filepath.Separator { + c1 = byte(0) + } + if c2 == filepath.Separator { + c2 = byte(0) + } + if c1 < c2 { + return -1 + } + if c1 > c2 { + return +1 + } + } + if len(a) < len(b) { + return -1 + } + if len(a) > len(b) { + return +1 + } + return 0 +} + +func sameFile(f1, f2 *currentPath) (bool, error) { + if os.SameFile(f1.f, f2.f) { + return true, nil + } + + equalStat, err := compareSysStat(f1.f.Sys(), f2.f.Sys()) + if err != nil || !equalStat { + return equalStat, err + } + + if eq, err := compareCapabilities(f1.fullPath, f2.fullPath); err != nil || !eq { + return eq, err + } + + // If not a directory also check size, modtime, and content + if !f1.f.IsDir() { + if f1.f.Size() != f2.f.Size() { + return false, nil + } + t1 := f1.f.ModTime() + t2 := f2.f.ModTime() + + if t1.Unix() != t2.Unix() { + return false, nil + } + + // If the timestamp may have been truncated in both of the + // files, check content of file to determine difference + if t1.Nanosecond() == 0 && t2.Nanosecond() == 0 { + var eq bool + if (f1.f.Mode() & os.ModeSymlink) == os.ModeSymlink { + eq, err = compareSymlinkTarget(f1.fullPath, f2.fullPath) + } else if f1.f.Size() > 0 { + eq, err = compareFileContent(f1.fullPath, f2.fullPath) + } + if err != nil || !eq { + return eq, err + } + } else if t1.Nanosecond() != t2.Nanosecond() { + return false, nil + } + } + + return true, nil +} + +func compareSymlinkTarget(p1, p2 string) (bool, error) { + t1, err := os.Readlink(p1) + if err != nil { + return false, err + } + t2, err := os.Readlink(p2) + if err != nil { + return false, err + } + return t1 == t2, nil +} + +const compareChuckSize = 32 * 1024 + +// compareFileContent compares the content of 2 same sized files +// by comparing each byte. 
+func compareFileContent(p1, p2 string) (bool, error) { + f1, err := os.Open(p1) + if err != nil { + return false, err + } + defer f1.Close() + f2, err := os.Open(p2) + if err != nil { + return false, err + } + defer f2.Close() + + b1 := make([]byte, compareChuckSize) + b2 := make([]byte, compareChuckSize) + for { + n1, err1 := f1.Read(b1) + if err1 != nil && err1 != io.EOF { + return false, err1 + } + n2, err2 := f2.Read(b2) + if err2 != nil && err2 != io.EOF { + return false, err2 + } + if n1 != n2 || !bytes.Equal(b1[:n1], b2[:n2]) { + return false, nil + } + if err1 == io.EOF && err2 == io.EOF { + return true, nil + } + } +} + +func pathWalk(ctx context.Context, root string, pathC chan<- *currentPath) error { + return filepath.Walk(root, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(root, path) + if err != nil { + return err + } + + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + p := ¤tPath{ + path: path, + f: f, + fullPath: filepath.Join(root, path), + } + + select { + case <-ctx.Done(): + return ctx.Err() + case pathC <- p: + return nil + } + }) +} + +func nextPath(ctx context.Context, pathC <-chan *currentPath) (*currentPath, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case p := <-pathC: + return p, nil + } +} + +// RootPath joins a path with a root, evaluating and bounding any +// symlink to the root directory. +func RootPath(root, path string) (string, error) { + if path == "" { + return root, nil + } + var linksWalked int // to protect against cycles + for { + i := linksWalked + newpath, err := walkLinks(root, path, &linksWalked) + if err != nil { + return "", err + } + path = newpath + if i == linksWalked { + newpath = filepath.Join("/", newpath) + if path == newpath { + return filepath.Join(root, newpath), nil + } + path = newpath + } + } +} + +func walkLink(root, path string, linksWalked *int) (newpath string, islink bool, err error) { + if *linksWalked > 255 { + return "", false, errTooManyLinks + } + + path = filepath.Join("/", path) + if path == "/" { + return path, false, nil + } + realPath := filepath.Join(root, path) + + fi, err := os.Lstat(realPath) + if err != nil { + // If path does not yet exist, treat as non-symlink + if os.IsNotExist(err) { + return path, false, nil + } + return "", false, err + } + if fi.Mode()&os.ModeSymlink == 0 { + return path, false, nil + } + newpath, err = os.Readlink(realPath) + if err != nil { + return "", false, err + } + *linksWalked++ + return newpath, true, nil +} + +func walkLinks(root, path string, linksWalked *int) (string, error) { + switch dir, file := filepath.Split(path); { + case dir == "": + newpath, _, err := walkLink(root, file, linksWalked) + return newpath, err + case file == "": + if os.IsPathSeparator(dir[len(dir)-1]) { + if dir == "/" { + return dir, nil + } + return walkLinks(root, dir[:len(dir)-1], linksWalked) + } + newpath, _, err := walkLink(root, dir, linksWalked) + return newpath, err + default: + newdir, err := walkLinks(root, dir, linksWalked) + if err != nil { + return "", err + } + newpath, islink, err := walkLink(root, filepath.Join(newdir, file), linksWalked) + if err != nil { + return "", err + } + if !islink { + return newpath, nil + } + if filepath.IsAbs(newpath) { + return newpath, nil + } + return filepath.Join(newdir, newpath), nil + } +} diff --git 
a/extender/code/vendor/github.com/containerd/continuity/fs/stat_bsd.go b/extender/code/vendor/github.com/containerd/continuity/fs/stat_bsd.go
new file mode 100644
index 000000000..cb7400a33
--- /dev/null
+++ b/extender/code/vendor/github.com/containerd/continuity/fs/stat_bsd.go
@@ -0,0 +1,44 @@
+// +build darwin freebsd
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fs
+
+import (
+ "syscall"
+ "time"
+)
+
+// StatAtime returns the access time from a stat struct
+func StatAtime(st *syscall.Stat_t) syscall.Timespec {
+ return st.Atimespec
+}
+
+// StatCtime returns the status change time from a stat struct
+func StatCtime(st *syscall.Stat_t) syscall.Timespec {
+ return st.Ctimespec
+}
+
+// StatMtime returns the modified time from a stat struct
+func StatMtime(st *syscall.Stat_t) syscall.Timespec {
+ return st.Mtimespec
+}
+
+// StatATimeAsTime returns the access time as a time.Time
+func StatATimeAsTime(st *syscall.Stat_t) time.Time {
+ return time.Unix(int64(st.Atimespec.Sec), int64(st.Atimespec.Nsec)) // nolint: unconvert
+}
diff --git a/extender/code/vendor/github.com/containerd/continuity/fs/stat_linux.go b/extender/code/vendor/github.com/containerd/continuity/fs/stat_linux.go
new file mode 100644
index 000000000..4a678dd1f
--- /dev/null
+++ b/extender/code/vendor/github.com/containerd/continuity/fs/stat_linux.go
@@ -0,0 +1,43 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fs
+
+import (
+ "syscall"
+ "time"
+)
+
+// StatAtime returns the Atim
+func StatAtime(st *syscall.Stat_t) syscall.Timespec {
+ return st.Atim
+}
+
+// StatCtime returns the Ctim
+func StatCtime(st *syscall.Stat_t) syscall.Timespec {
+ return st.Ctim
+}
+
+// StatMtime returns the Mtim
+func StatMtime(st *syscall.Stat_t) syscall.Timespec {
+ return st.Mtim
+}
+
+// StatATimeAsTime returns st.Atim as a time.Time
+func StatATimeAsTime(st *syscall.Stat_t) time.Time {
+ // The int64 conversions ensure the line compiles for 32-bit systems as well.
+ return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) // nolint: unconvert
+}
diff --git a/extender/code/vendor/github.com/containerd/continuity/fs/time.go b/extender/code/vendor/github.com/containerd/continuity/fs/time.go
new file mode 100644
index 000000000..cde456123
--- /dev/null
+++ b/extender/code/vendor/github.com/containerd/continuity/fs/time.go
@@ -0,0 +1,29 @@
+/*
+ Copyright The containerd Authors.
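Since syscall.Stat_t spells its timestamp fields differently per platform (Atimespec on BSD/darwin, Atim on Linux), these accessors let callers stay portable. A unix-only sketch (hypothetical path, vendored import path assumed) of reading times through them:

```go
package main

import (
	"fmt"
	"os"
	"syscall"

	"github.com/containerd/continuity/fs"
)

// Sketch: the accessors hide whether the platform calls the field
// Atim or Atimespec, so callers never touch the raw field names.
func main() {
	fi, err := os.Stat("/etc/hosts") // hypothetical path
	if err != nil {
		panic(err)
	}
	st, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		panic("not backed by syscall.Stat_t")
	}
	fmt.Println("atime:", fs.StatATimeAsTime(st))
	fmt.Println("mtime (ns):", fs.StatMtime(st).Nsec)
}
```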
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fs
+
+import "time"
+
+// Gnu tar and the go tar writer don't have sub-second mtime
+// precision, which is problematic when we apply changes via tar
+// files. We handle this by comparing for exact times, *or* the same
+// second count with either a or b having exactly 0 nanoseconds
+func sameFsTime(a, b time.Time) bool {
+ return a == b ||
+ (a.Unix() == b.Unix() &&
+ (a.Nanosecond() == 0 || b.Nanosecond() == 0))
+}
diff --git a/extender/code/vendor/github.com/containerd/continuity/pathdriver/path_driver.go b/extender/code/vendor/github.com/containerd/continuity/pathdriver/path_driver.go
new file mode 100644
index 000000000..b0d5a6b56
--- /dev/null
+++ b/extender/code/vendor/github.com/containerd/continuity/pathdriver/path_driver.go
@@ -0,0 +1,101 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package pathdriver
+
+import (
+ "path/filepath"
+)
+
+// PathDriver provides all of the path manipulation functions in a common
+// interface. The context should call these and never use the `filepath`
+// package or any other package to manipulate paths.
+type PathDriver interface {
+ Join(paths ...string) string
+ IsAbs(path string) bool
+ Rel(base, target string) (string, error)
+ Base(path string) string
+ Dir(path string) string
+ Clean(path string) string
+ Split(path string) (dir, file string)
+ Separator() byte
+ Abs(path string) (string, error)
+ Walk(string, filepath.WalkFunc) error
+ FromSlash(path string) string
+ ToSlash(path string) string
+ Match(pattern, name string) (matched bool, err error)
+}
+
+// pathDriver is a simple default implementation that calls the filepath package.
+type pathDriver struct{}
+
+// LocalPathDriver is the exported pathDriver struct for convenience.
+var LocalPathDriver PathDriver = &pathDriver{}
+
+func (*pathDriver) Join(paths ...string) string {
+ return filepath.Join(paths...)
+}
+
+func (*pathDriver) IsAbs(path string) bool {
+ return filepath.IsAbs(path)
+}
+
+func (*pathDriver) Rel(base, target string) (string, error) {
+ return filepath.Rel(base, target)
+}
+
+func (*pathDriver) Base(path string) string {
+ return filepath.Base(path)
+}
+
+func (*pathDriver) Dir(path string) string {
+ return filepath.Dir(path)
+}
+
+func (*pathDriver) Clean(path string) string {
+ return filepath.Clean(path)
+}
+
+func (*pathDriver) Split(path string) (dir, file string) {
+ return filepath.Split(path)
+}
+
+func (*pathDriver) Separator() byte {
+ return filepath.Separator
+}
+
+func (*pathDriver) Abs(path string) (string, error) {
+ return filepath.Abs(path)
+}
+
+// Note that filepath.Walk calls os.Stat, so if the context wants
+// to call Driver.Stat() for Walk, it needs to create a new struct that
+// overrides this method.
+func (*pathDriver) Walk(root string, walkFn filepath.WalkFunc) error {
+ return filepath.Walk(root, walkFn)
+}
+
+func (*pathDriver) FromSlash(path string) string {
+ return filepath.FromSlash(path)
+}
+
+func (*pathDriver) ToSlash(path string) string {
+ return filepath.ToSlash(path)
+}
+
+func (*pathDriver) Match(pattern, name string) (bool, error) {
+ return filepath.Match(pattern, name)
+}
diff --git a/extender/code/vendor/github.com/containerd/continuity/syscallx/syscall_unix.go b/extender/code/vendor/github.com/containerd/continuity/syscallx/syscall_unix.go
new file mode 100644
index 000000000..0bfa6a040
--- /dev/null
+++ b/extender/code/vendor/github.com/containerd/continuity/syscallx/syscall_unix.go
@@ -0,0 +1,26 @@
+// +build !windows
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package syscallx
+
+import "syscall"
+
+// Readlink returns the destination of the named symbolic link.
+func Readlink(path string, buf []byte) (n int, err error) {
+ return syscall.Readlink(path, buf)
+}
diff --git a/extender/code/vendor/github.com/containerd/continuity/syscallx/syscall_windows.go b/extender/code/vendor/github.com/containerd/continuity/syscallx/syscall_windows.go
new file mode 100644
index 000000000..2ba814990
--- /dev/null
+++ b/extender/code/vendor/github.com/containerd/continuity/syscallx/syscall_windows.go
@@ -0,0 +1,112 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
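The interface buys indirection: code that manipulates paths on behalf of a container can take a PathDriver rather than hard-coding filepath. A brief sketch (the describe helper and the path are invented for illustration; the vendored import path is assumed):

```go
package main

import (
	"fmt"

	"github.com/containerd/continuity/pathdriver"
)

// Sketch: accept any PathDriver so a non-local driver can be
// substituted later without touching this code.
func describe(d pathdriver.PathDriver, p string) {
	fmt.Println(d.IsAbs(p), d.Dir(p), d.Base(p))
}

func main() {
	describe(pathdriver.LocalPathDriver, "/var/lib/app/config.json") // hypothetical path
}
```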
+*/ + +package syscallx + +import ( + "syscall" + "unsafe" +) + +type reparseDataBuffer struct { + ReparseTag uint32 + ReparseDataLength uint16 + Reserved uint16 + + // GenericReparseBuffer + reparseBuffer byte +} + +type mountPointReparseBuffer struct { + SubstituteNameOffset uint16 + SubstituteNameLength uint16 + PrintNameOffset uint16 + PrintNameLength uint16 + PathBuffer [1]uint16 +} + +type symbolicLinkReparseBuffer struct { + SubstituteNameOffset uint16 + SubstituteNameLength uint16 + PrintNameOffset uint16 + PrintNameLength uint16 + Flags uint32 + PathBuffer [1]uint16 +} + +const ( + _IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003 + _SYMLINK_FLAG_RELATIVE = 1 +) + +// Readlink returns the destination of the named symbolic link. +func Readlink(path string, buf []byte) (n int, err error) { + fd, err := syscall.CreateFile(syscall.StringToUTF16Ptr(path), syscall.GENERIC_READ, 0, nil, syscall.OPEN_EXISTING, + syscall.FILE_FLAG_OPEN_REPARSE_POINT|syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) + if err != nil { + return -1, err + } + defer syscall.CloseHandle(fd) + + rdbbuf := make([]byte, syscall.MAXIMUM_REPARSE_DATA_BUFFER_SIZE) + var bytesReturned uint32 + err = syscall.DeviceIoControl(fd, syscall.FSCTL_GET_REPARSE_POINT, nil, 0, &rdbbuf[0], uint32(len(rdbbuf)), &bytesReturned, nil) + if err != nil { + return -1, err + } + + rdb := (*reparseDataBuffer)(unsafe.Pointer(&rdbbuf[0])) + var s string + switch rdb.ReparseTag { + case syscall.IO_REPARSE_TAG_SYMLINK: + data := (*symbolicLinkReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer)) + p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0])) + s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameOffset+data.SubstituteNameLength)/2]) + if data.Flags&_SYMLINK_FLAG_RELATIVE == 0 { + if len(s) >= 4 && s[:4] == `\??\` { + s = s[4:] + switch { + case len(s) >= 2 && s[1] == ':': // \??\C:\foo\bar + // do nothing + case len(s) >= 4 && s[:4] == `UNC\`: // \??\UNC\foo\bar + s = `\\` + s[4:] + default: + // unexpected; do nothing + } + } else { + // unexpected; do nothing + } + } + case _IO_REPARSE_TAG_MOUNT_POINT: + data := (*mountPointReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer)) + p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0])) + s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameOffset+data.SubstituteNameLength)/2]) + if len(s) >= 4 && s[:4] == `\??\` { // \??\C:\foo\bar + if len(s) < 48 || s[:11] != `\??\Volume{` { + s = s[4:] + } + } else { + // unexpected; do nothing + } + default: + // the path is not a symlink or junction but another type of reparse + // point + return -1, syscall.ENOENT + } + n = copy(buf, []byte(s)) + + return n, nil +} diff --git a/extender/code/vendor/github.com/containerd/continuity/sysx/README.md b/extender/code/vendor/github.com/containerd/continuity/sysx/README.md new file mode 100644 index 000000000..ad7aee533 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/sysx/README.md @@ -0,0 +1,3 @@ +This package is for internal use only. It is intended to only have +temporary changes before they are upstreamed to golang.org/x/sys/ +(a.k.a. https://github.com/golang/sys). diff --git a/extender/code/vendor/github.com/containerd/continuity/sysx/file_posix.go b/extender/code/vendor/github.com/containerd/continuity/sysx/file_posix.go new file mode 100644 index 000000000..e28f3a1b5 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/sysx/file_posix.go @@ -0,0 +1,128 @@ +/* + Copyright The containerd Authors. 
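Note the raw, readlink(2)-style contract on both platforms: the caller supplies the buffer and gets back a byte count. A sketch of a direct call (hypothetical symlink path, vendored import path assumed); the sysx.Readlink wrapper that follows in this diff adds the buffer-growth retry loop:

```go
package main

import (
	"fmt"

	"github.com/containerd/continuity/syscallx"
)

// Sketch: read a symlink target into a caller-owned buffer, exactly
// as readlink(2) does. A too-small buffer simply truncates.
func main() {
	buf := make([]byte, 256)
	n, err := syscallx.Readlink("/tmp/some-symlink", buf) // hypothetical path
	if err != nil {
		panic(err)
	}
	fmt.Println("target:", string(buf[:n]))
}
```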
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package sysx
+
+import (
+ "os"
+ "path/filepath"
+
+ "github.com/containerd/continuity/syscallx"
+)
+
+// Readlink returns the destination of the named symbolic link.
+// If there is an error, it will be of type *PathError.
+func Readlink(name string) (string, error) {
+ for len := 128; ; len *= 2 {
+ b := make([]byte, len)
+ n, e := fixCount(syscallx.Readlink(fixLongPath(name), b))
+ if e != nil {
+ return "", &os.PathError{Op: "readlink", Path: name, Err: e}
+ }
+ if n < len {
+ return string(b[0:n]), nil
+ }
+ }
+}
+
+// Many functions in package syscall return a count of -1 instead of 0.
+// Using fixCount(call()) instead of call() corrects the count.
+func fixCount(n int, err error) (int, error) {
+ if n < 0 {
+ n = 0
+ }
+ return n, err
+}
+
+// fixLongPath returns the extended-length (\\?\-prefixed) form of
+// path when needed, in order to avoid the default 260 character file
+// path limit imposed by Windows. If path is not easily converted to
+// the extended-length form (for example, if path is a relative path
+// or contains .. elements), or is short enough, fixLongPath returns
+// path unmodified.
+//
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
+func fixLongPath(path string) string {
+ // Do nothing (and don't allocate) if the path is "short".
+ // Empirically (at least on the Windows Server 2013 builder),
+ // the kernel is arbitrarily okay with < 248 bytes. That
+ // matches what the docs above say:
+ // "When using an API to create a directory, the specified
+ // path cannot be so long that you cannot append an 8.3 file
+ // name (that is, the directory name cannot exceed MAX_PATH
+ // minus 12)." Since MAX_PATH is 260, 260 - 12 = 248.
+ //
+ // The MSDN docs appear to say that a normal path that is 248 bytes long
+ // will work; empirically the path must be less than 248 bytes long.
+ if len(path) < 248 {
+ // Don't fix. (This is how Go 1.7 and earlier worked,
+ // not automatically generating the \\?\ form)
+ return path
+ }
+
+ // The extended form begins with \\?\, as in
+ // \\?\c:\windows\foo.txt or \\?\UNC\server\share\foo.txt.
+ // The extended form disables evaluation of . and .. path
+ // elements and disables the interpretation of / as equivalent
+ // to \. The conversion here rewrites / to \ and elides
+ // . elements as well as trailing or duplicate separators. For
+ // simplicity it avoids the conversion entirely for relative
+ // paths or paths containing .. elements. For now,
+ // \\server\share paths are not converted to
+ // \\?\UNC\server\share paths because the rules for doing so
+ // are less well-specified.
+ if len(path) >= 2 && path[:2] == `\\` {
+ // Don't canonicalize UNC paths.
+ return path + } + if !filepath.IsAbs(path) { + // Relative path + return path + } + + const prefix = `\\?` + + pathbuf := make([]byte, len(prefix)+len(path)+len(`\`)) + copy(pathbuf, prefix) + n := len(path) + r, w := 0, len(prefix) + for r < n { + switch { + case os.IsPathSeparator(path[r]): + // empty block + r++ + case path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])): + // /./ + r++ + case r+1 < n && path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])): + // /../ is currently unhandled + return path + default: + pathbuf[w] = '\\' + w++ + for ; r < n && !os.IsPathSeparator(path[r]); r++ { + pathbuf[w] = path[r] + w++ + } + } + } + // A drive's root directory needs a trailing \ + if w == len(`\\?\c:`) { + pathbuf[w] = '\\' + w++ + } + return string(pathbuf[:w]) +} diff --git a/extender/code/vendor/github.com/containerd/continuity/sysx/generate.sh b/extender/code/vendor/github.com/containerd/continuity/sysx/generate.sh new file mode 100644 index 000000000..87d708d7a --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/sysx/generate.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +# Copyright The containerd Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +set -e + +mksyscall="$(go env GOROOT)/src/syscall/mksyscall.pl" + +fix() { + sed 's,^package syscall$,package sysx,' \ + | sed 's,^import "unsafe"$,import (\n\t"syscall"\n\t"unsafe"\n),' \ + | gofmt -r='BytePtrFromString -> syscall.BytePtrFromString' \ + | gofmt -r='Syscall6 -> syscall.Syscall6' \ + | gofmt -r='Syscall -> syscall.Syscall' \ + | gofmt -r='SYS_GETXATTR -> syscall.SYS_GETXATTR' \ + | gofmt -r='SYS_LISTXATTR -> syscall.SYS_LISTXATTR' \ + | gofmt -r='SYS_SETXATTR -> syscall.SYS_SETXATTR' \ + | gofmt -r='SYS_REMOVEXATTR -> syscall.SYS_REMOVEXATTR' \ + | gofmt -r='SYS_LGETXATTR -> syscall.SYS_LGETXATTR' \ + | gofmt -r='SYS_LLISTXATTR -> syscall.SYS_LLISTXATTR' \ + | gofmt -r='SYS_LSETXATTR -> syscall.SYS_LSETXATTR' \ + | gofmt -r='SYS_LREMOVEXATTR -> syscall.SYS_LREMOVEXATTR' +} + +if [ "$GOARCH" == "" ] || [ "$GOOS" == "" ]; then + echo "Must specify \$GOARCH and \$GOOS" + exit 1 +fi + +mkargs="" + +if [ "$GOARCH" == "386" ] || [ "$GOARCH" == "arm" ]; then + mkargs="-l32" +fi + +for f in "$@"; do + $mksyscall $mkargs "${f}_${GOOS}.go" | fix > "${f}_${GOOS}_${GOARCH}.go" +done + diff --git a/extender/code/vendor/github.com/containerd/continuity/sysx/nodata_linux.go b/extender/code/vendor/github.com/containerd/continuity/sysx/nodata_linux.go new file mode 100644 index 000000000..28ce5d8de --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/sysx/nodata_linux.go @@ -0,0 +1,23 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
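To make the prefixing rule above concrete, here is a runnable, simplified mirror of only the decision fixLongPath makes. This stand-in is not the vendored function: the drive-letter test fakes the Windows absoluteness check so the sketch runs on any OS, and the real code additionally bails out on ".." elements before rewriting the path into the \\?\ form.

```go
package main

import (
	"fmt"
	"strings"
)

// needsExtendedForm is a hypothetical sketch of fixLongPath's gate:
// only long, absolute, non-UNC paths get the extended-length prefix.
func needsExtendedForm(path string) bool {
	if len(path) < 248 {
		return false // short enough for the regular Win32 APIs
	}
	if strings.HasPrefix(path, `\\`) {
		return false // UNC paths are not canonicalized
	}
	// crude stand-in for "absolute drive path" so this runs anywhere
	return len(path) >= 3 && path[1] == ':' && path[2] == '\\'
}

func main() {
	long := `C:\` + strings.Repeat(`dir\`, 80) + `file.txt`
	fmt.Println(needsExtendedForm(long), needsExtendedForm(`C:\short.txt`))
}
```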
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sysx + +import ( + "syscall" +) + +const ENODATA = syscall.ENODATA diff --git a/extender/code/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go b/extender/code/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go new file mode 100644 index 000000000..e0575f446 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go @@ -0,0 +1,24 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sysx + +import ( + "syscall" +) + +// This should actually be a set that contains ENOENT and EPERM +const ENODATA = syscall.ENOENT diff --git a/extender/code/vendor/github.com/containerd/continuity/sysx/nodata_unix.go b/extender/code/vendor/github.com/containerd/continuity/sysx/nodata_unix.go new file mode 100644 index 000000000..b26f5b3d0 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/sysx/nodata_unix.go @@ -0,0 +1,25 @@ +// +build darwin freebsd + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sysx + +import ( + "syscall" +) + +const ENODATA = syscall.ENOATTR diff --git a/extender/code/vendor/github.com/containerd/continuity/sysx/xattr.go b/extender/code/vendor/github.com/containerd/continuity/sysx/xattr.go new file mode 100644 index 000000000..9e4326dcf --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/sysx/xattr.go @@ -0,0 +1,125 @@ +// +build linux darwin + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package sysx + +import ( + "bytes" + "syscall" + + "golang.org/x/sys/unix" +) + +// Listxattr calls syscall listxattr and reads all content +// and returns a string array +func Listxattr(path string) ([]string, error) { + return listxattrAll(path, unix.Listxattr) +} + +// Removexattr calls syscall removexattr +func Removexattr(path string, attr string) (err error) { + return unix.Removexattr(path, attr) +} + +// Setxattr calls syscall setxattr +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + return unix.Setxattr(path, attr, data, flags) +} + +// Getxattr calls syscall getxattr +func Getxattr(path, attr string) ([]byte, error) { + return getxattrAll(path, attr, unix.Getxattr) +} + +// LListxattr lists xattrs, not following symlinks +func LListxattr(path string) ([]string, error) { + return listxattrAll(path, unix.Llistxattr) +} + +// LRemovexattr removes an xattr, not following symlinks +func LRemovexattr(path string, attr string) (err error) { + return unix.Lremovexattr(path, attr) +} + +// LSetxattr sets an xattr, not following symlinks +func LSetxattr(path string, attr string, data []byte, flags int) (err error) { + return unix.Lsetxattr(path, attr, data, flags) +} + +// LGetxattr gets an xattr, not following symlinks +func LGetxattr(path, attr string) ([]byte, error) { + return getxattrAll(path, attr, unix.Lgetxattr) +} + +const defaultXattrBufferSize = 5 + +type listxattrFunc func(path string, dest []byte) (int, error) + +func listxattrAll(path string, listFunc listxattrFunc) ([]string, error) { + var p []byte // nil on first execution + + for { + n, err := listFunc(path, p) // first call gets buffer size. + if err != nil { + return nil, err + } + + if n > len(p) { + p = make([]byte, n) + continue + } + + p = p[:n] + + ps := bytes.Split(bytes.TrimSuffix(p, []byte{0}), []byte{0}) + var entries []string + for _, p := range ps { + s := string(p) + if s != "" { + entries = append(entries, s) + } + } + + return entries, nil + } +} + +type getxattrFunc func(string, string, []byte) (int, error) + +func getxattrAll(path, attr string, getFunc getxattrFunc) ([]byte, error) { + p := make([]byte, defaultXattrBufferSize) + for { + n, err := getFunc(path, attr, p) + if err != nil { + if errno, ok := err.(syscall.Errno); ok && errno == syscall.ERANGE { + p = make([]byte, len(p)*2) // this can't be ideal. + continue // try again! + } + + return nil, err + } + + // realloc to correct size and repeat + if n > len(p) { + p = make([]byte, n) + continue + } + + return p[:n], nil + } +} diff --git a/extender/code/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go b/extender/code/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go new file mode 100644 index 000000000..c9ef3a1d2 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go @@ -0,0 +1,67 @@ +// +build !linux,!darwin + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
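Callers rarely need the size-probe-and-retry details above; the exported helpers hide them. A sketch (hypothetical path, vendored import path assumed) of listing attribute names through the growth loop:

```go
package main

import (
	"fmt"

	"github.com/containerd/continuity/sysx"
)

// Sketch: LListxattr hides the probe-then-fill dance of listxattr(2),
// retrying with a larger buffer until the attribute list fits.
func main() {
	attrs, err := sysx.LListxattr("/tmp/file") // hypothetical path
	if err != nil {
		panic(err)
	}
	for _, a := range attrs {
		fmt.Println("xattr:", a)
	}
}
```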
+*/ + +package sysx + +import ( + "errors" + "runtime" +) + +var unsupported = errors.New("extended attributes unsupported on " + runtime.GOOS) + +// Listxattr calls syscall listxattr and reads all content +// and returns a string array +func Listxattr(path string) ([]string, error) { + return []string{}, nil +} + +// Removexattr calls syscall removexattr +func Removexattr(path string, attr string) (err error) { + return unsupported +} + +// Setxattr calls syscall setxattr +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + return unsupported +} + +// Getxattr calls syscall getxattr +func Getxattr(path, attr string) ([]byte, error) { + return []byte{}, unsupported +} + +// LListxattr lists xattrs, not following symlinks +func LListxattr(path string) ([]string, error) { + return []string{}, nil +} + +// LRemovexattr removes an xattr, not following symlinks +func LRemovexattr(path string, attr string) (err error) { + return unsupported +} + +// LSetxattr sets an xattr, not following symlinks +func LSetxattr(path string, attr string, data []byte, flags int) (err error) { + return unsupported +} + +// LGetxattr gets an xattr, not following symlinks +func LGetxattr(path, attr string) ([]byte, error) { + return []byte{}, nil +} diff --git a/extender/code/vendor/github.com/containerd/fifo/.gitignore b/extender/code/vendor/github.com/containerd/fifo/.gitignore new file mode 100644 index 000000000..c57100a59 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/fifo/.gitignore @@ -0,0 +1 @@ +coverage.txt diff --git a/extender/code/vendor/github.com/containerd/fifo/.travis.yml b/extender/code/vendor/github.com/containerd/fifo/.travis.yml new file mode 100644 index 000000000..c6eaba059 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/fifo/.travis.yml @@ -0,0 +1,22 @@ +language: go +go: + - 1.11.x + - tip + +install: + - go get -u github.com/vbatts/git-validation + - go get -u github.com/kunalkushwaha/ltag + +before_script: + - pushd ..; git clone https://github.com/containerd/project; popd + +script: + - DCO_VERBOSITY=-q ../project/script/validate/dco + - ../project/script/validate/fileheader ../project/ + - make deps + - make fmt + - make vet + - make test + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/extender/code/vendor/github.com/containerd/fifo/LICENSE b/extender/code/vendor/github.com/containerd/fifo/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/fifo/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/extender/code/vendor/github.com/containerd/fifo/Makefile b/extender/code/vendor/github.com/containerd/fifo/Makefile new file mode 100644 index 000000000..96be48d48 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/fifo/Makefile @@ -0,0 +1,27 @@ +# Copyright The containerd Authors. 
+ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.PHONY: fmt vet test deps + +test: deps + go test -v -race -covermode=atomic -coverprofile=coverage.txt ./... + +deps: + go get -d -t ./... + +fmt: + gofmt -s -l . + +vet: + go vet ./... diff --git a/extender/code/vendor/github.com/containerd/fifo/errors.go b/extender/code/vendor/github.com/containerd/fifo/errors.go new file mode 100644 index 000000000..d2ff3e8b6 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/fifo/errors.go @@ -0,0 +1,30 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fifo + +import ( + "errors" +) + +var ( + ErrClosed = errors.New("fifo closed") + ErrCtrlClosed = errors.New("control of closed fifo") + ErrRdFrmWRONLY = errors.New("reading from write-only fifo") + ErrReadClosed = errors.New("reading from a closed fifo") + ErrWrToRDONLY = errors.New("writing to read-only fifo") + ErrWriteClosed = errors.New("writing to a closed fifo") +) diff --git a/extender/code/vendor/github.com/containerd/fifo/fifo.go b/extender/code/vendor/github.com/containerd/fifo/fifo.go new file mode 100644 index 000000000..7a2d18c74 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/fifo/fifo.go @@ -0,0 +1,236 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fifo + +import ( + "io" + "os" + "runtime" + "sync" + "syscall" + + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +type fifo struct { + flag int + opened chan struct{} + closed chan struct{} + closing chan struct{} + err error + file *os.File + closingOnce sync.Once // close has been called + closedOnce sync.Once // fifo is closed + handle *handle +} + +var leakCheckWg *sync.WaitGroup + +// OpenFifo opens a fifo. Returns io.ReadWriteCloser. +// Context can be used to cancel this function until open(2) has not returned. 
+// Accepted flags: +// - syscall.O_CREAT - create new fifo if one doesn't exist +// - syscall.O_RDONLY - open fifo only from reader side +// - syscall.O_WRONLY - open fifo only from writer side +// - syscall.O_RDWR - open fifo from both sides, never block on syscall level +// - syscall.O_NONBLOCK - return io.ReadWriteCloser even if other side of the +// fifo isn't open. read/write will be connected after the actual fifo is +// open or after fifo is closed. +func OpenFifo(ctx context.Context, fn string, flag int, perm os.FileMode) (io.ReadWriteCloser, error) { + if _, err := os.Stat(fn); err != nil { + if os.IsNotExist(err) && flag&syscall.O_CREAT != 0 { + if err := mkfifo(fn, uint32(perm&os.ModePerm)); err != nil && !os.IsExist(err) { + return nil, errors.Wrapf(err, "error creating fifo %v", fn) + } + } else { + return nil, err + } + } + + block := flag&syscall.O_NONBLOCK == 0 || flag&syscall.O_RDWR != 0 + + flag &= ^syscall.O_CREAT + flag &= ^syscall.O_NONBLOCK + + h, err := getHandle(fn) + if err != nil { + return nil, err + } + + f := &fifo{ + handle: h, + flag: flag, + opened: make(chan struct{}), + closed: make(chan struct{}), + closing: make(chan struct{}), + } + + wg := leakCheckWg + if wg != nil { + wg.Add(2) + } + + go func() { + if wg != nil { + defer wg.Done() + } + select { + case <-ctx.Done(): + select { + case <-f.opened: + default: + f.Close() + } + case <-f.opened: + case <-f.closed: + } + }() + go func() { + if wg != nil { + defer wg.Done() + } + var file *os.File + fn, err := h.Path() + if err == nil { + file, err = os.OpenFile(fn, flag, 0) + } + select { + case <-f.closing: + if err == nil { + select { + case <-ctx.Done(): + err = ctx.Err() + default: + err = errors.Errorf("fifo %v was closed before opening", h.Name()) + } + if file != nil { + file.Close() + } + } + default: + } + if err != nil { + f.closedOnce.Do(func() { + f.err = err + close(f.closed) + }) + return + } + f.file = file + close(f.opened) + }() + if block { + select { + case <-f.opened: + case <-f.closed: + return nil, f.err + } + } + return f, nil +} + +// Read from a fifo to a byte array. +func (f *fifo) Read(b []byte) (int, error) { + if f.flag&syscall.O_WRONLY > 0 { + return 0, ErrRdFrmWRONLY + } + select { + case <-f.opened: + return f.file.Read(b) + default: + } + select { + case <-f.opened: + return f.file.Read(b) + case <-f.closed: + return 0, ErrReadClosed + } +} + +// Write from byte array to a fifo. +func (f *fifo) Write(b []byte) (int, error) { + if f.flag&(syscall.O_WRONLY|syscall.O_RDWR) == 0 { + return 0, ErrWrToRDONLY + } + select { + case <-f.opened: + return f.file.Write(b) + default: + } + select { + case <-f.opened: + return f.file.Write(b) + case <-f.closed: + return 0, ErrWriteClosed + } +} + +// Close the fifo. Next reads/writes will error. This method can also be used +// before open(2) has returned and fifo was never opened. 
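+//
+// A minimal usage sketch (editorial, not upstream documentation; the path and
+// flags here are assumptions chosen for illustration):
+//
+//	// ctx is any context.Context, e.g. context.Background()
+//	rw, err := OpenFifo(ctx, "/run/example.fifo", syscall.O_CREAT|syscall.O_RDWR, 0600)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer rw.Close() // safe even if open(2) never completed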
+func (f *fifo) Close() (retErr error) { + for { + select { + case <-f.closed: + f.handle.Close() + return + default: + select { + case <-f.opened: + f.closedOnce.Do(func() { + retErr = f.file.Close() + f.err = retErr + close(f.closed) + }) + default: + if f.flag&syscall.O_RDWR != 0 { + runtime.Gosched() + break + } + f.closingOnce.Do(func() { + close(f.closing) + }) + reverseMode := syscall.O_WRONLY + if f.flag&syscall.O_WRONLY > 0 { + reverseMode = syscall.O_RDONLY + } + fn, err := f.handle.Path() + // if Close() is called concurrently(shouldn't) it may cause error + // because handle is closed + select { + case <-f.closed: + default: + if err != nil { + // Path has become invalid. We will leak a goroutine. + // This case should not happen in linux. + f.closedOnce.Do(func() { + f.err = err + close(f.closed) + }) + <-f.closed + break + } + f, err := os.OpenFile(fn, reverseMode|syscall.O_NONBLOCK, 0) + if err == nil { + f.Close() + } + runtime.Gosched() + } + } + } + } +} diff --git a/extender/code/vendor/github.com/containerd/fifo/handle_linux.go b/extender/code/vendor/github.com/containerd/fifo/handle_linux.go new file mode 100644 index 000000000..6ac89b6a4 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/fifo/handle_linux.go @@ -0,0 +1,97 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package fifo + +import ( + "fmt" + "os" + "sync" + "syscall" + + "github.com/pkg/errors" +) + +const O_PATH = 010000000 + +type handle struct { + f *os.File + fd uintptr + dev uint64 + ino uint64 + closeOnce sync.Once + name string +} + +func getHandle(fn string) (*handle, error) { + f, err := os.OpenFile(fn, O_PATH, 0) + if err != nil { + return nil, errors.Wrapf(err, "failed to open %v with O_PATH", fn) + } + + var ( + stat syscall.Stat_t + fd = f.Fd() + ) + if err := syscall.Fstat(int(fd), &stat); err != nil { + f.Close() + return nil, errors.Wrapf(err, "failed to stat handle %v", fd) + } + + h := &handle{ + f: f, + name: fn, + dev: uint64(stat.Dev), + ino: stat.Ino, + fd: fd, + } + + // check /proc just in case + if _, err := os.Stat(h.procPath()); err != nil { + f.Close() + return nil, errors.Wrapf(err, "couldn't stat %v", h.procPath()) + } + + return h, nil +} + +func (h *handle) procPath() string { + return fmt.Sprintf("/proc/self/fd/%d", h.fd) +} + +func (h *handle) Name() string { + return h.name +} + +func (h *handle) Path() (string, error) { + var stat syscall.Stat_t + if err := syscall.Stat(h.procPath(), &stat); err != nil { + return "", errors.Wrapf(err, "path %v could not be statted", h.procPath()) + } + if uint64(stat.Dev) != h.dev || stat.Ino != h.ino { + return "", errors.Errorf("failed to verify handle %v/%v %v/%v", stat.Dev, h.dev, stat.Ino, h.ino) + } + return h.procPath(), nil +} + +func (h *handle) Close() error { + h.closeOnce.Do(func() { + h.f.Close() + }) + return nil +} diff --git a/extender/code/vendor/github.com/containerd/fifo/handle_nolinux.go b/extender/code/vendor/github.com/containerd/fifo/handle_nolinux.go new file mode 100644 index 000000000..4f2a282b2 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/fifo/handle_nolinux.go @@ -0,0 +1,65 @@ +// +build !linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package fifo + +import ( + "syscall" + + "github.com/pkg/errors" +) + +type handle struct { + fn string + dev uint64 + ino uint64 +} + +func getHandle(fn string) (*handle, error) { + var stat syscall.Stat_t + if err := syscall.Stat(fn, &stat); err != nil { + return nil, errors.Wrapf(err, "failed to stat %v", fn) + } + + h := &handle{ + fn: fn, + dev: uint64(stat.Dev), + ino: uint64(stat.Ino), + } + + return h, nil +} + +func (h *handle) Path() (string, error) { + var stat syscall.Stat_t + if err := syscall.Stat(h.fn, &stat); err != nil { + return "", errors.Wrapf(err, "path %v could not be statted", h.fn) + } + if uint64(stat.Dev) != h.dev || uint64(stat.Ino) != h.ino { + return "", errors.Errorf("failed to verify handle %v/%v %v/%v for %v", stat.Dev, h.dev, stat.Ino, h.ino, h.fn) + } + return h.fn, nil +} + +func (h *handle) Name() string { + return h.fn +} + +func (h *handle) Close() error { + return nil +} diff --git a/extender/code/vendor/github.com/containerd/fifo/mkfifo_nosolaris.go b/extender/code/vendor/github.com/containerd/fifo/mkfifo_nosolaris.go new file mode 100644 index 000000000..2799a06d1 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/fifo/mkfifo_nosolaris.go @@ -0,0 +1,25 @@ +// +build !solaris + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fifo + +import "syscall" + +func mkfifo(path string, mode uint32) (err error) { + return syscall.Mkfifo(path, mode) +} diff --git a/extender/code/vendor/github.com/containerd/fifo/mkfifo_solaris.go b/extender/code/vendor/github.com/containerd/fifo/mkfifo_solaris.go new file mode 100644 index 000000000..1ecd722ae --- /dev/null +++ b/extender/code/vendor/github.com/containerd/fifo/mkfifo_solaris.go @@ -0,0 +1,27 @@ +// +build solaris + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fifo + +import ( + "golang.org/x/sys/unix" +) + +func mkfifo(path string, mode uint32) (err error) { + return unix.Mkfifo(path, mode) +} diff --git a/extender/code/vendor/github.com/containerd/fifo/raw.go b/extender/code/vendor/github.com/containerd/fifo/raw.go new file mode 100644 index 000000000..27b8976cd --- /dev/null +++ b/extender/code/vendor/github.com/containerd/fifo/raw.go @@ -0,0 +1,114 @@ +// +build go1.12 + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fifo
+
+import (
+ "syscall"
+)
+
+// SyscallConn provides raw access to the fifo's underlying file descriptor.
+// See syscall.Conn for guarantees provided by this interface.
+func (f *fifo) SyscallConn() (syscall.RawConn, error) {
+ // deterministic check for closed
+ select {
+ case <-f.closed:
+ return nil, ErrClosed
+ default:
+ }
+
+ select {
+ case <-f.closed:
+ return nil, ErrClosed
+ case <-f.opened:
+ return f.file.SyscallConn()
+ default:
+ }
+
+ // Not opened and not closed, this means open is non-blocking AND it's not open yet
+ // Use rawConn to deal with non-blocking open.
+ rc := &rawConn{f: f, ready: make(chan struct{})}
+ go func() {
+ select {
+ case <-f.closed:
+ return
+ case <-f.opened:
+ rc.raw, rc.err = f.file.SyscallConn()
+ close(rc.ready)
+ }
+ }()
+
+ return rc, nil
+}
+
+type rawConn struct {
+ f *fifo
+ ready chan struct{}
+ raw syscall.RawConn
+ err error
+}
+
+func (r *rawConn) Control(f func(fd uintptr)) error {
+ select {
+ case <-r.f.closed:
+ return ErrCtrlClosed
+ case <-r.ready:
+ }
+
+ if r.err != nil {
+ return r.err
+ }
+
+ return r.raw.Control(f)
+}
+
+func (r *rawConn) Read(f func(fd uintptr) (done bool)) error {
+ if r.f.flag&syscall.O_WRONLY > 0 {
+ return ErrRdFrmWRONLY
+ }
+
+ select {
+ case <-r.f.closed:
+ return ErrReadClosed
+ case <-r.ready:
+ }
+
+ if r.err != nil {
+ return r.err
+ }
+
+ return r.raw.Read(f)
+}
+
+func (r *rawConn) Write(f func(fd uintptr) (done bool)) error {
+ if r.f.flag&(syscall.O_WRONLY|syscall.O_RDWR) == 0 {
+ return ErrWrToRDONLY
+ }
+
+ select {
+ case <-r.f.closed:
+ return ErrWriteClosed
+ case <-r.ready:
+ }
+
+ if r.err != nil {
+ return r.err
+ }
+
+ return r.raw.Write(f)
+}
diff --git a/extender/code/vendor/github.com/containerd/fifo/readme.md b/extender/code/vendor/github.com/containerd/fifo/readme.md
new file mode 100644
index 000000000..30e233cc6
--- /dev/null
+++ b/extender/code/vendor/github.com/containerd/fifo/readme.md
@@ -0,0 +1,44 @@
+### fifo
+
+[![Build Status](https://travis-ci.org/containerd/fifo.svg?branch=master)](https://travis-ci.org/containerd/fifo)
+[![codecov](https://codecov.io/gh/containerd/fifo/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/fifo)
+
+Go package for handling fifos in a sane way.
+
+```
+// OpenFifo opens a fifo. Returns io.ReadWriteCloser.
+// Context can be used to cancel this function until open(2) has not returned.
+// Accepted flags:
+// - syscall.O_CREAT - create new fifo if one doesn't exist
+// - syscall.O_RDONLY - open fifo only from reader side
+// - syscall.O_WRONLY - open fifo only from writer side
+// - syscall.O_RDWR - open fifo from both sides, never block on syscall level
+// - syscall.O_NONBLOCK - return io.ReadWriteCloser even if other side of the
+// fifo isn't open. read/write will be connected after the actual fifo is
+// open or after fifo is closed.
+func OpenFifo(ctx context.Context, fn string, flag int, perm os.FileMode) (io.ReadWriteCloser, error)
+
+
+// Read from a fifo to a byte array.
+func (f *fifo) Read(b []byte) (int, error)
+
+
+// Write from byte array to a fifo.
+func (f *fifo) Write(b []byte) (int, error) + + +// Close the fifo. Next reads/writes will error. This method can also be used +// before open(2) has returned and fifo was never opened. +func (f *fifo) Close() error +``` + +## Project details + +The fifo is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). +As a containerd sub-project, you will find the: + + * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), + * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), + * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) + +information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/extender/code/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE b/extender/code/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/extender/code/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/extender/code/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go new file mode 100644 index 000000000..4ae47fcf9 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go @@ -0,0 +1,595 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. 
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "runtime"
+ "strings"
+ "sync"
+
+ "github.com/containerd/stargz-snapshotter/estargz/errorutil"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+ "golang.org/x/sync/errgroup"
+)
+
+type options struct {
+ chunkSize int
+ compressionLevel int
+ prioritizedFiles []string
+ missedPrioritizedFiles *[]string
+}
+
+type Option func(o *options) error
+
+// WithChunkSize option specifies the chunk size of the eStargz blob to build.
+func WithChunkSize(chunkSize int) Option {
+ return func(o *options) error {
+ o.chunkSize = chunkSize
+ return nil
+ }
+}
+
+// WithCompressionLevel option specifies the gzip compression level.
+// The default is gzip.BestCompression.
+// See also: https://godoc.org/compress/gzip#pkg-constants
+func WithCompressionLevel(level int) Option {
+ return func(o *options) error {
+ o.compressionLevel = level
+ return nil
+ }
+}
+
+// WithPrioritizedFiles option specifies the list of prioritized files.
+// These files must be complete paths that are absolute or relative to "/".
+// For example, all of "foo/bar", "/foo/bar", "./foo/bar" and "../foo/bar"
+// are treated as "/foo/bar".
+func WithPrioritizedFiles(files []string) Option {
+ return func(o *options) error {
+ o.prioritizedFiles = files
+ return nil
+ }
+}
+
+// WithAllowPrioritizeNotFound makes Build continue the execution even if some
+// of the prioritized files specified by the WithPrioritizedFiles option aren't
+// found in the input tar. Instead, it records all missed file names in the
+// passed slice.
+func WithAllowPrioritizeNotFound(missedFiles *[]string) Option {
+ return func(o *options) error {
+ if missedFiles == nil {
+ return fmt.Errorf("WithAllowPrioritizeNotFound: slice must be passed")
+ }
+ o.missedPrioritizedFiles = missedFiles
+ return nil
+ }
+}
+
+// Blob is an eStargz blob.
+type Blob struct {
+ io.ReadCloser
+ diffID digest.Digester
+ tocDigest digest.Digest
+}
+
+// DiffID returns the digest of the uncompressed blob.
+// It is only valid to call DiffID after Close.
+func (b *Blob) DiffID() digest.Digest {
+ return b.diffID.Digest()
+}
+
+// TOCDigest returns the digest of the uncompressed TOC JSON.
+func (b *Blob) TOCDigest() digest.Digest {
+ return b.tocDigest
+}
+
+// Build builds an eStargz blob, an extended version of stargz, from the tar
+// blob passed through the argument. If prioritized files are listed in the
+// options, these files are grouped as "prioritized" and can be used for
+// runtime optimization (e.g. prefetch). This function builds the blob in
+// parallel, dividing it into several (at least runtime.GOMAXPROCS(0)) sub-blobs.
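+//
+// A minimal usage sketch (editorial, not upstream documentation; the file name
+// and prioritized path are assumptions chosen for illustration):
+//
+//	f, _ := os.Open("layer.tar")
+//	info, _ := f.Stat()
+//	blob, err := Build(io.NewSectionReader(f, 0, info.Size()),
+//		WithPrioritizedFiles([]string{"/app/run"}))
+//	if err != nil {
+//		// handle error
+//	}
+//	defer blob.Close()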
+func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { + var opts options + opts.compressionLevel = gzip.BestCompression // BestCompression by default + for _, o := range opt { + if err := o(&opts); err != nil { + return nil, err + } + } + layerFiles := newTempFiles() + defer func() { + if rErr != nil { + if err := layerFiles.CleanupAll(); err != nil { + rErr = errors.Wrapf(rErr, "failed to cleanup tmp files: %v", err) + } + } + }() + entries, err := sortEntries(tarBlob, opts.prioritizedFiles, opts.missedPrioritizedFiles) + if err != nil { + return nil, err + } + tarParts := divideEntries(entries, runtime.GOMAXPROCS(0)) + writers := make([]*Writer, len(tarParts)) + payloads := make([]*os.File, len(tarParts)) + var mu sync.Mutex + var eg errgroup.Group + for i, parts := range tarParts { + i, parts := i, parts + // builds verifiable stargz sub-blobs + eg.Go(func() error { + esgzFile, err := layerFiles.TempFile("", "esgzdata") + if err != nil { + return err + } + sw := NewWriterLevel(esgzFile, opts.compressionLevel) + sw.ChunkSize = opts.chunkSize + if err := sw.AppendTar(readerFromEntries(parts...)); err != nil { + return err + } + mu.Lock() + writers[i] = sw + payloads[i] = esgzFile + mu.Unlock() + return nil + }) + } + if err := eg.Wait(); err != nil { + rErr = err + return nil, err + } + tocAndFooter, tocDgst, err := closeWithCombine(opts.compressionLevel, writers...) + if err != nil { + rErr = err + return nil, err + } + var rs []io.Reader + for _, p := range payloads { + fs, err := fileSectionReader(p) + if err != nil { + return nil, err + } + rs = append(rs, fs) + } + diffID := digest.Canonical.Digester() + pr, pw := io.Pipe() + go func() { + r, err := gzip.NewReader(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw)) + if err != nil { + pw.CloseWithError(err) + return + } + if _, err := io.Copy(diffID.Hash(), r); err != nil { + pw.CloseWithError(err) + return + } + pw.Close() + }() + return &Blob{ + ReadCloser: readCloser{ + Reader: pr, + closeFunc: layerFiles.CleanupAll, + }, + tocDigest: tocDgst, + diffID: diffID, + }, nil +} + +// closeWithCombine takes unclosed Writers and close them. This also returns the +// toc that combined all Writers into. +// Writers doesn't write TOC and footer to the underlying writers so they can be +// combined into a single eStargz and tocAndFooter returned by this function can +// be appended at the tail of that combined blob. 
+func closeWithCombine(compressionLevel int, ws ...*Writer) (tocAndFooter io.Reader, tocDgst digest.Digest, err error) {
+ if len(ws) == 0 {
+ return nil, "", fmt.Errorf("at least one writer must be passed")
+ }
+ for _, w := range ws {
+ if w.closed {
+ return nil, "", fmt.Errorf("writer must be unclosed")
+ }
+ defer func(w *Writer) { w.closed = true }(w)
+ if err := w.closeGz(); err != nil {
+ return nil, "", err
+ }
+ if err := w.bw.Flush(); err != nil {
+ return nil, "", err
+ }
+ }
+ var (
+ mtoc = new(jtoc)
+ currentOffset int64
+ )
+ mtoc.Version = ws[0].toc.Version
+ for _, w := range ws {
+ for _, e := range w.toc.Entries {
+ // Recalculate Offset of non-empty files/chunks
+ if (e.Type == "reg" && e.Size > 0) || e.Type == "chunk" {
+ e.Offset += currentOffset
+ }
+ mtoc.Entries = append(mtoc.Entries, e)
+ }
+ if w.toc.Version > mtoc.Version {
+ mtoc.Version = w.toc.Version
+ }
+ currentOffset += w.cw.n
+ }
+
+ tocJSON, err := json.MarshalIndent(mtoc, "", "\t")
+ if err != nil {
+ return nil, "", err
+ }
+ pr, pw := io.Pipe()
+ go func() {
+ zw, _ := gzip.NewWriterLevel(pw, compressionLevel)
+ tw := tar.NewWriter(zw)
+ if err := tw.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeReg,
+ Name: TOCTarName,
+ Size: int64(len(tocJSON)),
+ }); err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ if _, err := tw.Write(tocJSON); err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ if err := tw.Close(); err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ if err := zw.Close(); err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ pw.Close()
+ }()
+ return io.MultiReader(
+ pr,
+ bytes.NewReader(footerBytes(currentOffset)),
+ ), digest.FromBytes(tocJSON), nil
+}
+
+// divideEntries divides the passed entries into at least the number of parts
+// specified by the argument.
+func divideEntries(entries []*entry, minPartsNum int) (set [][]*entry) {
+ var estimatedSize int64
+ for _, e := range entries {
+ estimatedSize += e.header.Size
+ }
+ unitSize := estimatedSize / int64(minPartsNum)
+ var (
+ nextEnd = unitSize
+ offset int64
+ )
+ set = append(set, []*entry{})
+ for _, e := range entries {
+ set[len(set)-1] = append(set[len(set)-1], e)
+ offset += e.header.Size
+ if offset > nextEnd {
+ set = append(set, []*entry{})
+ nextEnd += unitSize
+ }
+ }
+ return
+}
+
+var errNotFound = errors.New("not found")
+
+// sortEntries reads the specified tar blob and returns a list of tar entries.
+// If some prioritized files are specified, the list starts with these files,
+// keeping the order specified by the argument.
+func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]string) ([]*entry, error) {
+
+ // Import tar file.
+ intar, err := importTar(in)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to sort")
+ }
+
+ // Sort the tar file respecting the prioritized files list.
+ sorted := &tarFile{}
+ for _, l := range prioritized {
+ if err := moveRec(l, intar, sorted); err != nil {
+ if errors.Is(err, errNotFound) && missedPrioritized != nil {
+ *missedPrioritized = append(*missedPrioritized, l)
+ continue // allow not found
+ }
+ return nil, errors.Wrap(err, "failed to sort tar entries")
+ }
+ }
+ if len(prioritized) == 0 {
+ sorted.add(&entry{
+ header: &tar.Header{
+ Name: NoPrefetchLandmark,
+ Typeflag: tar.TypeReg,
+ Size: int64(len([]byte{landmarkContents})),
+ },
+ payload: bytes.NewReader([]byte{landmarkContents}),
+ })
+ } else {
+ sorted.add(&entry{
+ header: &tar.Header{
+ Name: PrefetchLandmark,
+ Typeflag: tar.TypeReg,
+ Size: int64(len([]byte{landmarkContents})),
+ },
+ payload: bytes.NewReader([]byte{landmarkContents}),
+ })
+ }
+
+ // Dump all entries and concatenate them.
+ return append(sorted.dump(), intar.dump()...), nil
+}
+
+// readerFromEntries returns a reader of a tar archive that contains the
+// entries passed through the arguments.
+func readerFromEntries(entries ...*entry) io.Reader {
+ pr, pw := io.Pipe()
+ go func() {
+ tw := tar.NewWriter(pw)
+ defer tw.Close()
+ for _, entry := range entries {
+ if err := tw.WriteHeader(entry.header); err != nil {
+ pw.CloseWithError(fmt.Errorf("Failed to write tar header: %v", err))
+ return
+ }
+ if _, err := io.Copy(tw, entry.payload); err != nil {
+ pw.CloseWithError(fmt.Errorf("Failed to write tar payload: %v", err))
+ return
+ }
+ }
+ pw.Close()
+ }()
+ return pr
+}
+
+func importTar(in io.ReaderAt) (*tarFile, error) {
+ tf := &tarFile{}
+ pw, err := newCountReader(in)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to make position watcher")
+ }
+ tr := tar.NewReader(pw)
+
+ // Walk through all nodes.
+ for {
+ // Fetch and parse next header.
+ h, err := tr.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ } else {
+ return nil, errors.Wrap(err, "failed to parse tar file")
+ }
+ }
+ switch cleanEntryName(h.Name) {
+ case PrefetchLandmark, NoPrefetchLandmark:
+ // Ignore existing landmark
+ continue
+ }
+
+ // Add entry. If it already exists, replace it.
+ if _, ok := tf.get(h.Name); ok {
+ tf.remove(h.Name)
+ }
+ tf.add(&entry{
+ header: h,
+ payload: io.NewSectionReader(in, pw.currentPos(), h.Size),
+ })
+ }
+
+ return tf, nil
+}
+
+func moveRec(name string, in *tarFile, out *tarFile) error {
+ name = cleanEntryName(name)
+ if name == "" { // root directory. stop recursion.
+ if e, ok := in.get(name); ok {
+ // An entry for the root directory exists; we should move it as well.
+ // This case occurs if tar entries are prefixed with "./", "/", etc.
+ out.add(e) + in.remove(name) + } + return nil + } + + _, okIn := in.get(name) + _, okOut := out.get(name) + if !okIn && !okOut { + return errors.Wrapf(errNotFound, "file: %q", name) + } + + parent, _ := path.Split(strings.TrimSuffix(name, "/")) + if err := moveRec(parent, in, out); err != nil { + return err + } + if e, ok := in.get(name); ok && e.header.Typeflag == tar.TypeLink { + if err := moveRec(e.header.Linkname, in, out); err != nil { + return err + } + } + if e, ok := in.get(name); ok { + out.add(e) + in.remove(name) + } + return nil +} + +type entry struct { + header *tar.Header + payload io.ReadSeeker +} + +type tarFile struct { + index map[string]*entry + stream []*entry +} + +func (f *tarFile) add(e *entry) { + if f.index == nil { + f.index = make(map[string]*entry) + } + f.index[cleanEntryName(e.header.Name)] = e + f.stream = append(f.stream, e) +} + +func (f *tarFile) remove(name string) { + name = cleanEntryName(name) + if f.index != nil { + delete(f.index, name) + } + var filtered []*entry + for _, e := range f.stream { + if cleanEntryName(e.header.Name) == name { + continue + } + filtered = append(filtered, e) + } + f.stream = filtered +} + +func (f *tarFile) get(name string) (e *entry, ok bool) { + if f.index == nil { + return nil, false + } + e, ok = f.index[cleanEntryName(name)] + return +} + +func (f *tarFile) dump() []*entry { + return f.stream +} + +type readCloser struct { + io.Reader + closeFunc func() error +} + +func (rc readCloser) Close() error { + return rc.closeFunc() +} + +func fileSectionReader(file *os.File) (*io.SectionReader, error) { + info, err := file.Stat() + if err != nil { + return nil, err + } + return io.NewSectionReader(file, 0, info.Size()), nil +} + +func newTempFiles() *tempFiles { + return &tempFiles{} +} + +type tempFiles struct { + files []*os.File + filesMu sync.Mutex +} + +func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) { + f, err := ioutil.TempFile(dir, pattern) + if err != nil { + return nil, err + } + tf.filesMu.Lock() + tf.files = append(tf.files, f) + tf.filesMu.Unlock() + return f, nil +} + +func (tf *tempFiles) CleanupAll() error { + tf.filesMu.Lock() + defer tf.filesMu.Unlock() + var allErr []error + for _, f := range tf.files { + if err := f.Close(); err != nil { + allErr = append(allErr, err) + } + if err := os.Remove(f.Name()); err != nil { + allErr = append(allErr, err) + } + } + tf.files = nil + return errorutil.Aggregate(allErr) +} + +func newCountReader(r io.ReaderAt) (*countReader, error) { + pos := int64(0) + return &countReader{r: r, cPos: &pos}, nil +} + +type countReader struct { + r io.ReaderAt + cPos *int64 + + mu sync.Mutex +} + +func (cr *countReader) Read(p []byte) (int, error) { + cr.mu.Lock() + defer cr.mu.Unlock() + + n, err := cr.r.ReadAt(p, *cr.cPos) + if err == nil { + *cr.cPos += int64(n) + } + return n, err +} + +func (cr *countReader) Seek(offset int64, whence int) (int64, error) { + cr.mu.Lock() + defer cr.mu.Unlock() + + switch whence { + default: + return 0, fmt.Errorf("Unknown whence: %v", whence) + case io.SeekStart: + case io.SeekCurrent: + offset += *cr.cPos + case io.SeekEnd: + return 0, fmt.Errorf("Unsupported whence: %v", whence) + } + + if offset < 0 { + return 0, fmt.Errorf("invalid offset") + } + *cr.cPos = offset + return offset, nil +} + +func (cr *countReader) currentPos() int64 { + cr.mu.Lock() + defer cr.mu.Unlock() + + return *cr.cPos +} diff --git a/extender/code/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go 
b/extender/code/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go new file mode 100644 index 000000000..6de78b02d --- /dev/null +++ b/extender/code/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go @@ -0,0 +1,40 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package errorutil + +import ( + "errors" + "fmt" + "strings" +) + +// Aggregate combines a list of errors into a single new error. +func Aggregate(errs []error) error { + switch len(errs) { + case 0: + return nil + case 1: + return errs[0] + default: + points := make([]string, len(errs)+1) + points[0] = fmt.Sprintf("%d error(s) occurred:", len(errs)) + for i, err := range errs { + points[i+1] = fmt.Sprintf("* %s", err) + } + return errors.New(strings.Join(points, "\n\t")) + } +} diff --git a/extender/code/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/extender/code/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go new file mode 100644 index 000000000..c45a7aaca --- /dev/null +++ b/extender/code/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go @@ -0,0 +1,849 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/gzip" + "crypto/sha256" + "encoding/binary" + "encoding/json" + "fmt" + "hash" + "io" + "io/ioutil" + "os" + "path" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/containerd/stargz-snapshotter/estargz/errorutil" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// A Reader permits random access reads from a stargz file. +type Reader struct { + sr *io.SectionReader + toc *jtoc + tocDigest digest.Digest + + // m stores all non-chunk entries, keyed by name. + m map[string]*TOCEntry + + // chunks stores all TOCEntry values for regular files that + // are split up. For a file with a single chunk, it's only + // stored in m. + chunks map[string][]*TOCEntry +} + +// Open opens a stargz file for reading. +// +// Note that each entry name is normalized as the path that is relative to root. 
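+//
+// A minimal usage sketch (editorial, not upstream documentation; the file name
+// and looked-up path are assumptions chosen for illustration):
+//
+//	f, _ := os.Open("layer.esgz")
+//	info, _ := f.Stat()
+//	r, err := Open(io.NewSectionReader(f, 0, info.Size()))
+//	if err != nil {
+//		// handle error
+//	}
+//	ent, ok := r.Lookup("app/run")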
+func Open(sr *io.SectionReader) (*Reader, error) { + tocOff, footerSize, err := OpenFooter(sr) + if err != nil { + return nil, errors.Wrapf(err, "error parsing footer") + } + tocTargz := make([]byte, sr.Size()-tocOff-footerSize) + if _, err := sr.ReadAt(tocTargz, tocOff); err != nil { + return nil, fmt.Errorf("error reading %d byte TOC targz: %v", len(tocTargz), err) + } + zr, err := gzip.NewReader(bytes.NewReader(tocTargz)) + if err != nil { + return nil, fmt.Errorf("malformed TOC gzip header: %v", err) + } + zr.Multistream(false) + tr := tar.NewReader(zr) + h, err := tr.Next() + if err != nil { + return nil, fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err) + } + if h.Name != TOCTarName { + return nil, fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, TOCTarName) + } + dgstr := digest.Canonical.Digester() + toc := new(jtoc) + if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil { + return nil, fmt.Errorf("error decoding TOC JSON: %v", err) + } + r := &Reader{sr: sr, toc: toc, tocDigest: dgstr.Digest()} + if err := r.initFields(); err != nil { + return nil, fmt.Errorf("failed to initialize fields of entries: %v", err) + } + return r, nil +} + +// OpenFooter extracts and parses footer from the given blob. +func OpenFooter(sr *io.SectionReader) (tocOffset int64, footerSize int64, rErr error) { + if sr.Size() < FooterSize && sr.Size() < legacyFooterSize { + return 0, 0, fmt.Errorf("blob size %d is smaller than the footer size", sr.Size()) + } + // TODO: read a bigger chunk (1MB?) at once here to hopefully + // get the TOC + footer in one go. + var footer [FooterSize]byte + if _, err := sr.ReadAt(footer[:], sr.Size()-FooterSize); err != nil { + return 0, 0, fmt.Errorf("error reading footer: %v", err) + } + return parseFooter(footer[:]) +} + +// initFields populates the Reader from r.toc after decoding it from +// JSON. +// +// Unexported fields are populated and TOCEntry fields that were +// implicit in the JSON are populated. 
+func (r *Reader) initFields() error {
+ r.m = make(map[string]*TOCEntry, len(r.toc.Entries))
+ r.chunks = make(map[string][]*TOCEntry)
+ var lastPath string
+ uname := map[int]string{}
+ gname := map[int]string{}
+ var lastRegEnt *TOCEntry
+ for _, ent := range r.toc.Entries {
+ ent.Name = cleanEntryName(ent.Name)
+ if ent.Type == "reg" {
+ lastRegEnt = ent
+ }
+ if ent.Type == "chunk" {
+ ent.Name = lastPath
+ r.chunks[ent.Name] = append(r.chunks[ent.Name], ent)
+ if ent.ChunkSize == 0 && lastRegEnt != nil {
+ ent.ChunkSize = lastRegEnt.Size - ent.ChunkOffset
+ }
+ } else {
+ lastPath = ent.Name
+
+ if ent.Uname != "" {
+ uname[ent.UID] = ent.Uname
+ } else {
+ ent.Uname = uname[ent.UID]
+ }
+ if ent.Gname != "" {
+ gname[ent.GID] = ent.Gname
+ } else {
+ ent.Gname = gname[ent.GID]
+ }
+
+ ent.modTime, _ = time.Parse(time.RFC3339, ent.ModTime3339)
+
+ if ent.Type == "dir" {
+ ent.NumLink++ // Parent dir links to this directory
+ }
+ r.m[ent.Name] = ent
+ }
+ if ent.Type == "reg" && ent.ChunkSize > 0 && ent.ChunkSize < ent.Size {
+ r.chunks[ent.Name] = make([]*TOCEntry, 0, ent.Size/ent.ChunkSize+1)
+ r.chunks[ent.Name] = append(r.chunks[ent.Name], ent)
+ }
+ if ent.ChunkSize == 0 && ent.Size != 0 {
+ ent.ChunkSize = ent.Size
+ }
+ }
+
+ // Populate children, add implicit directories:
+ for _, ent := range r.toc.Entries {
+ if ent.Type == "chunk" {
+ continue
+ }
+ // add "foo/":
+ // add "foo" child to "" (creating "" if necessary)
+ //
+ // add "foo/bar/":
+ // add "bar" child to "foo" (creating "foo" if necessary)
+ //
+ // add "foo/bar.txt":
+ // add "bar.txt" child to "foo" (creating "foo" if necessary)
+ //
+ // add "a/b/c/d/e/f.txt":
+ // create "a/b/c/d/e" node
+ // add "f.txt" child to "e"
+
+ name := ent.Name
+ pdirName := parentDir(name)
+ if name == pdirName {
+ // This entry and its parent are the same.
+ // Ignore it to avoid an infinite reference loop.
+ // The example case where this can occur is when the tar contains the
+ // root directory itself (e.g. "./", "/").
+ continue
+ }
+ pdir := r.getOrCreateDir(pdirName)
+ ent.NumLink++ // at least one name (ent.Name) references this entry.
+ if ent.Type == "hardlink" {
+ if org, ok := r.m[cleanEntryName(ent.LinkName)]; ok {
+ org.NumLink++ // original entry is referenced by this ent.Name.
+ ent = org
+ } else {
+ return fmt.Errorf("%q is a hardlink but the linkname %q isn't found", ent.Name, ent.LinkName)
+ }
+ }
+ pdir.addChild(path.Base(name), ent)
+ }
+
+ lastOffset := r.sr.Size()
+ for i := len(r.toc.Entries) - 1; i >= 0; i-- {
+ e := r.toc.Entries[i]
+ if e.isDataType() {
+ e.nextOffset = lastOffset
+ }
+ if e.Offset != 0 {
+ lastOffset = e.Offset
+ }
+ }
+
+ return nil
+}
+
+func parentDir(p string) string {
+ dir, _ := path.Split(p)
+ return strings.TrimSuffix(dir, "/")
+}
+
+func (r *Reader) getOrCreateDir(d string) *TOCEntry {
+ e, ok := r.m[d]
+ if !ok {
+ e = &TOCEntry{
+ Name: d,
+ Type: "dir",
+ Mode: 0755,
+ NumLink: 2, // The directory itself (.) and the parent link to this directory.
+ }
+ r.m[d] = e
+ if d != "" {
+ pdir := r.getOrCreateDir(parentDir(d))
+ pdir.addChild(path.Base(d), e)
+ }
+ }
+ return e
+}
+
+// VerifyTOC checks that the TOC JSON in the passed blob matches the
+// passed digests and that the TOC JSON contains digests for all chunks
+// contained in the blob. If the verification succeeds, this function
+// returns a TOCEntryVerifier which holds all chunk digests in the stargz blob.
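+//
+// A minimal usage sketch (editorial; tocDigest would typically come from an
+// image manifest annotation):
+//
+//	v, err := r.VerifyTOC(tocDigest)
+//	if err != nil {
+//		// TOC does not match the expected digest
+//	}
+//	// v.Verifier(ent) then returns a digest.Verifier for each chunk entry.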
+func (r *Reader) VerifyTOC(tocDigest digest.Digest) (TOCEntryVerifier, error) { + // Verify the digest of TOC JSON + if r.tocDigest != tocDigest { + return nil, fmt.Errorf("invalid TOC JSON %q; want %q", r.tocDigest, tocDigest) + } + digestMap := make(map[int64]digest.Digest) // map from chunk offset to the digest + for _, e := range r.toc.Entries { + if e.Type == "reg" || e.Type == "chunk" { + if e.Type == "reg" && e.Size == 0 { + continue // ignores empty file + } + + // offset must be unique in stargz blob + if _, ok := digestMap[e.Offset]; ok { + return nil, fmt.Errorf("offset %d found twice", e.Offset) + } + + // all chunk entries must contain digest + if e.ChunkDigest == "" { + return nil, fmt.Errorf("ChunkDigest of %q(off=%d) not found in TOC JSON", + e.Name, e.Offset) + } + + d, err := digest.Parse(e.ChunkDigest) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse digest %q", e.ChunkDigest) + } + digestMap[e.Offset] = d + } + } + + return &verifier{digestMap: digestMap}, nil +} + +// verifier is an implementation of TOCEntryVerifier which holds verifiers keyed by +// offset of the chunk. +type verifier struct { + digestMap map[int64]digest.Digest + digestMapMu sync.Mutex +} + +// Verifier returns a content verifier specified by TOCEntry. +func (v *verifier) Verifier(ce *TOCEntry) (digest.Verifier, error) { + v.digestMapMu.Lock() + defer v.digestMapMu.Unlock() + d, ok := v.digestMap[ce.Offset] + if !ok { + return nil, fmt.Errorf("verifier for offset=%d,size=%d hasn't been registered", + ce.Offset, ce.ChunkSize) + } + return d.Verifier(), nil +} + +// ChunkEntryForOffset returns the TOCEntry containing the byte of the +// named file at the given offset within the file. +// Name must be absolute path or one that is relative to root. +func (r *Reader) ChunkEntryForOffset(name string, offset int64) (e *TOCEntry, ok bool) { + name = cleanEntryName(name) + e, ok = r.Lookup(name) + if !ok || !e.isDataType() { + return nil, false + } + ents := r.chunks[name] + if len(ents) < 2 { + if offset >= e.ChunkSize { + return nil, false + } + return e, true + } + i := sort.Search(len(ents), func(i int) bool { + e := ents[i] + return e.ChunkOffset >= offset || (offset > e.ChunkOffset && offset < e.ChunkOffset+e.ChunkSize) + }) + if i == len(ents) { + return nil, false + } + return ents[i], true +} + +// Lookup returns the Table of Contents entry for the given path. +// +// To get the root directory, use the empty string. +// Path must be absolute path or one that is relative to root. +func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) { + path = cleanEntryName(path) + if r == nil { + return + } + e, ok = r.m[path] + if ok && e.Type == "hardlink" { + e, ok = r.m[e.LinkName] + } + return +} + +// OpenFile returns the reader of the specified file payload. +// +// Name must be absolute path or one that is relative to root. +func (r *Reader) OpenFile(name string) (*io.SectionReader, error) { + name = cleanEntryName(name) + ent, ok := r.Lookup(name) + if !ok { + // TODO: come up with some error plan. 
This is lazy: + return nil, &os.PathError{ + Path: name, + Op: "OpenFile", + Err: os.ErrNotExist, + } + } + if ent.Type != "reg" { + return nil, &os.PathError{ + Path: name, + Op: "OpenFile", + Err: errors.New("not a regular file"), + } + } + fr := &fileReader{ + r: r, + size: ent.Size, + ents: r.getChunks(ent), + } + return io.NewSectionReader(fr, 0, fr.size), nil +} + +func (r *Reader) getChunks(ent *TOCEntry) []*TOCEntry { + if ents, ok := r.chunks[ent.Name]; ok { + return ents + } + return []*TOCEntry{ent} +} + +type fileReader struct { + r *Reader + size int64 + ents []*TOCEntry // 1 or more reg/chunk entries +} + +func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) { + if off >= fr.size { + return 0, io.EOF + } + if off < 0 { + return 0, errors.New("invalid offset") + } + var i int + if len(fr.ents) > 1 { + i = sort.Search(len(fr.ents), func(i int) bool { + return fr.ents[i].ChunkOffset >= off + }) + if i == len(fr.ents) { + i = len(fr.ents) - 1 + } + } + ent := fr.ents[i] + if ent.ChunkOffset > off { + if i == 0 { + return 0, errors.New("internal error; first chunk offset is non-zero") + } + ent = fr.ents[i-1] + } + + // If ent is a chunk of a large file, adjust the ReadAt + // offset by the chunk's offset. + off -= ent.ChunkOffset + + finalEnt := fr.ents[len(fr.ents)-1] + gzOff := ent.Offset + // gzBytesRemain is the number of compressed gzip bytes in this + // file remaining, over 1+ gzip chunks. + gzBytesRemain := finalEnt.NextOffset() - gzOff + + sr := io.NewSectionReader(fr.r.sr, gzOff, gzBytesRemain) + + const maxGZread = 2 << 20 + var bufSize = maxGZread + if gzBytesRemain < maxGZread { + bufSize = int(gzBytesRemain) + } + + br := bufio.NewReaderSize(sr, bufSize) + if _, err := br.Peek(bufSize); err != nil { + return 0, fmt.Errorf("fileReader.ReadAt.peek: %v", err) + } + + gz, err := gzip.NewReader(br) + if err != nil { + return 0, fmt.Errorf("fileReader.ReadAt.gzipNewReader: %v", err) + } + if n, err := io.CopyN(ioutil.Discard, gz, off); n != off || err != nil { + return 0, fmt.Errorf("discard of %d bytes = %v, %v", off, n, err) + } + return io.ReadFull(gz, p) +} + +// A Writer writes stargz files. +// +// Use NewWriter to create a new Writer. +type Writer struct { + bw *bufio.Writer + cw *countWriter + toc *jtoc + diffHash hash.Hash // SHA-256 of uncompressed tar + + closed bool + gz *gzip.Writer + lastUsername map[int]string + lastGroupname map[int]string + compressionLevel int + + // ChunkSize optionally controls the maximum number of bytes + // of data of a regular file that can be written in one gzip + // stream before a new gzip stream is started. + // Zero means to use a default, currently 4 MiB. + ChunkSize int +} + +// currentGzipWriter writes to the current w.gz field, which can +// change throughout writing a tar entry. +// +// Additionally, it updates w's SHA-256 of the uncompressed bytes +// of the tar file. +type currentGzipWriter struct{ w *Writer } + +func (cgw currentGzipWriter) Write(p []byte) (int, error) { + cgw.w.diffHash.Write(p) + return cgw.w.gz.Write(p) +} + +func (w *Writer) chunkSize() int { + if w.ChunkSize <= 0 { + return 4 << 20 + } + return w.ChunkSize +} + +// NewWriter returns a new stargz writer writing to w. +// +// The writer must be closed to write its trailing table of contents. +func NewWriter(w io.Writer) *Writer { + return NewWriterLevel(w, gzip.BestCompression) +} + +// NewWriterLevel returns a new stargz writer writing to w. +// The compression level is configurable. 
+//
+// The writer must be closed to write its trailing table of contents.
+func NewWriterLevel(w io.Writer, compressionLevel int) *Writer {
+    bw := bufio.NewWriter(w)
+    cw := &countWriter{w: bw}
+    return &Writer{
+        bw:               bw,
+        cw:               cw,
+        toc:              &jtoc{Version: 1},
+        diffHash:         sha256.New(),
+        compressionLevel: compressionLevel,
+    }
+}
+
+// Close writes the stargz's table of contents and flushes all the
+// buffers, returning any error.
+func (w *Writer) Close() (digest.Digest, error) {
+    if w.closed {
+        return "", nil
+    }
+    defer func() { w.closed = true }()
+
+    if err := w.closeGz(); err != nil {
+        return "", err
+    }
+
+    // Write the TOC index.
+    tocOff := w.cw.n
+    w.gz, _ = gzip.NewWriterLevel(w.cw, w.compressionLevel)
+    tw := tar.NewWriter(currentGzipWriter{w})
+    tocJSON, err := json.MarshalIndent(w.toc, "", "\t")
+    if err != nil {
+        return "", err
+    }
+    if err := tw.WriteHeader(&tar.Header{
+        Typeflag: tar.TypeReg,
+        Name:     TOCTarName,
+        Size:     int64(len(tocJSON)),
+    }); err != nil {
+        return "", err
+    }
+    if _, err := tw.Write(tocJSON); err != nil {
+        return "", err
+    }
+
+    if err := tw.Close(); err != nil {
+        return "", err
+    }
+    if err := w.closeGz(); err != nil {
+        return "", err
+    }
+
+    // And a little footer with pointer to the TOC gzip stream.
+    if _, err := w.bw.Write(footerBytes(tocOff)); err != nil {
+        return "", err
+    }
+
+    if err := w.bw.Flush(); err != nil {
+        return "", err
+    }
+
+    return digest.FromBytes(tocJSON), nil
+}
+
+func (w *Writer) closeGz() error {
+    if w.closed {
+        return errors.New("write on closed Writer")
+    }
+    if w.gz != nil {
+        if err := w.gz.Close(); err != nil {
+            return err
+        }
+        w.gz = nil
+    }
+    return nil
+}
+
+// nameIfChanged returns name, unless it was already the value of (*mp)[id],
+// in which case it returns the empty string.
+func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string {
+    if name == "" {
+        return ""
+    }
+    if *mp == nil {
+        *mp = make(map[int]string)
+    }
+    if (*mp)[id] == name {
+        return ""
+    }
+    (*mp)[id] = name
+    return name
+}
+
+func (w *Writer) condOpenGz() {
+    if w.gz == nil {
+        w.gz, _ = gzip.NewWriterLevel(w.cw, w.compressionLevel)
+    }
+}
+
+// AppendTar reads the tar or tar.gz file from r and appends
+// each of its contents to w.
+//
+// The input r can optionally be gzip compressed but the output will
+// always be gzip compressed.
+func (w *Writer) AppendTar(r io.Reader) error {
+    br := bufio.NewReader(r)
+    var tr *tar.Reader
+    if isGzip(br) {
+        // NewReader can't fail if isGzip returned true.
+        zr, _ := gzip.NewReader(br)
+        tr = tar.NewReader(zr)
+    } else {
+        tr = tar.NewReader(br)
+    }
+    for {
+        h, err := tr.Next()
+        if err == io.EOF {
+            break
+        }
+        if err != nil {
+            return fmt.Errorf("error reading from source tar: tar.Reader.Next: %v", err)
+        }
+        if h.Name == TOCTarName {
+            // It is possible for a layer to be "stargzified" twice during the
+            // distribution lifecycle. So we reserve "TOCTarName" here to avoid
+            // duplicated entries in the resulting layer.
+            continue
+        }
+
+        xattrs := make(map[string][]byte)
+        const xattrPAXRecordsPrefix = "SCHILY.xattr."
+ if h.PAXRecords != nil { + for k, v := range h.PAXRecords { + if strings.HasPrefix(k, xattrPAXRecordsPrefix) { + xattrs[k[len(xattrPAXRecordsPrefix):]] = []byte(v) + } + } + } + ent := &TOCEntry{ + Name: h.Name, + Mode: h.Mode, + UID: h.Uid, + GID: h.Gid, + Uname: w.nameIfChanged(&w.lastUsername, h.Uid, h.Uname), + Gname: w.nameIfChanged(&w.lastGroupname, h.Gid, h.Gname), + ModTime3339: formatModtime(h.ModTime), + Xattrs: xattrs, + } + w.condOpenGz() + tw := tar.NewWriter(currentGzipWriter{w}) + if err := tw.WriteHeader(h); err != nil { + return err + } + switch h.Typeflag { + case tar.TypeLink: + ent.Type = "hardlink" + ent.LinkName = h.Linkname + case tar.TypeSymlink: + ent.Type = "symlink" + ent.LinkName = h.Linkname + case tar.TypeDir: + ent.Type = "dir" + case tar.TypeReg: + ent.Type = "reg" + ent.Size = h.Size + case tar.TypeChar: + ent.Type = "char" + ent.DevMajor = int(h.Devmajor) + ent.DevMinor = int(h.Devminor) + case tar.TypeBlock: + ent.Type = "block" + ent.DevMajor = int(h.Devmajor) + ent.DevMinor = int(h.Devminor) + case tar.TypeFifo: + ent.Type = "fifo" + default: + return fmt.Errorf("unsupported input tar entry %q", h.Typeflag) + } + + // We need to keep a reference to the TOC entry for regular files, so that we + // can fill the digest later. + var regFileEntry *TOCEntry + var payloadDigest digest.Digester + if h.Typeflag == tar.TypeReg { + regFileEntry = ent + payloadDigest = digest.Canonical.Digester() + } + + if h.Typeflag == tar.TypeReg && ent.Size > 0 { + var written int64 + totalSize := ent.Size // save it before we destroy ent + tee := io.TeeReader(tr, payloadDigest.Hash()) + for written < totalSize { + if err := w.closeGz(); err != nil { + return err + } + + chunkSize := int64(w.chunkSize()) + remain := totalSize - written + if remain < chunkSize { + chunkSize = remain + } else { + ent.ChunkSize = chunkSize + } + ent.Offset = w.cw.n + ent.ChunkOffset = written + chunkDigest := digest.Canonical.Digester() + + w.condOpenGz() + + teeChunk := io.TeeReader(tee, chunkDigest.Hash()) + if _, err := io.CopyN(tw, teeChunk, chunkSize); err != nil { + return fmt.Errorf("error copying %q: %v", h.Name, err) + } + ent.ChunkDigest = chunkDigest.Digest().String() + w.toc.Entries = append(w.toc.Entries, ent) + written += chunkSize + ent = &TOCEntry{ + Name: h.Name, + Type: "chunk", + } + } + } else { + w.toc.Entries = append(w.toc.Entries, ent) + } + if payloadDigest != nil { + regFileEntry.Digest = payloadDigest.Digest().String() + } + if err := tw.Flush(); err != nil { + return err + } + } + return nil +} + +// DiffID returns the SHA-256 of the uncompressed tar bytes. +// It is only valid to call DiffID after Close. +func (w *Writer) DiffID() string { + return fmt.Sprintf("sha256:%x", w.diffHash.Sum(nil)) +} + +// footerBytes returns the 51 bytes footer. +func footerBytes(tocOff int64) []byte { + buf := bytes.NewBuffer(make([]byte, 0, FooterSize)) + gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression) // MUST be NoCompression to keep 51 bytes + + // Extra header indicating the offset of TOCJSON + // https://tools.ietf.org/html/rfc1952#section-2.3.1.1 + header := make([]byte, 4) + header[0], header[1] = 'S', 'G' + subfield := fmt.Sprintf("%016xSTARGZ", tocOff) + binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952 + gz.Header.Extra = append(header, []byte(subfield)...) 
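+    // Closing the writer without having written any payload emits the gzip
+    // header (with the Extra field set above), an empty deflate block, and the
+    // 8-byte gzip trailer; the check below asserts this totals FooterSize bytes.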
+    gz.Close()
+    if buf.Len() != FooterSize {
+        panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize))
+    }
+    return buf.Bytes()
+}
+
+func parseFooter(p []byte) (tocOffset int64, footerSize int64, rErr error) {
+    var allErr []error
+
+    tocOffset, err := parseEStargzFooter(p)
+    if err == nil {
+        return tocOffset, FooterSize, nil
+    }
+    allErr = append(allErr, err)
+
+    pad := len(p) - legacyFooterSize
+    if pad < 0 {
+        pad = 0
+    }
+    tocOffset, err = parseLegacyFooter(p[pad:])
+    if err == nil {
+        return tocOffset, legacyFooterSize, nil
+    }
+    return 0, 0, errorutil.Aggregate(append(allErr, err))
+}
+
+func parseEStargzFooter(p []byte) (tocOffset int64, err error) {
+    if len(p) != FooterSize {
+        return 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
+    }
+    zr, err := gzip.NewReader(bytes.NewReader(p))
+    if err != nil {
+        return 0, err
+    }
+    extra := zr.Header.Extra
+    si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:]
+    if si1 != 'S' || si2 != 'G' {
+        return 0, fmt.Errorf("invalid subfield IDs: %q, %q; want S, G", si1, si2)
+    }
+    if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(16+len("STARGZ")) {
+        return 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
+    }
+    if string(subfield[16:]) != "STARGZ" {
+        return 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
+    }
+    return strconv.ParseInt(string(subfield[:16]), 16, 64)
+}
+
+func parseLegacyFooter(p []byte) (tocOffset int64, err error) {
+    if len(p) != legacyFooterSize {
+        return 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p))
+    }
+    zr, err := gzip.NewReader(bytes.NewReader(p))
+    if err != nil {
+        return 0, errors.Wrapf(err, "legacy: failed to get footer gzip reader")
+    }
+    extra := zr.Header.Extra
+    if len(extra) != 16+len("STARGZ") {
+        return 0, fmt.Errorf("legacy: invalid size of the stargz extra field")
+    }
+    if string(extra[16:]) != "STARGZ" {
+        return 0, fmt.Errorf("legacy: magic string STARGZ not found")
+    }
+    return strconv.ParseInt(string(extra[:16]), 16, 64)
+}
+
+func formatModtime(t time.Time) string {
+    if t.IsZero() || t.Unix() == 0 {
+        return ""
+    }
+    return t.UTC().Round(time.Second).Format(time.RFC3339)
+}
+
+func cleanEntryName(name string) string {
+    // Use path.Clean to consistently deal with path separators across platforms.
+    return strings.TrimPrefix(path.Clean("/"+name), "/")
+}
+
+// countWriter counts how many bytes have been written to its wrapped
+// io.Writer.
+type countWriter struct {
+    w io.Writer
+    n int64
+}
+
+func (cw *countWriter) Write(p []byte) (n int, err error) {
+    n, err = cw.w.Write(p)
+    cw.n += int64(n)
+    return
+}
+
+// isGzip reports whether br is positioned right before an upcoming gzip stream.
+// It does not consume any bytes from br.
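+//
+// A gzip stream begins with the magic bytes 0x1f, 0x8b followed by the
+// compression method byte (8 = deflate, per RFC 1952), which is what the
+// three-byte peek below checks for.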
+func isGzip(br *bufio.Reader) bool { + const ( + gzipID1 = 0x1f + gzipID2 = 0x8b + gzipDeflate = 8 + ) + peek, _ := br.Peek(3) + return len(peek) >= 3 && peek[0] == gzipID1 && peek[1] == gzipID2 && peek[2] == gzipDeflate +} diff --git a/extender/code/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod b/extender/code/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod new file mode 100644 index 000000000..a40070443 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod @@ -0,0 +1,9 @@ +module github.com/containerd/stargz-snapshotter/estargz + +go 1.13 + +require ( + github.com/opencontainers/go-digest v1.0.0 + github.com/pkg/errors v0.9.1 + golang.org/x/sync v0.0.0-20201207232520-09787c993a3a +) diff --git a/extender/code/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum b/extender/code/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum new file mode 100644 index 000000000..d81890710 --- /dev/null +++ b/extender/code/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum @@ -0,0 +1,6 @@ +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/extender/code/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go b/extender/code/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go new file mode 100644 index 000000000..7b05ab53f --- /dev/null +++ b/extender/code/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go @@ -0,0 +1,256 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "os" + "path" + "time" + + digest "github.com/opencontainers/go-digest" +) + +const ( + // TOCTarName is the name of the JSON file in the tar archive in the + // table of contents gzip stream. + TOCTarName = "stargz.index.json" + + // FooterSize is the number of bytes in the footer + // + // The footer is an empty gzip stream with no compression and an Extra + // header of the form "%016xSTARGZ", where the 64 bit hex-encoded + // number is the offset to the gzip stream of JSON TOC. 
+ // + // 51 comes from: + // + // 10 bytes gzip header + // 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ")) + // 2 bytes Extra: SI1 = 'S', SI2 = 'G' + // 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ")) + // 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC) + // 5 bytes flate header + // 8 bytes gzip footer + // (End of the eStargz blob) + // + // NOTE: For Extra fields, subfield IDs SI1='S' SI2='G' is used for eStargz. + FooterSize = 51 + + // legacyFooterSize is the number of bytes in the legacy stargz footer. + // + // 47 comes from: + // + // 10 byte gzip header + + // 2 byte (LE16) length of extra, encoding 22 (16 hex digits + len("STARGZ")) == "\x16\x00" + + // 22 bytes of extra (fmt.Sprintf("%016xSTARGZ", tocGzipOffset)) + // 5 byte flate header + // 8 byte gzip footer (two little endian uint32s: digest, size) + legacyFooterSize = 47 + + // TOCJSONDigestAnnotation is an annotation for an image layer. This stores the + // digest of the TOC JSON. + // This annotation is valid only when it is specified in `.[]layers.annotations` + // of an image manifest. + TOCJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest" + + // PrefetchLandmark is a file entry which indicates the end position of + // prefetch in the stargz file. + PrefetchLandmark = ".prefetch.landmark" + + // NoPrefetchLandmark is a file entry which indicates that no prefetch should + // occur in the stargz file. + NoPrefetchLandmark = ".no.prefetch.landmark" + + landmarkContents = 0xf +) + +// jtoc is the JSON-serialized table of contents index of the files in the stargz file. +type jtoc struct { + Version int `json:"version"` + Entries []*TOCEntry `json:"entries"` +} + +// TOCEntry is an entry in the stargz file's TOC (Table of Contents). +type TOCEntry struct { + // Name is the tar entry's name. It is the complete path + // stored in the tar file, not just the base name. + Name string `json:"name"` + + // Type is one of "dir", "reg", "symlink", "hardlink", "char", + // "block", "fifo", or "chunk". + // The "chunk" type is used for regular file data chunks past the first + // TOCEntry; the 2nd chunk and on have only Type ("chunk"), Offset, + // ChunkOffset, and ChunkSize populated. + Type string `json:"type"` + + // Size, for regular files, is the logical size of the file. + Size int64 `json:"size,omitempty"` + + // ModTime3339 is the modification time of the tar entry. Empty + // means zero or unknown. Otherwise it's in UTC RFC3339 + // format. Use the ModTime method to access the time.Time value. + ModTime3339 string `json:"modtime,omitempty"` + modTime time.Time + + // LinkName, for symlinks and hardlinks, is the link target. + LinkName string `json:"linkName,omitempty"` + + // Mode is the permission and mode bits. + Mode int64 `json:"mode,omitempty"` + + // UID is the user ID of the owner. + UID int `json:"uid,omitempty"` + + // GID is the group ID of the owner. + GID int `json:"gid,omitempty"` + + // Uname is the username of the owner. + // + // In the serialized JSON, this field may only be present for + // the first entry with the same UID. + Uname string `json:"userName,omitempty"` + + // Gname is the group name of the owner. + // + // In the serialized JSON, this field may only be present for + // the first entry with the same GID. + Gname string `json:"groupName,omitempty"` + + // Offset, for regular files, provides the offset in the + // stargz file to the file's data bytes. See ChunkOffset and + // ChunkSize. 
+    Offset int64 `json:"offset,omitempty"`
+
+    nextOffset int64 // the Offset of the next entry with a non-zero Offset
+
+    // DevMajor is the major device number for "char" and "block" types.
+    DevMajor int `json:"devMajor,omitempty"`
+
+    // DevMinor is the minor device number for "char" and "block" types.
+    DevMinor int `json:"devMinor,omitempty"`
+
+    // NumLink is the number of entry names pointing to this entry.
+    // Zero means one name references this entry.
+    NumLink int
+
+    // Xattrs are the extended attributes for the entry.
+    Xattrs map[string][]byte `json:"xattrs,omitempty"`
+
+    // Digest stores the OCI checksum for the regular file's payload.
+    // It has the form "sha256:abcdef01234....".
+    Digest string `json:"digest,omitempty"`
+
+    // ChunkOffset is non-zero if this is a chunk of a large,
+    // regular file. If so, the Offset is where the gzip header of
+    // ChunkSize bytes at ChunkOffset in Name begins.
+    //
+    // In serialized form, a "chunkSize" JSON field of zero means
+    // that the chunk goes to the end of the file. After reading
+    // from the stargz TOC, though, the ChunkSize is initialized
+    // to a non-zero value when Type is either "reg" or
+    // "chunk".
+    ChunkOffset int64 `json:"chunkOffset,omitempty"`
+    ChunkSize   int64 `json:"chunkSize,omitempty"`
+
+    // ChunkDigest stores an OCI digest of the chunk. This must be formed
+    // as "sha256:0123abcd...".
+    ChunkDigest string `json:"chunkDigest,omitempty"`
+
+    children map[string]*TOCEntry
+}
+
+// ModTime returns the entry's modification time.
+func (e *TOCEntry) ModTime() time.Time { return e.modTime }
+
+// NextOffset returns the position (relative to the start of the
+// stargz file) of the next gzip boundary after e.Offset.
+func (e *TOCEntry) NextOffset() int64 { return e.nextOffset }
+
+func (e *TOCEntry) addChild(baseName string, child *TOCEntry) {
+    if e.children == nil {
+        e.children = make(map[string]*TOCEntry)
+    }
+    if child.Type == "dir" {
+        e.NumLink++ // Entry ".." in the subdirectory links to this directory
+    }
+    e.children[baseName] = child
+}
+
+// isDataType reports whether TOCEntry is a regular file or chunk (something that
+// contains regular file data).
+func (e *TOCEntry) isDataType() bool { return e.Type == "reg" || e.Type == "chunk" }
+
+// Stat returns a FileInfo value representing e.
+func (e *TOCEntry) Stat() os.FileInfo { return fileInfo{e} }
+
+// ForeachChild calls f for each child item. If f returns false, iteration ends.
+// If e is not a directory, f is not called.
+func (e *TOCEntry) ForeachChild(f func(baseName string, ent *TOCEntry) bool) {
+    for name, ent := range e.children {
+        if !f(name, ent) {
+            return
+        }
+    }
+}
+
+// LookupChild returns the directory e's child by its base name.
+func (e *TOCEntry) LookupChild(baseName string) (child *TOCEntry, ok bool) {
+    child, ok = e.children[baseName]
+    return
+}
+
+// fileInfo implements os.FileInfo using the wrapped *TOCEntry.
+type fileInfo struct{ e *TOCEntry } + +var _ os.FileInfo = fileInfo{} + +func (fi fileInfo) Name() string { return path.Base(fi.e.Name) } +func (fi fileInfo) IsDir() bool { return fi.e.Type == "dir" } +func (fi fileInfo) Size() int64 { return fi.e.Size } +func (fi fileInfo) ModTime() time.Time { return fi.e.ModTime() } +func (fi fileInfo) Sys() interface{} { return fi.e } +func (fi fileInfo) Mode() (m os.FileMode) { + m = os.FileMode(fi.e.Mode) & os.ModePerm + switch fi.e.Type { + case "dir": + m |= os.ModeDir + case "symlink": + m |= os.ModeSymlink + case "char": + m |= os.ModeDevice | os.ModeCharDevice + case "block": + m |= os.ModeDevice + case "fifo": + m |= os.ModeNamedPipe + } + // TODO: ModeSetuid, ModeSetgid, if/as needed. + return m +} + +// TOCEntryVerifier holds verifiers that are usable for verifying chunks contained +// in a eStargz blob. +type TOCEntryVerifier interface { + + // Verifier provides a content verifier that can be used for verifying the + // contents of the specified TOCEntry. + Verifier(ce *TOCEntry) (digest.Verifier, error) +} diff --git a/extender/code/vendor/github.com/coreos/etcd/LICENSE b/extender/code/vendor/github.com/coreos/etcd/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/extender/code/vendor/github.com/coreos/etcd/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/extender/code/vendor/github.com/coreos/etcd/NOTICE b/extender/code/vendor/github.com/coreos/etcd/NOTICE new file mode 100644 index 000000000..b39ddfa5c --- /dev/null +++ b/extender/code/vendor/github.com/coreos/etcd/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/extender/code/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go b/extender/code/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go new file mode 100644 index 000000000..fd9ee3729 --- /dev/null +++ b/extender/code/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go @@ -0,0 +1,2004 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: raft.proto + +/* + Package raftpb is a generated protocol buffer package. 
+ + It is generated from these files: + raft.proto + + It has these top-level messages: + Entry + SnapshotMetadata + Snapshot + Message + HardState + ConfState + ConfChange +*/ +package raftpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + _ "github.com/gogo/protobuf/gogoproto" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type EntryType int32 + +const ( + EntryNormal EntryType = 0 + EntryConfChange EntryType = 1 +) + +var EntryType_name = map[int32]string{ + 0: "EntryNormal", + 1: "EntryConfChange", +} +var EntryType_value = map[string]int32{ + "EntryNormal": 0, + "EntryConfChange": 1, +} + +func (x EntryType) Enum() *EntryType { + p := new(EntryType) + *p = x + return p +} +func (x EntryType) String() string { + return proto.EnumName(EntryType_name, int32(x)) +} +func (x *EntryType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(EntryType_value, data, "EntryType") + if err != nil { + return err + } + *x = EntryType(value) + return nil +} +func (EntryType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } + +type MessageType int32 + +const ( + MsgHup MessageType = 0 + MsgBeat MessageType = 1 + MsgProp MessageType = 2 + MsgApp MessageType = 3 + MsgAppResp MessageType = 4 + MsgVote MessageType = 5 + MsgVoteResp MessageType = 6 + MsgSnap MessageType = 7 + MsgHeartbeat MessageType = 8 + MsgHeartbeatResp MessageType = 9 + MsgUnreachable MessageType = 10 + MsgSnapStatus MessageType = 11 + MsgCheckQuorum MessageType = 12 + MsgTransferLeader MessageType = 13 + MsgTimeoutNow MessageType = 14 + MsgReadIndex MessageType = 15 + MsgReadIndexResp MessageType = 16 + MsgPreVote MessageType = 17 + MsgPreVoteResp MessageType = 18 +) + +var MessageType_name = map[int32]string{ + 0: "MsgHup", + 1: "MsgBeat", + 2: "MsgProp", + 3: "MsgApp", + 4: "MsgAppResp", + 5: "MsgVote", + 6: "MsgVoteResp", + 7: "MsgSnap", + 8: "MsgHeartbeat", + 9: "MsgHeartbeatResp", + 10: "MsgUnreachable", + 11: "MsgSnapStatus", + 12: "MsgCheckQuorum", + 13: "MsgTransferLeader", + 14: "MsgTimeoutNow", + 15: "MsgReadIndex", + 16: "MsgReadIndexResp", + 17: "MsgPreVote", + 18: "MsgPreVoteResp", +} +var MessageType_value = map[string]int32{ + "MsgHup": 0, + "MsgBeat": 1, + "MsgProp": 2, + "MsgApp": 3, + "MsgAppResp": 4, + "MsgVote": 5, + "MsgVoteResp": 6, + "MsgSnap": 7, + "MsgHeartbeat": 8, + "MsgHeartbeatResp": 9, + "MsgUnreachable": 10, + "MsgSnapStatus": 11, + "MsgCheckQuorum": 12, + "MsgTransferLeader": 13, + "MsgTimeoutNow": 14, + "MsgReadIndex": 15, + "MsgReadIndexResp": 16, + "MsgPreVote": 17, + "MsgPreVoteResp": 18, +} + +func (x MessageType) Enum() *MessageType { + p := new(MessageType) + *p = x + return p +} +func (x MessageType) String() string { + return proto.EnumName(MessageType_name, int32(x)) +} +func (x *MessageType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MessageType_value, data, "MessageType") + if err != nil { + return err + } + *x = MessageType(value) + return nil +} +func (MessageType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} } + +type 
ConfChangeType int32 + +const ( + ConfChangeAddNode ConfChangeType = 0 + ConfChangeRemoveNode ConfChangeType = 1 + ConfChangeUpdateNode ConfChangeType = 2 + ConfChangeAddLearnerNode ConfChangeType = 3 +) + +var ConfChangeType_name = map[int32]string{ + 0: "ConfChangeAddNode", + 1: "ConfChangeRemoveNode", + 2: "ConfChangeUpdateNode", + 3: "ConfChangeAddLearnerNode", +} +var ConfChangeType_value = map[string]int32{ + "ConfChangeAddNode": 0, + "ConfChangeRemoveNode": 1, + "ConfChangeUpdateNode": 2, + "ConfChangeAddLearnerNode": 3, +} + +func (x ConfChangeType) Enum() *ConfChangeType { + p := new(ConfChangeType) + *p = x + return p +} +func (x ConfChangeType) String() string { + return proto.EnumName(ConfChangeType_name, int32(x)) +} +func (x *ConfChangeType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ConfChangeType_value, data, "ConfChangeType") + if err != nil { + return err + } + *x = ConfChangeType(value) + return nil +} +func (ConfChangeType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} } + +type Entry struct { + Term uint64 `protobuf:"varint,2,opt,name=Term" json:"Term"` + Index uint64 `protobuf:"varint,3,opt,name=Index" json:"Index"` + Type EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"` + Data []byte `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Entry) Reset() { *m = Entry{} } +func (m *Entry) String() string { return proto.CompactTextString(m) } +func (*Entry) ProtoMessage() {} +func (*Entry) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } + +type SnapshotMetadata struct { + ConfState ConfState `protobuf:"bytes,1,opt,name=conf_state,json=confState" json:"conf_state"` + Index uint64 `protobuf:"varint,2,opt,name=index" json:"index"` + Term uint64 `protobuf:"varint,3,opt,name=term" json:"term"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SnapshotMetadata) Reset() { *m = SnapshotMetadata{} } +func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) } +func (*SnapshotMetadata) ProtoMessage() {} +func (*SnapshotMetadata) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} } + +type Snapshot struct { + Data []byte `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` + Metadata SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} } + +type Message struct { + Type MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"` + To uint64 `protobuf:"varint,2,opt,name=to" json:"to"` + From uint64 `protobuf:"varint,3,opt,name=from" json:"from"` + Term uint64 `protobuf:"varint,4,opt,name=term" json:"term"` + LogTerm uint64 `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"` + Index uint64 `protobuf:"varint,6,opt,name=index" json:"index"` + Entries []Entry `protobuf:"bytes,7,rep,name=entries" json:"entries"` + Commit uint64 `protobuf:"varint,8,opt,name=commit" json:"commit"` + Snapshot Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot"` + Reject bool `protobuf:"varint,10,opt,name=reject" json:"reject"` + RejectHint uint64 `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"` + Context []byte `protobuf:"bytes,12,opt,name=context" 
json:"context,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{3} } + +type HardState struct { + Term uint64 `protobuf:"varint,1,opt,name=term" json:"term"` + Vote uint64 `protobuf:"varint,2,opt,name=vote" json:"vote"` + Commit uint64 `protobuf:"varint,3,opt,name=commit" json:"commit"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *HardState) Reset() { *m = HardState{} } +func (m *HardState) String() string { return proto.CompactTextString(m) } +func (*HardState) ProtoMessage() {} +func (*HardState) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{4} } + +type ConfState struct { + Nodes []uint64 `protobuf:"varint,1,rep,name=nodes" json:"nodes,omitempty"` + Learners []uint64 `protobuf:"varint,2,rep,name=learners" json:"learners,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfState) Reset() { *m = ConfState{} } +func (m *ConfState) String() string { return proto.CompactTextString(m) } +func (*ConfState) ProtoMessage() {} +func (*ConfState) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{5} } + +type ConfChange struct { + ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"` + Type ConfChangeType `protobuf:"varint,2,opt,name=Type,enum=raftpb.ConfChangeType" json:"Type"` + NodeID uint64 `protobuf:"varint,3,opt,name=NodeID" json:"NodeID"` + Context []byte `protobuf:"bytes,4,opt,name=Context" json:"Context,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfChange) Reset() { *m = ConfChange{} } +func (m *ConfChange) String() string { return proto.CompactTextString(m) } +func (*ConfChange) ProtoMessage() {} +func (*ConfChange) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{6} } + +func init() { + proto.RegisterType((*Entry)(nil), "raftpb.Entry") + proto.RegisterType((*SnapshotMetadata)(nil), "raftpb.SnapshotMetadata") + proto.RegisterType((*Snapshot)(nil), "raftpb.Snapshot") + proto.RegisterType((*Message)(nil), "raftpb.Message") + proto.RegisterType((*HardState)(nil), "raftpb.HardState") + proto.RegisterType((*ConfState)(nil), "raftpb.ConfState") + proto.RegisterType((*ConfChange)(nil), "raftpb.ConfChange") + proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value) + proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value) + proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value) +} +func (m *Entry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Entry) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) + if m.Data != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SnapshotMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil 
+} + +func (m *SnapshotMetadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.ConfState.Size())) + n1, err := m.ConfState.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Snapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Data != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Metadata.Size())) + n2, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Message) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.To)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.From)) + dAtA[i] = 0x20 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x28 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.LogTerm)) + dAtA[i] = 0x30 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) + if len(m.Entries) > 0 { + for _, msg := range m.Entries { + dAtA[i] = 0x3a + i++ + i = encodeVarintRaft(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x40 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Commit)) + dAtA[i] = 0x4a + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Snapshot.Size())) + n3, err := m.Snapshot.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + dAtA[i] = 0x50 + i++ + if m.Reject { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x58 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.RejectHint)) + if m.Context != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) + i += copy(dAtA[i:], m.Context) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *HardState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HardState) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Vote)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Commit)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConfState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfState) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Nodes) > 0 { + for _, num := range m.Nodes { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) + } + } + if len(m.Learners) > 0 { + for _, num := range m.Learners { + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConfChange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfChange) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.ID)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.NodeID)) + if m.Context != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) + i += copy(dAtA[i:], m.Context) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintRaft(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Entry) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.Term)) + n += 1 + sovRaft(uint64(m.Index)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SnapshotMetadata) Size() (n int) { + var l int + _ = l + l = m.ConfState.Size() + n += 1 + l + sovRaft(uint64(l)) + n += 1 + sovRaft(uint64(m.Index)) + n += 1 + sovRaft(uint64(m.Term)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Snapshot) Size() (n int) { + var l int + _ = l + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovRaft(uint64(l)) + } + l = m.Metadata.Size() + n += 1 + l + sovRaft(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Message) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.To)) + n += 1 + sovRaft(uint64(m.From)) + n += 1 + sovRaft(uint64(m.Term)) + n += 1 + sovRaft(uint64(m.LogTerm)) + n += 1 + sovRaft(uint64(m.Index)) + if len(m.Entries) > 0 { + for _, e := range m.Entries { + l = e.Size() + n += 1 + l + sovRaft(uint64(l)) + } + } + n += 1 + sovRaft(uint64(m.Commit)) + l = m.Snapshot.Size() + n += 1 + l + sovRaft(uint64(l)) + n += 2 + n += 1 + sovRaft(uint64(m.RejectHint)) + if m.Context != nil { + l = len(m.Context) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *HardState) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Term)) + n += 1 + sovRaft(uint64(m.Vote)) + n += 1 + sovRaft(uint64(m.Commit)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConfState) Size() (n int) { + var l int + _ = l + if len(m.Nodes) > 0 { + for _, e := range m.Nodes { + n += 1 + sovRaft(uint64(e)) + } + } + if len(m.Learners) > 0 { + for _, e := range m.Learners { + 
n += 1 + sovRaft(uint64(e)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConfChange) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.ID)) + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.NodeID)) + if m.Context != nil { + l = len(m.Context) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovRaft(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozRaft(x uint64) (n int) { + return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Entry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Entry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (EntryType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConfState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Snapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Message: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (MessageType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + m.To = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.To |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + m.From = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.From |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LogTerm", wireType) + } + m.LogTerm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LogTerm |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entries = append(m.Entries, Entry{}) + if err := 
m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + m.Commit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Commit |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Snapshot.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Reject", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Reject = bool(v != 0) + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RejectHint", wireType) + } + m.RejectHint = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RejectHint |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...) + if m.Context == nil { + m.Context = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
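+ // Every case above dispatches on the decoded key, where
+ // key = (fieldNumber << 3) | wireType: wire type 0 is a varint, 2 is
+ // length-delimited (bytes and nested messages such as Entries and
+ // Snapshot), 1 and 5 are fixed 64- and 32-bit values, and 3/4 are the
+ // deprecated group markers, which is why wireType == 4 is rejected early.
+ // A rough worked example (the key byte is illustrative, not raft traffic):
+ //
+ //	key := uint64(0x3A)         // 0b0011_1010
+ //	fieldNum := int32(key >> 3) // 7, the Entries field
+ //	wireType := int(key & 0x7)  // 2, length-delimited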
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HardState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HardState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + } + m.Vote = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Vote |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + m.Commit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Commit |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Nodes = append(m.Nodes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Nodes = append(m.Nodes, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + } + case 2: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Learners = append(m.Learners, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Learners = append(m.Learners, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Learners", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
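+ // ConfState's repeated uint64 fields accept both proto2 encodings handled
+ // above: wire type 0 carries one varint per element, while wire type 2
+ // carries a packed, length-delimited run of varints. As a rough
+ // illustration, nodes = [1, 2, 3] packed under field 1 would arrive as:
+ //
+ //	0x0A            // key: (1 << 3) | 2, field 1, length-delimited
+ //	0x03            // payload length in bytes
+ //	0x01 0x02 0x03  // three single-byte varints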
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfChange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfChange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (ConfChangeType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + m.NodeID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NodeID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...) + if m.Context == nil { + m.Context = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRaft(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthRaft + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRaft(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRaft = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("raft.proto", fileDescriptorRaft) } + +var fileDescriptorRaft = []byte{ + // 815 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x54, 0xcd, 0x6e, 0x23, 0x45, + 0x10, 0xf6, 0x8c, 0xc7, 0x7f, 0x35, 0x8e, 0xd3, 0xa9, 0x35, 0xa8, 0x15, 0x45, 0xc6, 0xb2, 0x38, + 0x58, 0x41, 0x1b, 0x20, 0x07, 0x0e, 0x48, 0x1c, 0x36, 0x09, 0x52, 0x22, 0xad, 0xa3, 0xc5, 0x9b, + 0xe5, 0x80, 0x84, 0x50, 0xc7, 0x53, 0x9e, 0x18, 0x32, 0xd3, 0xa3, 0x9e, 0xf6, 0xb2, 0xb9, 0x20, + 0x1e, 0x80, 0x07, 0xe0, 0xc2, 0xfb, 0xe4, 0xb8, 0x12, 0x77, 0xc4, 0x86, 0x17, 0x41, 0xdd, 0xd3, + 0x63, 0xcf, 0x24, 0xb7, 0xae, 0xef, 0xab, 0xae, 0xfa, 0xea, 0xeb, 0x9a, 0x01, 0x50, 0x62, 0xa9, + 0x8f, 0x32, 0x25, 0xb5, 0xc4, 0xb6, 0x39, 0x67, 0xd7, 0xfb, 0xc3, 0x58, 0xc6, 0xd2, 0x42, 0x9f, + 0x9b, 0x53, 0xc1, 0x4e, 0x7e, 0x83, 0xd6, 0xb7, 0xa9, 0x56, 0x77, 0xf8, 0x19, 0x04, 0x57, 0x77, + 0x19, 0x71, 0x6f, 0xec, 0x4d, 0x07, 0xc7, 0x7b, 0x47, 0xc5, 0xad, 0x23, 0x4b, 0x1a, 0xe2, 0x24, + 0xb8, 0xff, 0xe7, 0x93, 0xc6, 0xdc, 0x26, 0x21, 0x87, 0xe0, 0x8a, 0x54, 0xc2, 0xfd, 0xb1, 0x37, + 0x0d, 0x36, 0x0c, 0xa9, 0x04, 0xf7, 0xa1, 0x75, 0x91, 0x46, 0xf4, 0x8e, 0x37, 0x2b, 0x54, 0x01, + 0x21, 0x42, 0x70, 0x26, 0xb4, 0xe0, 0xc1, 0xd8, 0x9b, 0xf6, 0xe7, 0xf6, 0x3c, 0xf9, 0xdd, 0x03, + 0xf6, 0x3a, 0x15, 0x59, 0x7e, 0x23, 0xf5, 0x8c, 0xb4, 0x88, 0x84, 0x16, 0xf8, 0x15, 0xc0, 0x42, + 0xa6, 0xcb, 0x9f, 0x72, 0x2d, 0x74, 0xa1, 0x28, 0xdc, 0x2a, 0x3a, 0x95, 0xe9, 0xf2, 0xb5, 0x21, + 0x5c, 0xf1, 0xde, 
0xa2, 0x04, 0x4c, 0xf3, 0x95, 0x6d, 0x5e, 0xd5, 0x55, 0x40, 0x46, 0xb2, 0x36, + 0x92, 0xab, 0xba, 0x2c, 0x32, 0xf9, 0x01, 0xba, 0xa5, 0x02, 0x23, 0xd1, 0x28, 0xb0, 0x3d, 0xfb, + 0x73, 0x7b, 0xc6, 0xaf, 0xa1, 0x9b, 0x38, 0x65, 0xb6, 0x70, 0x78, 0xcc, 0x4b, 0x2d, 0x8f, 0x95, + 0xbb, 0xba, 0x9b, 0xfc, 0xc9, 0x5f, 0x4d, 0xe8, 0xcc, 0x28, 0xcf, 0x45, 0x4c, 0xf8, 0x1c, 0x02, + 0xbd, 0x75, 0xf8, 0x59, 0x59, 0xc3, 0xd1, 0x55, 0x8f, 0x4d, 0x1a, 0x0e, 0xc1, 0xd7, 0xb2, 0x36, + 0x89, 0xaf, 0xa5, 0x19, 0x63, 0xa9, 0xe4, 0xa3, 0x31, 0x0c, 0xb2, 0x19, 0x30, 0x78, 0x3c, 0x20, + 0x8e, 0xa0, 0x73, 0x2b, 0x63, 0xfb, 0x60, 0xad, 0x0a, 0x59, 0x82, 0x5b, 0xdb, 0xda, 0x4f, 0x6d, + 0x7b, 0x0e, 0x1d, 0x4a, 0xb5, 0x5a, 0x51, 0xce, 0x3b, 0xe3, 0xe6, 0x34, 0x3c, 0xde, 0xa9, 0x6d, + 0x46, 0x59, 0xca, 0xe5, 0xe0, 0x01, 0xb4, 0x17, 0x32, 0x49, 0x56, 0x9a, 0x77, 0x2b, 0xb5, 0x1c, + 0x86, 0xc7, 0xd0, 0xcd, 0x9d, 0x63, 0xbc, 0x67, 0x9d, 0x64, 0x8f, 0x9d, 0x2c, 0x1d, 0x2c, 0xf3, + 0x4c, 0x45, 0x45, 0x3f, 0xd3, 0x42, 0x73, 0x18, 0x7b, 0xd3, 0x6e, 0x59, 0xb1, 0xc0, 0xf0, 0x53, + 0x80, 0xe2, 0x74, 0xbe, 0x4a, 0x35, 0x0f, 0x2b, 0x3d, 0x2b, 0x38, 0x72, 0xe8, 0x2c, 0x64, 0xaa, + 0xe9, 0x9d, 0xe6, 0x7d, 0xfb, 0xb0, 0x65, 0x38, 0xf9, 0x11, 0x7a, 0xe7, 0x42, 0x45, 0xc5, 0xfa, + 0x94, 0x0e, 0x7a, 0x4f, 0x1c, 0xe4, 0x10, 0xbc, 0x95, 0x9a, 0xea, 0xfb, 0x6e, 0x90, 0xca, 0xc0, + 0xcd, 0xa7, 0x03, 0x4f, 0xbe, 0x81, 0xde, 0x66, 0x5d, 0x71, 0x08, 0xad, 0x54, 0x46, 0x94, 0x73, + 0x6f, 0xdc, 0x9c, 0x06, 0xf3, 0x22, 0xc0, 0x7d, 0xe8, 0xde, 0x92, 0x50, 0x29, 0xa9, 0x9c, 0xfb, + 0x96, 0xd8, 0xc4, 0x93, 0x3f, 0x3c, 0x00, 0x73, 0xff, 0xf4, 0x46, 0xa4, 0xb1, 0xdd, 0x88, 0x8b, + 0xb3, 0x9a, 0x3a, 0xff, 0xe2, 0x0c, 0xbf, 0x70, 0x1f, 0xae, 0x6f, 0xd7, 0xea, 0xe3, 0xea, 0x67, + 0x52, 0xdc, 0x7b, 0xf2, 0xf5, 0x1e, 0x40, 0xfb, 0x52, 0x46, 0x74, 0x71, 0x56, 0xd7, 0x5c, 0x60, + 0xc6, 0xac, 0x53, 0x67, 0x56, 0xf1, 0xa1, 0x96, 0xe1, 0xe1, 0x97, 0xd0, 0xdb, 0xfc, 0x0e, 0x70, + 0x17, 0x42, 0x1b, 0x5c, 0x4a, 0x95, 0x88, 0x5b, 0xd6, 0xc0, 0x67, 0xb0, 0x6b, 0x81, 0x6d, 0x63, + 0xe6, 0x1d, 0xfe, 0xed, 0x43, 0x58, 0x59, 0x70, 0x04, 0x68, 0xcf, 0xf2, 0xf8, 0x7c, 0x9d, 0xb1, + 0x06, 0x86, 0xd0, 0x99, 0xe5, 0xf1, 0x09, 0x09, 0xcd, 0x3c, 0x17, 0xbc, 0x52, 0x32, 0x63, 0xbe, + 0xcb, 0x7a, 0x91, 0x65, 0xac, 0x89, 0x03, 0x80, 0xe2, 0x3c, 0xa7, 0x3c, 0x63, 0x81, 0x4b, 0xfc, + 0x5e, 0x6a, 0x62, 0x2d, 0x23, 0xc2, 0x05, 0x96, 0x6d, 0x3b, 0xd6, 0x2c, 0x13, 0xeb, 0x20, 0x83, + 0xbe, 0x69, 0x46, 0x42, 0xe9, 0x6b, 0xd3, 0xa5, 0x8b, 0x43, 0x60, 0x55, 0xc4, 0x5e, 0xea, 0x21, + 0xc2, 0x60, 0x96, 0xc7, 0x6f, 0x52, 0x45, 0x62, 0x71, 0x23, 0xae, 0x6f, 0x89, 0x01, 0xee, 0xc1, + 0x8e, 0x2b, 0x64, 0x1e, 0x6f, 0x9d, 0xb3, 0xd0, 0xa5, 0x9d, 0xde, 0xd0, 0xe2, 0x97, 0xef, 0xd6, + 0x52, 0xad, 0x13, 0xd6, 0xc7, 0x8f, 0x60, 0x6f, 0x96, 0xc7, 0x57, 0x4a, 0xa4, 0xf9, 0x92, 0xd4, + 0x4b, 0x12, 0x11, 0x29, 0xb6, 0xe3, 0x6e, 0x5f, 0xad, 0x12, 0x92, 0x6b, 0x7d, 0x29, 0x7f, 0x65, + 0x03, 0x27, 0x66, 0x4e, 0x22, 0xb2, 0x3f, 0x43, 0xb6, 0xeb, 0xc4, 0x6c, 0x10, 0x2b, 0x86, 0xb9, + 0x79, 0x5f, 0x29, 0xb2, 0x23, 0xee, 0xb9, 0xae, 0x2e, 0xb6, 0x39, 0x78, 0x78, 0x07, 0x83, 0xfa, + 0xf3, 0x1a, 0x1d, 0x5b, 0xe4, 0x45, 0x14, 0x99, 0xb7, 0x64, 0x0d, 0xe4, 0x30, 0xdc, 0xc2, 0x73, + 0x4a, 0xe4, 0x5b, 0xb2, 0x8c, 0x57, 0x67, 0xde, 0x64, 0x91, 0xd0, 0x05, 0xe3, 0xe3, 0x01, 0xf0, + 0x5a, 0xa9, 0x97, 0xc5, 0x36, 0x5a, 0xb6, 0x79, 0xc2, 0xef, 0x3f, 0x8c, 0x1a, 0xef, 0x3f, 0x8c, + 0x1a, 0xf7, 0x0f, 0x23, 0xef, 0xfd, 0xc3, 0xc8, 0xfb, 0xf7, 0x61, 0xe4, 0xfd, 0xf9, 0xdf, 0xa8, + 0xf1, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 
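+	// These bytes are the gzipped FileDescriptorProto for raft.proto (815
+	// bytes, per the comment at the top of the literal); proto.RegisterFile
+	// in the init function above hands them to the protobuf runtime's file
+	// registry so the full descriptor can be recovered at runtime.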
0x86, 0x52, 0x5b, 0xe0, 0x74, 0x06, 0x00, 0x00, +} diff --git a/extender/code/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto b/extender/code/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto new file mode 100644 index 000000000..644ce7b8f --- /dev/null +++ b/extender/code/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto @@ -0,0 +1,95 @@ +syntax = "proto2"; +package raftpb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; +option (gogoproto.goproto_enum_prefix_all) = false; + +enum EntryType { + EntryNormal = 0; + EntryConfChange = 1; +} + +message Entry { + optional uint64 Term = 2 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations + optional uint64 Index = 3 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations + optional EntryType Type = 1 [(gogoproto.nullable) = false]; + optional bytes Data = 4; +} + +message SnapshotMetadata { + optional ConfState conf_state = 1 [(gogoproto.nullable) = false]; + optional uint64 index = 2 [(gogoproto.nullable) = false]; + optional uint64 term = 3 [(gogoproto.nullable) = false]; +} + +message Snapshot { + optional bytes data = 1; + optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false]; +} + +enum MessageType { + MsgHup = 0; + MsgBeat = 1; + MsgProp = 2; + MsgApp = 3; + MsgAppResp = 4; + MsgVote = 5; + MsgVoteResp = 6; + MsgSnap = 7; + MsgHeartbeat = 8; + MsgHeartbeatResp = 9; + MsgUnreachable = 10; + MsgSnapStatus = 11; + MsgCheckQuorum = 12; + MsgTransferLeader = 13; + MsgTimeoutNow = 14; + MsgReadIndex = 15; + MsgReadIndexResp = 16; + MsgPreVote = 17; + MsgPreVoteResp = 18; +} + +message Message { + optional MessageType type = 1 [(gogoproto.nullable) = false]; + optional uint64 to = 2 [(gogoproto.nullable) = false]; + optional uint64 from = 3 [(gogoproto.nullable) = false]; + optional uint64 term = 4 [(gogoproto.nullable) = false]; + optional uint64 logTerm = 5 [(gogoproto.nullable) = false]; + optional uint64 index = 6 [(gogoproto.nullable) = false]; + repeated Entry entries = 7 [(gogoproto.nullable) = false]; + optional uint64 commit = 8 [(gogoproto.nullable) = false]; + optional Snapshot snapshot = 9 [(gogoproto.nullable) = false]; + optional bool reject = 10 [(gogoproto.nullable) = false]; + optional uint64 rejectHint = 11 [(gogoproto.nullable) = false]; + optional bytes context = 12; +} + +message HardState { + optional uint64 term = 1 [(gogoproto.nullable) = false]; + optional uint64 vote = 2 [(gogoproto.nullable) = false]; + optional uint64 commit = 3 [(gogoproto.nullable) = false]; +} + +message ConfState { + repeated uint64 nodes = 1; + repeated uint64 learners = 2; +} + +enum ConfChangeType { + ConfChangeAddNode = 0; + ConfChangeRemoveNode = 1; + ConfChangeUpdateNode = 2; + ConfChangeAddLearnerNode = 3; +} + +message ConfChange { + optional uint64 ID = 1 [(gogoproto.nullable) = false]; + optional ConfChangeType Type = 2 [(gogoproto.nullable) = false]; + optional uint64 NodeID = 3 [(gogoproto.nullable) = false]; + optional bytes Context = 4; +} diff --git a/extender/code/vendor/github.com/davecgh/go-spew/LICENSE b/extender/code/vendor/github.com/davecgh/go-spew/LICENSE new file mode 100644 index 000000000..bc52e96f2 --- /dev/null +++ b/extender/code/vendor/github.com/davecgh/go-spew/LICENSE @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and/or 
distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/extender/code/vendor/github.com/davecgh/go-spew/spew/bypass.go b/extender/code/vendor/github.com/davecgh/go-spew/spew/bypass.go new file mode 100644 index 000000000..792994785 --- /dev/null +++ b/extender/code/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -0,0 +1,145 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// Go versions prior to 1.4 are disabled because they use a different layout +// for interfaces which make the implementation of unsafeReflectValue more complex. +// +build !js,!appengine,!safe,!disableunsafe,go1.4 + +package spew + +import ( + "reflect" + "unsafe" +) + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = false + + // ptrSize is the size of a pointer on the current arch. + ptrSize = unsafe.Sizeof((*byte)(nil)) +) + +type flag uintptr + +var ( + // flagRO indicates whether the value field of a reflect.Value + // is read-only. + flagRO flag + + // flagAddr indicates whether the address of the reflect.Value's + // value may be taken. + flagAddr flag +) + +// flagKindMask holds the bits that make up the kind +// part of the flags field. In all the supported versions, +// it is in the lower 5 bits. +const flagKindMask = flag(0x1f) + +// Different versions of Go have used different +// bit layouts for the flags type. This table +// records the known combinations. +var okFlags = []struct { + ro, addr flag +}{{ + // From Go 1.4 to 1.5 + ro: 1 << 5, + addr: 1 << 7, +}, { + // Up to Go tip. + ro: 1<<5 | 1<<6, + addr: 1 << 8, +}} + +var flagValOffset = func() uintptr { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + return field.Offset +}() + +// flagField returns a pointer to the flag field of a reflect.Value. 
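+// The pointer arithmetic it performs is effectively &v.flag: flagValOffset,
+// computed above via FieldByName at init time, is the offset of the
+// unexported "flag" field within reflect.Value, so adding it to the Value's
+// own address lands on that field.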
+func flagField(v *reflect.Value) *flag { + return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) +} + +// unsafeReflectValue converts the passed reflect.Value into one that bypasses +// the typical safety restrictions preventing access to unaddressable and +// unexported data. It works by digging the raw pointer to the underlying +// value out of the protected value and generating a new unprotected (unsafe) +// reflect.Value to it. +// +// This allows us to check for implementations of the Stringer and error +// interfaces to be used for pretty printing ordinarily unaddressable and +// inaccessible values such as unexported struct fields. +func unsafeReflectValue(v reflect.Value) reflect.Value { + if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { + return v + } + flagFieldPtr := flagField(&v) + *flagFieldPtr &^= flagRO + *flagFieldPtr |= flagAddr + return v +} + +// Sanity checks against future reflect package changes +// to the type or semantics of the Value.flag field. +func init() { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { + panic("reflect.Value flag field has changed kind") + } + type t0 int + var t struct { + A t0 + // t0 will have flagEmbedRO set. + t0 + // a will have flagStickyRO set + a t0 + } + vA := reflect.ValueOf(t).FieldByName("A") + va := reflect.ValueOf(t).FieldByName("a") + vt0 := reflect.ValueOf(t).FieldByName("t0") + + // Infer flagRO from the difference between the flags + // for the (otherwise identical) fields in t. + flagPublic := *flagField(&vA) + flagWithRO := *flagField(&va) | *flagField(&vt0) + flagRO = flagPublic ^ flagWithRO + + // Infer flagAddr from the difference between a value + // taken from a pointer and not. + vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") + flagNoPtr := *flagField(&vA) + flagPtr := *flagField(&vPtrA) + flagAddr = flagNoPtr ^ flagPtr + + // Check that the inferred flags tally with one of the known versions. + for _, f := range okFlags { + if flagRO == f.ro && flagAddr == f.addr { + return + } + } + panic("reflect.Value read-only flag has changed semantics") +} diff --git a/extender/code/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/extender/code/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go new file mode 100644 index 000000000..205c28d68 --- /dev/null +++ b/extender/code/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -0,0 +1,38 @@ +// Copyright (c) 2015-2016 Dave Collins <dave@davec.name> +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is running on Google App Engine, compiled by GopherJS, or +// "-tags safe" is added to the go build command line.
The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build js appengine safe disableunsafe !go1.4 + +package spew + +import "reflect" + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = true +) + +// unsafeReflectValue typically converts the passed reflect.Value into a one +// that bypasses the typical safety restrictions preventing access to +// unaddressable and unexported data. However, doing this relies on access to +// the unsafe package. This is a stub version which simply returns the passed +// reflect.Value when the unsafe package is not available. +func unsafeReflectValue(v reflect.Value) reflect.Value { + return v +} diff --git a/extender/code/vendor/github.com/davecgh/go-spew/spew/common.go b/extender/code/vendor/github.com/davecgh/go-spew/spew/common.go new file mode 100644 index 000000000..1be8ce945 --- /dev/null +++ b/extender/code/vendor/github.com/davecgh/go-spew/spew/common.go @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "reflect" + "sort" + "strconv" +) + +// Some constants in the form of bytes to avoid string overhead. This mirrors +// the technique used in the fmt package. +var ( + panicBytes = []byte("(PANIC=") + plusBytes = []byte("+") + iBytes = []byte("i") + trueBytes = []byte("true") + falseBytes = []byte("false") + interfaceBytes = []byte("(interface {})") + commaNewlineBytes = []byte(",\n") + newlineBytes = []byte("\n") + openBraceBytes = []byte("{") + openBraceNewlineBytes = []byte("{\n") + closeBraceBytes = []byte("}") + asteriskBytes = []byte("*") + colonBytes = []byte(":") + colonSpaceBytes = []byte(": ") + openParenBytes = []byte("(") + closeParenBytes = []byte(")") + spaceBytes = []byte(" ") + pointerChainBytes = []byte("->") + nilAngleBytes = []byte("") + maxNewlineBytes = []byte("\n") + maxShortBytes = []byte("") + circularBytes = []byte("") + circularShortBytes = []byte("") + invalidAngleBytes = []byte("") + openBracketBytes = []byte("[") + closeBracketBytes = []byte("]") + percentBytes = []byte("%") + precisionBytes = []byte(".") + openAngleBytes = []byte("<") + closeAngleBytes = []byte(">") + openMapBytes = []byte("map[") + closeMapBytes = []byte("]") + lenEqualsBytes = []byte("len=") + capEqualsBytes = []byte("cap=") +) + +// hexDigits is used to map a decimal value to a hex digit. +var hexDigits = "0123456789abcdef" + +// catchPanic handles any panics that might occur during the handleMethods +// calls. 
+func catchPanic(w io.Writer, v reflect.Value) { + if err := recover(); err != nil { + w.Write(panicBytes) + fmt.Fprintf(w, "%v", err) + w.Write(closeParenBytes) + } +} + +// handleMethods attempts to call the Error and String methods on the underlying +// type the passed reflect.Value represents and outputs the result to Writer w. +// +// It handles panics in any called methods by catching and displaying the error +// as the formatted value. +func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { + // We need an interface to check if the type implements the error or + // Stringer interface. However, the reflect package won't give us an + // interface on certain things like unexported struct fields in order + // to enforce visibility rules. We use unsafe, when it's available, + // to bypass these restrictions since this package does not mutate the + // values. + if !v.CanInterface() { + if UnsafeDisabled { + return false + } + + v = unsafeReflectValue(v) + } + + // Choose whether or not to do error and Stringer interface lookups against + // the base type or a pointer to the base type depending on settings. + // Technically calling one of these methods with a pointer receiver can + // mutate the value, however, types which choose to satisfy an error or + // Stringer interface with a pointer receiver should not be mutating their + // state inside these interface methods. + if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { + v = unsafeReflectValue(v) + } + if v.CanAddr() { + v = v.Addr() + } + + // Is it an error or Stringer? + switch iface := v.Interface().(type) { + case error: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.Error())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + + w.Write([]byte(iface.Error())) + return true + + case fmt.Stringer: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.String())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + w.Write([]byte(iface.String())) + return true + } + return false +} + +// printBool outputs a boolean value as true or false to Writer w. +func printBool(w io.Writer, val bool) { + if val { + w.Write(trueBytes) + } else { + w.Write(falseBytes) + } +} + +// printInt outputs a signed integer value to Writer w. +func printInt(w io.Writer, val int64, base int) { + w.Write([]byte(strconv.FormatInt(val, base))) +} + +// printUint outputs an unsigned integer value to Writer w. +func printUint(w io.Writer, val uint64, base int) { + w.Write([]byte(strconv.FormatUint(val, base))) +} + +// printFloat outputs a floating point value using the specified precision, +// which is expected to be 32 or 64 bit, to Writer w. +func printFloat(w io.Writer, val float64, precision int) { + w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) +} + +// printComplex outputs a complex value using the specified float precision +// for the real and imaginary parts to Writer w. +func printComplex(w io.Writer, c complex128, floatPrecision int) { + r := real(c) + w.Write(openParenBytes) + w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) + i := imag(c) + if i >= 0 { + w.Write(plusBytes) + } + w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) + w.Write(iBytes) + w.Write(closeParenBytes) +} + +// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' +// prefix to Writer w.
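+// For example, printHexPtr(w, uintptr(0xcafe)) writes "0xcafe", while a
+// zero pointer short-circuits to nilAngleBytes and writes "<nil>".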
+func printHexPtr(w io.Writer, p uintptr) { + // Null pointer. + num := uint64(p) + if num == 0 { + w.Write(nilAngleBytes) + return + } + + // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix + buf := make([]byte, 18) + + // It's simpler to construct the hex string right to left. + base := uint64(16) + i := len(buf) - 1 + for num >= base { + buf[i] = hexDigits[num%base] + num /= base + i-- + } + buf[i] = hexDigits[num] + + // Add '0x' prefix. + i-- + buf[i] = 'x' + i-- + buf[i] = '0' + + // Strip unused leading bytes. + buf = buf[i:] + w.Write(buf) +} + +// valuesSorter implements sort.Interface to allow a slice of reflect.Value +// elements to be sorted. +type valuesSorter struct { + values []reflect.Value + strings []string // either nil or same len and values + cs *ConfigState +} + +// newValuesSorter initializes a valuesSorter instance, which holds a set of +// surrogate keys on which the data should be sorted. It uses flags in +// ConfigState to decide if and how to populate those surrogate keys. +func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { + vs := &valuesSorter{values: values, cs: cs} + if canSortSimply(vs.values[0].Kind()) { + return vs + } + if !cs.DisableMethods { + vs.strings = make([]string, len(values)) + for i := range vs.values { + b := bytes.Buffer{} + if !handleMethods(cs, &b, vs.values[i]) { + vs.strings = nil + break + } + vs.strings[i] = b.String() + } + } + if vs.strings == nil && cs.SpewKeys { + vs.strings = make([]string, len(values)) + for i := range vs.values { + vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) + } + } + return vs +} + +// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted +// directly, or whether it should be considered for sorting by surrogate keys +// (if the ConfigState allows it). +func canSortSimply(kind reflect.Kind) bool { + // This switch parallels valueSortLess, except for the default case. + switch kind { + case reflect.Bool: + return true + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Uintptr: + return true + case reflect.Array: + return true + } + return false +} + +// Len returns the number of values in the slice. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Len() int { + return len(s.values) +} + +// Swap swaps the values at the passed indices. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] + if s.strings != nil { + s.strings[i], s.strings[j] = s.strings[j], s.strings[i] + } +} + +// valueSortLess returns whether the first value should sort before the second +// value. It is used by valueSorter.Less as part of the sort.Interface +// implementation. 
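+// For kinds not handled by the switch below, control falls through to the
+// final a.String() < b.String() comparison; reflect.Value.String renders
+// such values as "<T Value>", which at least yields a fixed, type-based
+// order.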
+func valueSortLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Bool: + return !a.Bool() && b.Bool() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return a.Int() < b.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return a.Uint() < b.Uint() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.String: + return a.String() < b.String() + case reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Array: + // Compare the contents of both arrays. + l := a.Len() + for i := 0; i < l; i++ { + av := a.Index(i) + bv := b.Index(i) + if av.Interface() == bv.Interface() { + continue + } + return valueSortLess(av, bv) + } + } + return a.String() < b.String() +} + +// Less returns whether the value at index i should sort before the +// value at index j. It is part of the sort.Interface implementation. +func (s *valuesSorter) Less(i, j int) bool { + if s.strings == nil { + return valueSortLess(s.values[i], s.values[j]) + } + return s.strings[i] < s.strings[j] +} + +// sortValues is a sort function that handles both native types and any type that +// can be converted to error or Stringer. Other inputs are sorted according to +// their Value.String() value to ensure display stability. +func sortValues(values []reflect.Value, cs *ConfigState) { + if len(values) == 0 { + return + } + sort.Sort(newValuesSorter(values, cs)) +} diff --git a/extender/code/vendor/github.com/davecgh/go-spew/spew/config.go b/extender/code/vendor/github.com/davecgh/go-spew/spew/config.go new file mode 100644 index 000000000..2e3d22f31 --- /dev/null +++ b/extender/code/vendor/github.com/davecgh/go-spew/spew/config.go @@ -0,0 +1,306 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "os" +) + +// ConfigState houses the configuration options used by spew to format and +// display values. There is a global instance, Config, that is used to control +// all top-level Formatter and Dump functionality. Each ConfigState instance +// provides methods equivalent to the top-level functions. +// +// The zero value for ConfigState provides no indentation. You would typically +// want to set it to a space or a tab. +// +// Alternatively, you can use NewDefaultConfig to get a ConfigState instance +// with default settings. See the documentation of NewDefaultConfig for default +// values. +type ConfigState struct { + // Indent specifies the string to use for each indentation level. The + // global config instance that all top-level functions use set this to a + // single space by default. If you would like more indentation, you might + // set this to a tab with "\t" or perhaps two spaces with " ". 
+ Indent string + + // MaxDepth controls the maximum number of levels to descend into nested + // data structures. The default, 0, means there is no limit. + // + // NOTE: Circular data structures are properly detected, so it is not + // necessary to set this value unless you specifically want to limit deeply + // nested data structures. + MaxDepth int + + // DisableMethods specifies whether or not error and Stringer interfaces are + // invoked for types that implement them. + DisableMethods bool + + // DisablePointerMethods specifies whether or not to check for and invoke + // error and Stringer interfaces on types which only accept a pointer + // receiver when the current type is not a pointer. + // + // NOTE: This might be an unsafe action since calling one of these methods + // with a pointer receiver could technically mutate the value, however, + // in practice, types which choose to satisfy an error or Stringer + // interface with a pointer receiver should not be mutating their state + // inside these interface methods. As a result, this option relies on + // access to the unsafe package, so it will not have any effect when + // running in environments without access to the unsafe package such as + // Google App Engine or with the "safe" build tag specified. + DisablePointerMethods bool + + // DisablePointerAddresses specifies whether to disable the printing of + // pointer addresses. This is useful when diffing data structures in tests. + DisablePointerAddresses bool + + // DisableCapacities specifies whether to disable the printing of capacities + // for arrays, slices, maps and channels. This is useful when diffing + // data structures in tests. + DisableCapacities bool + + // ContinueOnMethod specifies whether or not recursion should continue once + // a custom error or Stringer interface is invoked. The default, false, + // means it will print the results of invoking the custom error or Stringer + // interface and return immediately instead of continuing to recurse into + // the internals of the data type. + // + // NOTE: This flag does not have any effect if method invocation is disabled + // via the DisableMethods or DisablePointerMethods options. + ContinueOnMethod bool + + // SortKeys specifies map keys should be sorted before being printed. Use + // this to have a more deterministic, diffable output. Note that only + // native types (bool, int, uint, floats, uintptr and string) and types + // that support the error or Stringer interfaces (if methods are + // enabled) are supported, with other types sorted according to the + // reflect.Value.String() output which guarantees display stability. + SortKeys bool + + // SpewKeys specifies that, as a last resort attempt, map keys should + // be spewed to strings and sorted by those strings. This is only + // considered if SortKeys is true. + SpewKeys bool +} + +// Config is the active configuration of the top-level functions. +// The configuration can be changed by modifying the contents of spew.Config. +var Config = ConfigState{Indent: " "} + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the formatted string as a value that satisfies error. See NewFormatter +// for formatting details.
+// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, c.convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, c.convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, c.convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a Formatter interface returned by c.NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, c.convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Print(a ...interface{}) (n int, err error) { + return fmt.Print(c.convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, c.convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Println(a ...interface{}) (n int, err error) { + return fmt.Println(c.convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprint(a ...interface{}) string { + return fmt.Sprint(c.convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, c.convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a Formatter interface returned by c.NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintln(a ...interface{}) string { + return fmt.Sprintln(c.convertArgs(a)...) +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +c.Printf, c.Println, or c.Fprintf. +*/ +func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(c, v) +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { + fdump(c, w, a...) +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by modifying the public members +of c. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func (c *ConfigState) Dump(a ...interface{}) { + fdump(c, os.Stdout, a...)
+} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func (c *ConfigState) Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(c, &buf, a...) + return buf.String() +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a spew Formatter interface using +// the ConfigState associated with s. +func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = newFormatter(c, arg) + } + return formatters +} + +// NewDefaultConfig returns a ConfigState with the following default settings. +// +// Indent: " " +// MaxDepth: 0 +// DisableMethods: false +// DisablePointerMethods: false +// ContinueOnMethod: false +// SortKeys: false +func NewDefaultConfig() *ConfigState { + return &ConfigState{Indent: " "} +} diff --git a/extender/code/vendor/github.com/davecgh/go-spew/spew/doc.go b/extender/code/vendor/github.com/davecgh/go-spew/spew/doc.go new file mode 100644 index 000000000..aacaac6f1 --- /dev/null +++ b/extender/code/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Package spew implements a deep pretty printer for Go data structures to aid in +debugging. + +A quick overview of the additional features spew provides over the built-in +printing facilities for Go data types are as follows: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output (only when using + Dump style) + +There are two different approaches spew allows for dumping Go data structures: + + * Dump style which prints with newlines, customizable indentation, + and additional debug information such as types and all pointer addresses + used to indirect to the final value + * A custom Formatter interface that integrates cleanly with the standard fmt + package and replaces %v, %+v, %#v, and %#+v to provide inline printing + similar to the default %v while providing the additional functionality + outlined above and passing unsupported format verbs such as %x and %q + along to fmt + +Quick Start + +This section demonstrates how to quickly get started with spew. See the +sections below for further details on formatting and configuration options. 
+ +To dump a variable with full newlines, indentation, type, and pointer +information use Dump, Fdump, or Sdump: + spew.Dump(myVar1, myVar2, ...) + spew.Fdump(someWriter, myVar1, myVar2, ...) + str := spew.Sdump(myVar1, myVar2, ...) + +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc with +%v (most compact), %+v (adds pointer addresses), %#v (adds types), or +%#+v (adds types and pointer addresses): + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +Configuration Options + +Configuration of spew is handled by fields in the ConfigState type. For +convenience, all of the top-level functions use a global state available +via the spew.Config global. + +It is also possible to create a ConfigState instance that provides methods +equivalent to the top-level functions. This allows concurrent configuration +options. See the ConfigState documentation for more details. + +The following configuration options are available: + * Indent + String to use for each indentation level for Dump functions. + It is a single space by default. A popular alternative is "\t". + + * MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. + + * DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + + * DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. + Pointer method invocation is enabled by default. + + * DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + + * DisableCapacities + DisableCapacities specifies whether to disable the printing of + capacities for arrays, slices, maps and channels. This is useful when + diffing data structures in tests. + + * ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + + * SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. Note that + only native types (bool, int, uint, floats, uintptr and string) + and types which implement error or Stringer interfaces are + supported with other types sorted according to the + reflect.Value.String() output which guarantees display + stability. Natural map order is used by default. + + * SpewKeys + Specifies that, as a last resort attempt, map keys should be + spewed to strings and sorted by those strings. This is only + considered if SortKeys is true. + +Dump Usage + +Simply call spew.Dump with a list of variables you want to dump: + + spew.Dump(myVar1, myVar2, ...) + +You may also call spew.Fdump if you would prefer to output to an arbitrary +io.Writer. For example, to dump to standard error: + + spew.Fdump(os.Stderr, myVar1, myVar2, ...) + +A third option is to call spew.Sdump to get the formatted output as a string: + + str := spew.Sdump(myVar1, myVar2, ...) + +Sample Dump Output + +See the Dump example for details on the setup of the types and variables being +shown here. 
+ + (main.Foo) { + unexportedField: (*main.Bar)(0xf84002e210)({ + flag: (main.Flag) flagTwo, + data: (uintptr) + }), + ExportedField: (map[interface {}]interface {}) (len=1) { + (string) (len=3) "one": (bool) true + } + } + +Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C +command as shown. + ([]uint8) (len=32 cap=32) { + 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | + 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + 00000020 31 32 |12| + } + +Custom Formatter + +Spew provides a custom formatter that implements the fmt.Formatter interface +so that it integrates cleanly with standard fmt package printing functions. The +formatter is useful for inline printing of smaller data types similar to the +standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Custom Formatter Usage + +The simplest way to make use of the spew custom formatter is to call one of the +convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The +functions have syntax you are most likely already familiar with: + + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Println(myVar, myVar2) + spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +See the Index for the full list of convenience functions. + +Sample Formatter Output + +Double pointer to a uint8: + %v: <**>5 + %+v: <**>(0xf8400420d0->0xf8400420c8)5 + %#v: (**uint8)5 + %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 + +Pointer to circular struct with a uint8 field and a pointer to itself: + %v: <*>{1 <*>} + %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} + %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} + %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} + +See the Printf example for details on the setup of variables being shown +here. + +Errors + +Since it is possible for custom Stringer/error interfaces to panic, spew +detects them and handles them internally by printing the panic information +inline with the output. Since spew is intended to provide deep pretty printing +capabilities on structures, it intentionally does not return any errors. +*/ +package spew diff --git a/extender/code/vendor/github.com/davecgh/go-spew/spew/dump.go b/extender/code/vendor/github.com/davecgh/go-spew/spew/dump.go new file mode 100644 index 000000000..f78d89fc1 --- /dev/null +++ b/extender/code/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -0,0 +1,509 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "os" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + // uint8Type is a reflect.Type representing a uint8. It is used to + // convert cgo types to uint8 slices for hexdumping. + uint8Type = reflect.TypeOf(uint8(0)) + + // cCharRE is a regular expression that matches a cgo char. + // It is used to detect character arrays to hexdump them. + cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) + + // cUnsignedCharRE is a regular expression that matches a cgo unsigned + // char. It is used to detect unsigned character arrays to hexdump + // them. + cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) + + // cUint8tCharRE is a regular expression that matches a cgo uint8_t. + // It is used to detect uint8_t arrays to hexdump them. + cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) +) + +// dumpState contains information about the state of a dump operation. +type dumpState struct { + w io.Writer + depth int + pointers map[uintptr]int + ignoreNextType bool + ignoreNextIndent bool + cs *ConfigState +} + +// indent performs indentation according to the depth level and cs.Indent +// option. +func (d *dumpState) indent() { + if d.ignoreNextIndent { + d.ignoreNextIndent = false + return + } + d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) +} + +// unpackValue returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +// dumpPtr handles formatting of pointers by indirecting them as necessary. +func (d *dumpState) dumpPtr(v reflect.Value) { + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range d.pointers { + if depth >= d.depth { + delete(d.pointers, k) + } + } + + // Keep list of all dereferenced pointers to show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := d.pointers[addr]; ok && pd < d.depth { + cycleFound = true + indirects-- + break + } + d.pointers[addr] = d.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type information. + d.w.Write(openParenBytes) + d.w.Write(bytes.Repeat(asteriskBytes, indirects)) + d.w.Write([]byte(ve.Type().String())) + d.w.Write(closeParenBytes) + + // Display pointer information. 
+ if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { + d.w.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + d.w.Write(pointerChainBytes) + } + printHexPtr(d.w, addr) + } + d.w.Write(closeParenBytes) + } + + // Display dereferenced value. + d.w.Write(openParenBytes) + switch { + case nilFound: + d.w.Write(nilAngleBytes) + + case cycleFound: + d.w.Write(circularBytes) + + default: + d.ignoreNextType = true + d.dump(ve) + } + d.w.Write(closeParenBytes) +} + +// dumpSlice handles formatting of arrays and slices. Byte (uint8 under +// reflection) arrays and slices are dumped in hexdump -C fashion. +func (d *dumpState) dumpSlice(v reflect.Value) { + // Determine whether this type should be hex dumped or not. Also, + // for types which should be hexdumped, try to use the underlying data + // first, then fall back to trying to convert them to a uint8 slice. + var buf []uint8 + doConvert := false + doHexDump := false + numEntries := v.Len() + if numEntries > 0 { + vt := v.Index(0).Type() + vts := vt.String() + switch { + // C types that need to be converted. + case cCharRE.MatchString(vts): + fallthrough + case cUnsignedCharRE.MatchString(vts): + fallthrough + case cUint8tCharRE.MatchString(vts): + doConvert = true + + // Try to use existing uint8 slices and fall back to converting + // and copying if that fails. + case vt.Kind() == reflect.Uint8: + // We need an addressable interface to convert the type + // to a byte slice. However, the reflect package won't + // give us an interface on certain things like + // unexported struct fields in order to enforce + // visibility rules. We use unsafe, when available, to + // bypass these restrictions since this package does not + // mutate the values. + vs := v + if !vs.CanInterface() || !vs.CanAddr() { + vs = unsafeReflectValue(vs) + } + if !UnsafeDisabled { + vs = vs.Slice(0, numEntries) + + // Use the existing uint8 slice if it can be + // type asserted. + iface := vs.Interface() + if slice, ok := iface.([]uint8); ok { + buf = slice + doHexDump = true + break + } + } + + // The underlying data needs to be converted if it can't + // be type asserted to a uint8 slice. + doConvert = true + } + + // Copy and convert the underlying type if needed. + if doConvert && vt.ConvertibleTo(uint8Type) { + // Convert and copy each element into a uint8 byte + // slice. + buf = make([]uint8, numEntries) + for i := 0; i < numEntries; i++ { + vv := v.Index(i) + buf[i] = uint8(vv.Convert(uint8Type).Uint()) + } + doHexDump = true + } + } + + // Hexdump the entire slice as needed. + if doHexDump { + indent := strings.Repeat(d.cs.Indent, d.depth) + str := indent + hex.Dump(buf) + str = strings.Replace(str, "\n", "\n"+indent, -1) + str = strings.TrimRight(str, d.cs.Indent) + d.w.Write([]byte(str)) + return + } + + // Recursively call dump for each item. + for i := 0; i < numEntries; i++ { + d.dump(d.unpackValue(v.Index(i))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } +} + +// dump is the main workhorse for dumping a value. It uses the passed reflect +// value to figure out what kind of object we are dealing with and formats it +// appropriately. It is a recursive function, however circular data structures +// are detected and handled properly. +func (d *dumpState) dump(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + d.w.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. 
+ if kind == reflect.Ptr { + d.indent() + d.dumpPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !d.ignoreNextType { + d.indent() + d.w.Write(openParenBytes) + d.w.Write([]byte(v.Type().String())) + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + d.ignoreNextType = false + + // Display length and capacity if the built-in len and cap functions + // work with the value's kind and the len/cap itself is non-zero. + valueLen, valueCap := 0, 0 + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + valueLen, valueCap = v.Len(), v.Cap() + case reflect.Map, reflect.String: + valueLen = v.Len() + } + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { + d.w.Write(openParenBytes) + if valueLen != 0 { + d.w.Write(lenEqualsBytes) + printInt(d.w, int64(valueLen), 10) + } + if !d.cs.DisableCapacities && valueCap != 0 { + if valueLen != 0 { + d.w.Write(spaceBytes) + } + d.w.Write(capEqualsBytes) + printInt(d.w, int64(valueCap), 10) + } + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + + // Call Stringer/error interfaces if they exist and the handle methods flag + // is enabled + if !d.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(d.cs, d.w, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(d.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(d.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(d.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(d.w, v.Float(), 32) + + case reflect.Float64: + printFloat(d.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(d.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(d.w, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + d.dumpSlice(v) + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.String: + d.w.Write([]byte(strconv.Quote(v.String()))) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + d.w.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+ + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + numEntries := v.Len() + keys := v.MapKeys() + if d.cs.SortKeys { + sortValues(keys, d.cs) + } + for i, key := range keys { + d.dump(d.unpackValue(key)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.MapIndex(key))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Struct: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + d.indent() + vtf := vt.Field(i) + d.w.Write([]byte(vtf.Name)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.Field(i))) + if i < (numFields - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(d.w, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(d.w, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it in case any new + // types are added. + default: + if v.CanInterface() { + fmt.Fprintf(d.w, "%v", v.Interface()) + } else { + fmt.Fprintf(d.w, "%v", v.String()) + } + } +} + +// fdump is a helper function to consolidate the logic from the various public +// methods which take varying writers and config states. +func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { + for _, arg := range a { + if arg == nil { + w.Write(interfaceBytes) + w.Write(spaceBytes) + w.Write(nilAngleBytes) + w.Write(newlineBytes) + continue + } + + d := dumpState{w: w, cs: cs} + d.pointers = make(map[uintptr]int) + d.dump(reflect.ValueOf(arg)) + d.w.Write(newlineBytes) + } +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func Fdump(w io.Writer, a ...interface{}) { + fdump(&Config, w, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(&Config, &buf, a...) + return buf.String() +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by an exported package global, +spew.Config. 
See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func Dump(a ...interface{}) { + fdump(&Config, os.Stdout, a...) +} diff --git a/extender/code/vendor/github.com/davecgh/go-spew/spew/format.go b/extender/code/vendor/github.com/davecgh/go-spew/spew/format.go new file mode 100644 index 000000000..b04edb7d7 --- /dev/null +++ b/extender/code/vendor/github.com/davecgh/go-spew/spew/format.go @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" +) + +// supportedFlags is a list of all the character flags supported by fmt package. +const supportedFlags = "0-+# " + +// formatState implements the fmt.Formatter interface and contains information +// about the state of a formatting operation. The NewFormatter function can +// be used to get a new Formatter which can be used directly as arguments +// in standard fmt package printing calls. +type formatState struct { + value interface{} + fs fmt.State + depth int + pointers map[uintptr]int + ignoreNextType bool + cs *ConfigState +} + +// buildDefaultFormat recreates the original format string without precision +// and width information to pass in to fmt.Sprintf in the case of an +// unrecognized type. Unless new types are added to the language, this +// function won't ever be called. +func (f *formatState) buildDefaultFormat() (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + buf.WriteRune('v') + + format = buf.String() + return format +} + +// constructOrigFormat recreates the original format string including precision +// and width information to pass along to the standard fmt package. This allows +// automatic deferral of all format strings this package doesn't support. +func (f *formatState) constructOrigFormat(verb rune) (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + if width, ok := f.fs.Width(); ok { + buf.WriteString(strconv.Itoa(width)) + } + + if precision, ok := f.fs.Precision(); ok { + buf.Write(precisionBytes) + buf.WriteString(strconv.Itoa(precision)) + } + + buf.WriteRune(verb) + + format = buf.String() + return format +} + +// unpackValue returns values inside of non-nil interfaces when possible and +// ensures that types for values which have been unpacked from an interface +// are displayed when the show types flag is also set. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. 
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface { + f.ignoreNextType = false + if !v.IsNil() { + v = v.Elem() + } + } + return v +} + +// formatPtr handles formatting of pointers by indirecting them as necessary. +func (f *formatState) formatPtr(v reflect.Value) { + // Display nil if top level pointer is nil. + showTypes := f.fs.Flag('#') + if v.IsNil() && (!showTypes || f.ignoreNextType) { + f.fs.Write(nilAngleBytes) + return + } + + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range f.pointers { + if depth >= f.depth { + delete(f.pointers, k) + } + } + + // Keep list of all dereferenced pointers to possibly show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := f.pointers[addr]; ok && pd < f.depth { + cycleFound = true + indirects-- + break + } + f.pointers[addr] = f.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type or indirection level depending on flags. + if showTypes && !f.ignoreNextType { + f.fs.Write(openParenBytes) + f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) + f.fs.Write([]byte(ve.Type().String())) + f.fs.Write(closeParenBytes) + } else { + if nilFound || cycleFound { + indirects += strings.Count(ve.Type().String(), "*") + } + f.fs.Write(openAngleBytes) + f.fs.Write([]byte(strings.Repeat("*", indirects))) + f.fs.Write(closeAngleBytes) + } + + // Display pointer information depending on flags. + if f.fs.Flag('+') && (len(pointerChain) > 0) { + f.fs.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + f.fs.Write(pointerChainBytes) + } + printHexPtr(f.fs, addr) + } + f.fs.Write(closeParenBytes) + } + + // Display dereferenced value. + switch { + case nilFound: + f.fs.Write(nilAngleBytes) + + case cycleFound: + f.fs.Write(circularShortBytes) + + default: + f.ignoreNextType = true + f.format(ve) + } +} + +// format is the main workhorse for providing the Formatter interface. It +// uses the passed reflect value to figure out what kind of object we are +// dealing with and formats it appropriately. It is a recursive function, +// however circular data structures are detected and handled properly. +func (f *formatState) format(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + f.fs.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + f.formatPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !f.ignoreNextType && f.fs.Flag('#') { + f.fs.Write(openParenBytes) + f.fs.Write([]byte(v.Type().String())) + f.fs.Write(closeParenBytes) + } + f.ignoreNextType = false + + // Call Stringer/error interfaces if they exist and the handle methods + // flag is enabled. + if !f.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(f.cs, f.fs, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing.
We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(f.fs, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(f.fs, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(f.fs, v.Uint(), 10) + + case reflect.Float32: + printFloat(f.fs, v.Float(), 32) + + case reflect.Float64: + printFloat(f.fs, v.Float(), 64) + + case reflect.Complex64: + printComplex(f.fs, v.Complex(), 32) + + case reflect.Complex128: + printComplex(f.fs, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + f.fs.Write(openBracketBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(v.Index(i))) + } + } + f.depth-- + f.fs.Write(closeBracketBytes) + + case reflect.String: + f.fs.Write([]byte(v.String())) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + f.fs.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. + + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + + f.fs.Write(openMapBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + keys := v.MapKeys() + if f.cs.SortKeys { + sortValues(keys, f.cs) + } + for i, key := range keys { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(key)) + f.fs.Write(colonBytes) + f.ignoreNextType = true + f.format(f.unpackValue(v.MapIndex(key))) + } + } + f.depth-- + f.fs.Write(closeMapBytes) + + case reflect.Struct: + numFields := v.NumField() + f.fs.Write(openBraceBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + vt := v.Type() + for i := 0; i < numFields; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + vtf := vt.Field(i) + if f.fs.Flag('+') || f.fs.Flag('#') { + f.fs.Write([]byte(vtf.Name)) + f.fs.Write(colonBytes) + } + f.format(f.unpackValue(v.Field(i))) + } + } + f.depth-- + f.fs.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(f.fs, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(f.fs, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it if any get added. + default: + format := f.buildDefaultFormat() + if v.CanInterface() { + fmt.Fprintf(f.fs, format, v.Interface()) + } else { + fmt.Fprintf(f.fs, format, v.String()) + } + } +} + +// Format satisfies the fmt.Formatter interface. See NewFormatter for usage +// details. +func (f *formatState) Format(fs fmt.State, verb rune) { + f.fs = fs + + // Use standard formatting for verbs that are not v. 
+ if verb != 'v' { + format := f.constructOrigFormat(verb) + fmt.Fprintf(fs, format, f.value) + return + } + + if f.value == nil { + if fs.Flag('#') { + fs.Write(interfaceBytes) + } + fs.Write(nilAngleBytes) + return + } + + f.format(reflect.ValueOf(f.value)) +} + +// newFormatter is a helper function to consolidate the logic from the various +// public methods which take varying config states. +func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { + fs := &formatState{value: v, cs: cs} + fs.pointers = make(map[uintptr]int) + return fs +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +Printf, Println, or Fprintf. +*/ +func NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(&Config, v) +} diff --git a/extender/code/vendor/github.com/davecgh/go-spew/spew/spew.go b/extender/code/vendor/github.com/davecgh/go-spew/spew/spew.go new file mode 100644 index 000000000..32c0e3388 --- /dev/null +++ b/extender/code/vendor/github.com/davecgh/go-spew/spew/spew.go @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "fmt" + "io" +) + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the formatted string as a value that satisfies error. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details.
+// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) +func Print(a ...interface{}) (n int, err error) { + return fmt.Print(convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) +func Println(a ...interface{}) (n int, err error) { + return fmt.Println(convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprint(a ...interface{}) string { + return fmt.Sprint(convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details.
+// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintln(a ...interface{}) string { + return fmt.Sprintln(convertArgs(a)...) +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a default spew Formatter interface. +func convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = NewFormatter(arg) + } + return formatters +} diff --git a/extender/code/vendor/github.com/docker/cli/AUTHORS b/extender/code/vendor/github.com/docker/cli/AUTHORS new file mode 100644 index 000000000..ecb6251ba --- /dev/null +++ b/extender/code/vendor/github.com/docker/cli/AUTHORS @@ -0,0 +1,723 @@ +# This file lists all individuals having contributed content to the repository. +# For how it is generated, see `scripts/docs/generate-authors.sh`. + +Aanand Prasad +Aaron L. Xu +Aaron Lehmann +Aaron.L.Xu +Abdur Rehman +Abhinandan Prativadi +Abin Shahab +Ace Tang +Addam Hardy +Adolfo Ochagavía +Adrian Plata +Adrien Duermael +Adrien Folie +Ahmet Alp Balkan +Aidan Feldman +Aidan Hobson Sayers +AJ Bowen +Akihiro Suda +Akim Demaille +Alan Thompson +Albert Callarisa +Aleksa Sarai +Alessandro Boch +Alex Mavrogiannis +Alex Mayer +Alexander Boyd +Alexander Larsson +Alexander Morozov +Alexander Ryabov +Alexandre González +Alfred Landrum +Alicia Lauerman +Allen Sun +Alvin Deng +Amen Belayneh +Amir Goldstein +Amit Krishnan +Amit Shukla +Amy Lindburg +Anda Xu +Andrea Luzzardi +Andreas Köhler +Andrew France +Andrew Hsu +Andrew Macpherson +Andrew McDonnell +Andrew Po +Andrey Petrov +André Martins +Andy Goldstein +Andy Rothfusz +Anil Madhavapeddy +Ankush Agarwal +Anne Henmi +Anton Polonskiy +Antonio Murdaca +Antonis Kalipetis +Anusha Ragunathan +Ao Li +Arash Deshmeh +Arnaud Porterie +Ashwini Oruganti +Azat Khuyiyakhmetov +Bardia Keyoumarsi +Barnaby Gray +Bastiaan Bakker +BastianHofmann +Ben Bonnefoy +Ben Creasy +Ben Firshman +Benjamin Boudreau +Benoit Sigoure +Bhumika Bayani +Bill Wang +Bin Liu +Bingshen Wang +Boaz Shuster +Bogdan Anton +Boris Pruessmann +Bradley Cicenas +Brandon Mitchell +Brandon Philips +Brent Salisbury +Bret Fisher +Brian (bex) Exelbierd +Brian Goff +Bryan Bess +Bryan Boreham +Bryan Murphy +bryfry +Cameron Spear +Cao Weiwei +Carlo Mion +Carlos Alexandro Becker +Ce Gao +Cedric Davies +Cezar Sa Espinola +Chad Faragher +Chao Wang +Charles Chan +Charles Law +Charles Smith +Charlie Drage +ChaYoung You +Chen Chuanliang +Chen Hanxiao +Chen Mingjie +Chen Qiu +Chris Gavin +Chris Gibson +Chris McKinnel +Chris Snow +Chris Weyl +Christian Persson +Christian Stefanescu +Christophe Robin +Christophe Vidal +Christopher Biscardi +Christopher Crone +Christopher Jones +Christy Norman +Chun Chen +Clinton Kitson +Coenraad Loubser +Colin Hebert +Collin Guarino +Colm Hally +Corey Farrell +Corey Quon +Craig Wilhite +Cristian Staretu +Daehyeok Mun +Dafydd Crosby +dalanlan +Damien Nadé +Dan Cotora +Daniel Cassidy +Daniel 
Dao +Daniel Farrell +Daniel Gasienica +Daniel Goosen +Daniel Hiltgen +Daniel J Walsh +Daniel Nephin +Daniel Norberg +Daniel Watkins +Daniel Zhang +Danny Berger +Darren Shepherd +Darren Stahl +Dattatraya Kumbhar +Dave Goodchild +Dave Henderson +Dave Tucker +David Beitey +David Calavera +David Cramer +David Dooling +David Gageot +David Lechner +David Scott +David Sheets +David Williamson +David Xia +David Young +Deng Guangxing +Denis Defreyne +Denis Gladkikh +Denis Ollier +Dennis Docter +Derek McGowan +Deshi Xiao +Dharmit Shah +Dhawal Yogesh Bhanushali +Dieter Reuter +Dima Stopel +Dimitry Andric +Ding Fei +Diogo Monica +Dmitry Gusev +Dmitry Smirnov +Dmitry V. Krivenok +Don Kjer +Dong Chen +Doug Davis +Drew Erny +Ed Costello +Elango Sivanandam +Eli Uriegas +Eli Uriegas +Elias Faxö +Elliot Luo <956941328@qq.com> +Eric Curtin +Eric G. Noriega +Eric Rosenberg +Eric Sage +Eric-Olivier Lamey +Erica Windisch +Erik Hollensbe +Erik St. Martin +Essam A. Hassan +Ethan Haynes +Euan Kemp +Eugene Yakubovich +Evan Allrich +Evan Hazlett +Evan Krall +Evelyn Xu +Everett Toews +Fabio Falci +Fabrizio Soppelsa +Felix Hupfeld +Felix Rabe +Filip Jareš +Flavio Crisciani +Florian Klein +Forest Johnson +Foysal Iqbal +François Scala +Fred Lifton +Frederic Hemberger +Frederick F. Kautz IV +Frederik Nordahl Jul Sabroe +Frieder Bluemle +Gabriel Nicolas Avellaneda +Gaetan de Villele +Gang Qiao +Gary Schaetz +Genki Takiuchi +George MacRorie +George Xie +Gianluca Borello +Gildas Cuisinier +Goksu Toprak +Gou Rao +Grant Reaber +Greg Pflaum +Guilhem Lettron +Guillaume J. Charmes +Guillaume Le Floch +gwx296173 +Günther Jungbluth +Hakan Özler +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harold Cooper +Harry Zhang +He Simei +Helen Xie +Henning Sprang +Henry N +Hernan Garcia +Hongbin Lu +Hu Keping +Huayi Zhang +huqun +Huu Nguyen +Hyzhou Zhy +Ian Campbell +Ian Philpot +Ignacio Capurro +Ilya Dmitrichenko +Ilya Khlopotov +Ilya Sotkov +Ioan Eugen Stan +Isabel Jimenez +Ivan Grcic +Ivan Markin +Jacob Atzen +Jacob Tomlinson +Jaivish Kothari +Jake Lambert +Jake Sanders +James Nesbitt +James Turnbull +Jamie Hannaford +Jan Koprowski +Jan Pazdziora +Jan-Jaap Driessen +Jana Radhakrishnan +Jared Hocutt +Jasmine Hegman +Jason Heiss +Jason Plum +Jay Kamat +Jean Rouge +Jean-Christophe Sirot +Jean-Pierre Huynh +Jeff Lindsay +Jeff Nickoloff +Jeff Silberman +Jeremy Chambers +Jeremy Unruh +Jeremy Yallop +Jeroen Franse +Jesse Adametz +Jessica Frazelle +Jezeniel Zapanta +Jian Zhang +Jie Luo +Jilles Oldenbeuving +Jim Galasyn +Jimmy Leger +Jimmy Song +jimmyxian +Jintao Zhang +Joao Fernandes +Joe Doliner +Joe Gordon +Joel Handwell +Joey Geiger +Joffrey F +Johan Euphrosine +Johannes 'fish' Ziemke +John Feminella +John Harris +John Howard (VM) +John Laswell +John Maguire +John Mulhausen +John Starks +John Stephens +John Tims +John V. 
Martinez +John Willis +Jonathan Boulle +Jonathan Lee +Jonathan Lomas +Jonathan McCrohan +Jonh Wendell +Jordan Jennings +Joseph Kern +Josh Bodah +Josh Chorlton +Josh Hawn +Josh Horwitz +Josh Soref +Julien Barbier +Julien Kassar +Julien Maitrehenry +Justas Brazauskas +Justin Cormack +Justin Simonelis +Justyn Temme +Jyrki Puttonen +Jérémie Drouet +Jérôme Petazzoni +Jörg Thalheim +Kai Blin +Kai Qiang Wu (Kennan) +Kara Alexandra +Kareem Khazem +Karthik Nayak +Kat Samperi +Kathryn Spiers +Katie McLaughlin +Ke Xu +Kei Ohmura +Keith Hudgins +Ken Cochrane +Ken ICHIKAWA +Kenfe-Mickaël Laventure +Kevin Burke +Kevin Feyrer +Kevin Kern +Kevin Kirsche +Kevin Meredith +Kevin Richardson +khaled souf +Kim Eik +Kir Kolyshkin +Kotaro Yoshimatsu +Krasi Georgiev +Kris-Mikael Krister +Kun Zhang +Kunal Kushwaha +Lachlan Cooper +Lai Jiangshan +Lars Kellogg-Stedman +Laura Frank +Laurent Erignoux +Lee Gaines +Lei Jitang +Lennie +Leo Gallucci +Lewis Daly +Li Yi +Li Yi +Liang-Chi Hsieh +Lifubang +Lihua Tang +Lily Guo +Lin Lu +Linus Heckemann +Liping Xue +Liron Levin +liwenqi +lixiaobing10051267 +Lloyd Dewolf +Lorenzo Fontana +Louis Opter +Luca Favatella +Luca Marturana +Lucas Chan +Luka Hartwig +Lukasz Zajaczkowski +Lydell Manganti +Lénaïc Huard +Ma Shimiao +Mabin +Madhav Puri +Madhu Venugopal +Malte Janduda +Manjunath A Kumatagi +Mansi Nahar +mapk0y +Marc Bihlmaier +Marco Mariani +Marco Vedovati +Marcus Martins +Marianna Tessel +Marius Sturm +Mark Oates +Marsh Macy +Martin Mosegaard Amdisen +Mary Anthony +Mason Fish +Mason Malone +Mateusz Major +Mathieu Champlon +Matt Gucci +Matt Robenolt +Matteo Orefice +Matthew Heon +Matthieu Hauglustaine +Mauro Porras P +Max Shytikov +Maxime Petazzoni +Mei ChunTao +Micah Zoltu +Michael A. Smith +Michael Bridgen +Michael Crosby +Michael Friis +Michael Irwin +Michael Käufl +Michael Prokop +Michael Scharf +Michael Spetsiotis +Michael Steinert +Michael West +Michal Minář +Michał Czeraszkiewicz +Miguel Angel Alvarez Cabrerizo +Mihai Borobocea +Mihuleacc Sergiu +Mike Brown +Mike Casas +Mike Danese +Mike Dillon +Mike Goelzer +Mike MacCana +mikelinjie <294893458@qq.com> +Mikhail Vasin +Milind Chawre +Mindaugas Rukas +Misty Stanley-Jones +Mohammad Banikazemi +Mohammed Aaqib Ansari +Mohini Anne Dsouza +Moorthy RS +Morgan Bauer +Moysés Borges +Mrunal Patel +muicoder +Muthukumar R +Máximo Cuadros +Mårten Cassel +Nace Oroz +Nahum Shalman +Nalin Dahyabhai +Nao YONASHIRO +Nassim 'Nass' Eddequiouaq +Natalie Parker +Nate Brennand +Nathan Hsieh +Nathan LeClaire +Nathan McCauley +Neil Peterson +Nick Adcock +Nico Stapelbroek +Nicola Kabar +Nicolas Borboën +Nicolas De Loof +Nikhil Chawla +Nikolas Garofil +Nikolay Milovanov +Nir Soffer +Nishant Totla +NIWA Hideyuki +Noah Treuhaft +O.S. 
Tezer +ohmystack +Olle Jonsson +Olli Janatuinen +Otto Kekäläinen +Ovidio Mallo +Pascal Borreli +Patrick Böänziger +Patrick Hemmer +Patrick Lang +Paul +Paul Kehrer +Paul Lietar +Paul Weaver +Pavel Pospisil +Paweł Szczekutowicz +Peeyush Gupta +Per Lundberg +Peter Edge +Peter Hsu +Peter Jaffe +Peter Kehl +Peter Nagy +Peter Salvatore +Peter Waller +Phil Estes +Philip Alexander Etling +Philipp Gillé +Philipp Schmied +pidster +pixelistik +Pratik Karki +Prayag Verma +Preston Cowley +Pure White +Qiang Huang +Qinglan Peng +qudongfang +Raghavendra K T +Ravi Shekhar Jethani +Ray Tsang +Reficul +Remy Suen +Renaud Gaubert +Ricardo N Feliciano +Rich Moyse +Richard Mathie +Richard Scothern +Rick Wieman +Ritesh H Shukla +Riyaz Faizullabhoy +Robert Wallis +Robin Naundorf +Robin Speekenbrink +Rodolfo Ortiz +Rogelio Canedo +Rohan Verma +Roland Kammerer +Roman Dudin +Rory Hunter +Ross Boucher +Rubens Figueiredo +Rui Cao +Ryan Belgrave +Ryan Detzel +Ryan Stelly +Ryan Wilson-Perkin +Ryan Zhang +Sainath Grandhi +Sakeven Jiang +Sally O'Malley +Sam Neirinck +Sambuddha Basu +Sami Tabet +Samuel Karp +Santhosh Manohar +Scott Brenner +Scott Collier +Sean Christopherson +Sean Rodman +Sebastiaan van Stijn +Sergey Tryuber +Serhat Gülçiçek +Sevki Hasirci +Shaun Kaasten +Sheng Yang +Shijiang Wei +Shishir Mahajan +Shoubhik Bose +Shukui Yang +Sian Lerk Lau +Sidhartha Mani +sidharthamani +Silvin Lubecki +Simei He +Simon Ferquel +Sindhu S +Slava Semushin +Solomon Hykes +Song Gao +Spencer Brown +squeegels <1674195+squeegels@users.noreply.github.com> +Srini Brahmaroutu +Stefan S. +Stefan Scherer +Stefan Weil +Stephane Jeandeaux +Stephen Day +Stephen Rust +Steve Durrheimer +Steve Richards +Steven Burgess +Subhajit Ghosh +Sun Jianbo +Sune Keller +Sungwon Han +Sunny Gogoi +Sven Dowideit +Sylvain Baubeau +Sébastien HOUZÉ +T K Sourabh +TAGOMORI Satoshi +taiji-tech +Taylor Jones +Tejaswini Duggaraju +Thatcher Peskens +Thomas Gazagnaire +Thomas Krzero +Thomas Leonard +Thomas Léveil +Thomas Riccardi +Thomas Swift +Tianon Gravi +Tianyi Wang +Tibor Vass +Tim Dettrick +Tim Hockin +Tim Smith +Tim Waugh +Tim Wraight +timfeirg +Timothy Hobbs +Tobias Bradtke +Tobias Gesellchen +Todd Whiteman +Tom Denham +Tom Fotherby +Tom Klingenberg +Tom Milligan +Tom X. 
Tobin +Tomas Tomecek +Tomasz Kopczynski +Tomáš Hrčka +Tony Abboud +Tõnis Tiigi +Trapier Marshall +Travis Cline +Tristan Carel +Tycho Andersen +Tycho Andersen +uhayate +Ulysses Souza +Umesh Yadav +Valentin Lorentz +Veres Lajos +Victor Vieux +Victoria Bialas +Viktor Stanchev +Vimal Raghubir +Vincent Batts +Vincent Bernat +Vincent Demeester +Vincent Woo +Vishnu Kannan +Vivek Goyal +Wang Jie +Wang Lei +Wang Long +Wang Ping +Wang Xing +Wang Yuexiao +Wataru Ishida +Wayne Song +Wen Cheng Ma +Wenzhi Liang +Wes Morgan +Wewang Xiaorenfine +William Henry +Xianglin Gao +Xiaodong Zhang +Xiaoxi He +Xinbo Weng +Xuecong Liao +Yan Feng +Yanqiang Miao +Yassine Tijani +Yi EungJun +Ying Li +Yong Tang +Yosef Fertel +Yu Peng +Yuan Sun +Yue Zhang +Yunxiang Huang +Zachary Romero +Zander Mackie +zebrilee +Zhang Kun +Zhang Wei +Zhang Wentao +ZhangHang +zhenghenghuo +Zhou Hao +Zhoulin Xie +Zhu Guihua +Álex González +Álvaro Lázaro +Átila Camurça Alves +徐俊杰 diff --git a/extender/code/vendor/github.com/docker/cli/LICENSE b/extender/code/vendor/github.com/docker/cli/LICENSE new file mode 100644 index 000000000..9c8e20ab8 --- /dev/null +++ b/extender/code/vendor/github.com/docker/cli/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2017 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/extender/code/vendor/github.com/docker/cli/NOTICE b/extender/code/vendor/github.com/docker/cli/NOTICE new file mode 100644 index 000000000..58b19b6d1 --- /dev/null +++ b/extender/code/vendor/github.com/docker/cli/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2017 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/creack/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. 
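The docker/cli config package vendored below is the credential-resolution surface the extender presumably builds on. As a minimal, illustrative sketch only (not part of this diff; `registry.example.com` is a hypothetical host), the two entry points added in the following hunks — `config.LoadDefaultConfigFile` and `(*configfile.ConfigFile).GetAuthConfig` — combine like this:

    package main

    import (
        "fmt"
        "os"

        "github.com/docker/cli/cli/config"
    )

    func main() {
        // Load ~/.docker/config.json (or $DOCKER_CONFIG), falling back to the
        // legacy ~/.dockercfg format; load warnings are written to stderr.
        cfg := config.LoadDefaultConfigFile(os.Stderr)

        // Resolve credentials for a hypothetical registry host. This consults a
        // configured credential helper (credsStore/credHelpers) before falling
        // back to the plain-text "auths" map in the config file.
        auth, err := cfg.GetAuthConfig("registry.example.com")
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        fmt.Println("resolved username:", auth.Username)
    }

GetAuthConfig routes through GetCredentialsStore, so the helper-before-file precedence sketched above follows from the native_store.go and file_store.go hunks later in this diff.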
diff --git a/extender/code/vendor/github.com/docker/cli/cli/config/config.go b/extender/code/vendor/github.com/docker/cli/cli/config/config.go new file mode 100644 index 000000000..c860cf712 --- /dev/null +++ b/extender/code/vendor/github.com/docker/cli/cli/config/config.go @@ -0,0 +1,140 @@ +package config + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/cli/config/credentials" + "github.com/docker/cli/cli/config/types" + "github.com/docker/docker/pkg/homedir" + "github.com/pkg/errors" +) + +const ( + // ConfigFileName is the name of config file + ConfigFileName = "config.json" + configFileDir = ".docker" + oldConfigfile = ".dockercfg" + contextsDir = "contexts" +) + +var ( + configDir = os.Getenv("DOCKER_CONFIG") +) + +func init() { + if configDir == "" { + configDir = filepath.Join(homedir.Get(), configFileDir) + } +} + +// Dir returns the directory the configuration file is stored in +func Dir() string { + return configDir +} + +// ContextStoreDir returns the directory the docker contexts are stored in +func ContextStoreDir() string { + return filepath.Join(Dir(), contextsDir) +} + +// SetDir sets the directory the configuration file is stored in +func SetDir(dir string) { + configDir = filepath.Clean(dir) +} + +// Path returns the path to a file relative to the config dir +func Path(p ...string) (string, error) { + path := filepath.Join(append([]string{Dir()}, p...)...) + if !strings.HasPrefix(path, Dir()+string(filepath.Separator)) { + return "", errors.Errorf("path %q is outside of root config directory %q", path, Dir()) + } + return path, nil +} + +// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from +// a non-nested reader +func LegacyLoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { + configFile := configfile.ConfigFile{ + AuthConfigs: make(map[string]types.AuthConfig), + } + err := configFile.LegacyLoadFromReader(configData) + return &configFile, err +} + +// LoadFromReader is a convenience function that creates a ConfigFile object from +// a reader +func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { + configFile := configfile.ConfigFile{ + AuthConfigs: make(map[string]types.AuthConfig), + } + err := configFile.LoadFromReader(configData) + return &configFile, err +} + +// Load reads the configuration files in the given directory, and sets up +// the auth config information and returns values. 
+// FIXME: use the internal golang config parser +func Load(configDir string) (*configfile.ConfigFile, error) { + if configDir == "" { + configDir = Dir() + } + + filename := filepath.Join(configDir, ConfigFileName) + configFile := configfile.New(filename) + + // Try happy path first - latest config file + if _, err := os.Stat(filename); err == nil { + file, err := os.Open(filename) + if err != nil { + return configFile, errors.Wrap(err, filename) + } + defer file.Close() + err = configFile.LoadFromReader(file) + if err != nil { + err = errors.Wrap(err, filename) + } + return configFile, err + } else if !os.IsNotExist(err) { + // if file is there but we can't stat it for any reason other + // than it doesn't exist then stop + return configFile, errors.Wrap(err, filename) + } + + // Can't find latest config file so check for the old one + homedir, err := os.UserHomeDir() + if err != nil { + return configFile, errors.Wrap(err, oldConfigfile) + } + confFile := filepath.Join(homedir, oldConfigfile) + if _, err := os.Stat(confFile); err != nil { + return configFile, nil //missing file is not an error + } + file, err := os.Open(confFile) + if err != nil { + return configFile, errors.Wrap(err, filename) + } + defer file.Close() + err = configFile.LegacyLoadFromReader(file) + if err != nil { + return configFile, errors.Wrap(err, filename) + } + return configFile, nil +} + +// LoadDefaultConfigFile attempts to load the default config file and returns +// an initialized ConfigFile struct if none is found. +func LoadDefaultConfigFile(stderr io.Writer) *configfile.ConfigFile { + configFile, err := Load(Dir()) + if err != nil { + fmt.Fprintf(stderr, "WARNING: Error loading config file: %v\n", err) + } + if !configFile.ContainsAuth() { + configFile.CredentialsStore = credentials.DetectDefaultStore(configFile.CredentialsStore) + } + return configFile +} diff --git a/extender/code/vendor/github.com/docker/cli/cli/config/configfile/file.go b/extender/code/vendor/github.com/docker/cli/cli/config/configfile/file.go new file mode 100644 index 000000000..388a5d54d --- /dev/null +++ b/extender/code/vendor/github.com/docker/cli/cli/config/configfile/file.go @@ -0,0 +1,387 @@ +package configfile + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/docker/cli/cli/config/credentials" + "github.com/docker/cli/cli/config/types" + "github.com/pkg/errors" +) + +const ( + // This constant is only used for really old config files when the + // URL wasn't saved as part of the config file and it was just + // assumed to be this value. 
+ defaultIndexServer = "https://index.docker.io/v1/" +) + +// ConfigFile ~/.docker/config.json file info +type ConfigFile struct { + AuthConfigs map[string]types.AuthConfig `json:"auths"` + HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` + PsFormat string `json:"psFormat,omitempty"` + ImagesFormat string `json:"imagesFormat,omitempty"` + NetworksFormat string `json:"networksFormat,omitempty"` + PluginsFormat string `json:"pluginsFormat,omitempty"` + VolumesFormat string `json:"volumesFormat,omitempty"` + StatsFormat string `json:"statsFormat,omitempty"` + DetachKeys string `json:"detachKeys,omitempty"` + CredentialsStore string `json:"credsStore,omitempty"` + CredentialHelpers map[string]string `json:"credHelpers,omitempty"` + Filename string `json:"-"` // Note: for internal use only + ServiceInspectFormat string `json:"serviceInspectFormat,omitempty"` + ServicesFormat string `json:"servicesFormat,omitempty"` + TasksFormat string `json:"tasksFormat,omitempty"` + SecretFormat string `json:"secretFormat,omitempty"` + ConfigFormat string `json:"configFormat,omitempty"` + NodesFormat string `json:"nodesFormat,omitempty"` + PruneFilters []string `json:"pruneFilters,omitempty"` + Proxies map[string]ProxyConfig `json:"proxies,omitempty"` + Experimental string `json:"experimental,omitempty"` + StackOrchestrator string `json:"stackOrchestrator,omitempty"` + Kubernetes *KubernetesConfig `json:"kubernetes,omitempty"` + CurrentContext string `json:"currentContext,omitempty"` + CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"` + Plugins map[string]map[string]string `json:"plugins,omitempty"` + Aliases map[string]string `json:"aliases,omitempty"` +} + +// ProxyConfig contains proxy configuration settings +type ProxyConfig struct { + HTTPProxy string `json:"httpProxy,omitempty"` + HTTPSProxy string `json:"httpsProxy,omitempty"` + NoProxy string `json:"noProxy,omitempty"` + FTPProxy string `json:"ftpProxy,omitempty"` +} + +// KubernetesConfig contains Kubernetes orchestrator settings +type KubernetesConfig struct { + AllNamespaces string `json:"allNamespaces,omitempty"` +} + +// New initializes an empty configuration file for the given filename 'fn' +func New(fn string) *ConfigFile { + return &ConfigFile{ + AuthConfigs: make(map[string]types.AuthConfig), + HTTPHeaders: make(map[string]string), + Filename: fn, + Plugins: make(map[string]map[string]string), + Aliases: make(map[string]string), + } +} + +// LegacyLoadFromReader reads the non-nested configuration data given and sets up the +// auth config information with given directory and populates the receiver object +func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error { + b, err := ioutil.ReadAll(configData) + if err != nil { + return err + } + + if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil { + arr := strings.Split(string(b), "\n") + if len(arr) < 2 { + return errors.Errorf("The Auth config file is empty") + } + authConfig := types.AuthConfig{} + origAuth := strings.Split(arr[0], " = ") + if len(origAuth) != 2 { + return errors.Errorf("Invalid Auth config file") + } + authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) + if err != nil { + return err + } + authConfig.ServerAddress = defaultIndexServer + configFile.AuthConfigs[defaultIndexServer] = authConfig + } else { + for k, authConfig := range configFile.AuthConfigs { + authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) + if err != nil { + return err + } + authConfig.Auth = "" + 
authConfig.ServerAddress = k + configFile.AuthConfigs[k] = authConfig + } + } + return nil +} + +// LoadFromReader reads the configuration data given and sets up the auth config +// information with given directory and populates the receiver object +func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { + if err := json.NewDecoder(configData).Decode(&configFile); err != nil { + return err + } + var err error + for addr, ac := range configFile.AuthConfigs { + if ac.Auth != "" { + ac.Username, ac.Password, err = decodeAuth(ac.Auth) + if err != nil { + return err + } + } + ac.Auth = "" + ac.ServerAddress = addr + configFile.AuthConfigs[addr] = ac + } + return checkKubernetesConfiguration(configFile.Kubernetes) +} + +// ContainsAuth returns whether there is authentication configured +// in this file or not. +func (configFile *ConfigFile) ContainsAuth() bool { + return configFile.CredentialsStore != "" || + len(configFile.CredentialHelpers) > 0 || + len(configFile.AuthConfigs) > 0 +} + +// GetAuthConfigs returns the mapping of repo to auth configuration +func (configFile *ConfigFile) GetAuthConfigs() map[string]types.AuthConfig { + return configFile.AuthConfigs +} + +// SaveToWriter encodes and writes out all the authorization information to +// the given writer +func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error { + // Encode sensitive data into a new/temp struct + tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs)) + for k, authConfig := range configFile.AuthConfigs { + authCopy := authConfig + // encode and save the authstring, while blanking out the original fields + authCopy.Auth = encodeAuth(&authCopy) + authCopy.Username = "" + authCopy.Password = "" + authCopy.ServerAddress = "" + tmpAuthConfigs[k] = authCopy + } + + saveAuthConfigs := configFile.AuthConfigs + configFile.AuthConfigs = tmpAuthConfigs + defer func() { configFile.AuthConfigs = saveAuthConfigs }() + + data, err := json.MarshalIndent(configFile, "", "\t") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// Save encodes and writes out all the authorization information +func (configFile *ConfigFile) Save() error { + if configFile.Filename == "" { + return errors.Errorf("Can't save config with empty filename") + } + + dir := filepath.Dir(configFile.Filename) + if err := os.MkdirAll(dir, 0700); err != nil { + return err + } + temp, err := ioutil.TempFile(dir, filepath.Base(configFile.Filename)) + if err != nil { + return err + } + err = configFile.SaveToWriter(temp) + temp.Close() + if err != nil { + os.Remove(temp.Name()) + return err + } + return os.Rename(temp.Name(), configFile.Filename) +} + +// ParseProxyConfig computes proxy configuration by retrieving the config for the provided host and +// then checking this against any environment variables provided to the container +func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts map[string]*string) map[string]*string { + var cfgKey string + + if _, ok := configFile.Proxies[host]; !ok { + cfgKey = "default" + } else { + cfgKey = host + } + + config := configFile.Proxies[cfgKey] + permitted := map[string]*string{ + "HTTP_PROXY": &config.HTTPProxy, + "HTTPS_PROXY": &config.HTTPSProxy, + "NO_PROXY": &config.NoProxy, + "FTP_PROXY": &config.FTPProxy, + } + m := runOpts + if m == nil { + m = make(map[string]*string) + } + for k := range permitted { + if *permitted[k] == "" { + continue + } + if _, ok := m[k]; !ok { + m[k] = permitted[k] + } + if _, ok := 
m[strings.ToLower(k)]; !ok { + m[strings.ToLower(k)] = permitted[k] + } + } + return m +} + +// encodeAuth creates a base64 encoded string to containing authorization information +func encodeAuth(authConfig *types.AuthConfig) string { + if authConfig.Username == "" && authConfig.Password == "" { + return "" + } + + authStr := authConfig.Username + ":" + authConfig.Password + msg := []byte(authStr) + encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) + base64.StdEncoding.Encode(encoded, msg) + return string(encoded) +} + +// decodeAuth decodes a base64 encoded string and returns username and password +func decodeAuth(authStr string) (string, string, error) { + if authStr == "" { + return "", "", nil + } + + decLen := base64.StdEncoding.DecodedLen(len(authStr)) + decoded := make([]byte, decLen) + authByte := []byte(authStr) + n, err := base64.StdEncoding.Decode(decoded, authByte) + if err != nil { + return "", "", err + } + if n > decLen { + return "", "", errors.Errorf("Something went wrong decoding auth config") + } + arr := strings.SplitN(string(decoded), ":", 2) + if len(arr) != 2 { + return "", "", errors.Errorf("Invalid auth configuration file") + } + password := strings.Trim(arr[1], "\x00") + return arr[0], password, nil +} + +// GetCredentialsStore returns a new credentials store from the settings in the +// configuration file +func (configFile *ConfigFile) GetCredentialsStore(registryHostname string) credentials.Store { + if helper := getConfiguredCredentialStore(configFile, registryHostname); helper != "" { + return newNativeStore(configFile, helper) + } + return credentials.NewFileStore(configFile) +} + +// var for unit testing. +var newNativeStore = func(configFile *ConfigFile, helperSuffix string) credentials.Store { + return credentials.NewNativeStore(configFile, helperSuffix) +} + +// GetAuthConfig for a repository from the credential store +func (configFile *ConfigFile) GetAuthConfig(registryHostname string) (types.AuthConfig, error) { + return configFile.GetCredentialsStore(registryHostname).Get(registryHostname) +} + +// getConfiguredCredentialStore returns the credential helper configured for the +// given registry, the default credsStore, or the empty string if neither are +// configured. +func getConfiguredCredentialStore(c *ConfigFile, registryHostname string) string { + if c.CredentialHelpers != nil && registryHostname != "" { + if helper, exists := c.CredentialHelpers[registryHostname]; exists { + return helper + } + } + return c.CredentialsStore +} + +// GetAllCredentials returns all of the credentials stored in all of the +// configured credential stores. +func (configFile *ConfigFile) GetAllCredentials() (map[string]types.AuthConfig, error) { + auths := make(map[string]types.AuthConfig) + addAll := func(from map[string]types.AuthConfig) { + for reg, ac := range from { + auths[reg] = ac + } + } + + defaultStore := configFile.GetCredentialsStore("") + newAuths, err := defaultStore.GetAll() + if err != nil { + return nil, err + } + addAll(newAuths) + + // Auth configs from a registry-specific helper should override those from the default store. + for registryHostname := range configFile.CredentialHelpers { + newAuth, err := configFile.GetAuthConfig(registryHostname) + if err != nil { + return nil, err + } + auths[registryHostname] = newAuth + } + return auths, nil +} + +// GetFilename returns the file name that this config file is based on. 
+func (configFile *ConfigFile) GetFilename() string { + return configFile.Filename +} + +// PluginConfig retrieves the requested option for the given plugin. +func (configFile *ConfigFile) PluginConfig(pluginname, option string) (string, bool) { + if configFile.Plugins == nil { + return "", false + } + pluginConfig, ok := configFile.Plugins[pluginname] + if !ok { + return "", false + } + value, ok := pluginConfig[option] + return value, ok +} + +// SetPluginConfig sets the option to the given value for the given +// plugin. Passing a value of "" will remove the option. If removing +// the final config item for a given plugin then also cleans up the +// overall plugin entry. +func (configFile *ConfigFile) SetPluginConfig(pluginname, option, value string) { + if configFile.Plugins == nil { + configFile.Plugins = make(map[string]map[string]string) + } + pluginConfig, ok := configFile.Plugins[pluginname] + if !ok { + pluginConfig = make(map[string]string) + configFile.Plugins[pluginname] = pluginConfig + } + if value != "" { + pluginConfig[option] = value + } else { + delete(pluginConfig, option) + } + if len(pluginConfig) == 0 { + delete(configFile.Plugins, pluginname) + } +} + +func checkKubernetesConfiguration(kubeConfig *KubernetesConfig) error { + if kubeConfig == nil { + return nil + } + switch kubeConfig.AllNamespaces { + case "": + case "enabled": + case "disabled": + default: + return fmt.Errorf("invalid 'kubernetes.allNamespaces' value, should be 'enabled' or 'disabled': %s", kubeConfig.AllNamespaces) + } + return nil +} diff --git a/extender/code/vendor/github.com/docker/cli/cli/config/credentials/credentials.go b/extender/code/vendor/github.com/docker/cli/cli/config/credentials/credentials.go new file mode 100644 index 000000000..28d58ec48 --- /dev/null +++ b/extender/code/vendor/github.com/docker/cli/cli/config/credentials/credentials.go @@ -0,0 +1,17 @@ +package credentials + +import ( + "github.com/docker/cli/cli/config/types" +) + +// Store is the interface that any credentials store must implement. +type Store interface { + // Erase removes credentials from the store for a given server. + Erase(serverAddress string) error + // Get retrieves credentials from the store for a given server. + Get(serverAddress string) (types.AuthConfig, error) + // GetAll retrieves all the credentials from the store. + GetAll() (map[string]types.AuthConfig, error) + // Store saves credentials in the store. + Store(authConfig types.AuthConfig) error +} diff --git a/extender/code/vendor/github.com/docker/cli/cli/config/credentials/default_store.go b/extender/code/vendor/github.com/docker/cli/cli/config/credentials/default_store.go new file mode 100644 index 000000000..7a760f1a9 --- /dev/null +++ b/extender/code/vendor/github.com/docker/cli/cli/config/credentials/default_store.go @@ -0,0 +1,21 @@ +package credentials + +import ( + "os/exec" +) + +// DetectDefaultStore return the default credentials store for the platform if +// the store executable is available. 
+func DetectDefaultStore(store string) string { + platformDefault := defaultCredentialsStore() + + // user defined or no default for platform + if store != "" || platformDefault == "" { + return store + } + + if _, err := exec.LookPath(remoteCredentialsPrefix + platformDefault); err == nil { + return platformDefault + } + return "" +} diff --git a/extender/code/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go b/extender/code/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go new file mode 100644 index 000000000..5d42dec62 --- /dev/null +++ b/extender/code/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go @@ -0,0 +1,5 @@ +package credentials + +func defaultCredentialsStore() string { + return "osxkeychain" +} diff --git a/extender/code/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go b/extender/code/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go new file mode 100644 index 000000000..a9012c6d4 --- /dev/null +++ b/extender/code/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go @@ -0,0 +1,13 @@ +package credentials + +import ( + "os/exec" +) + +func defaultCredentialsStore() string { + if _, err := exec.LookPath("pass"); err == nil { + return "pass" + } + + return "secretservice" +} diff --git a/extender/code/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go b/extender/code/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go new file mode 100644 index 000000000..3028168ac --- /dev/null +++ b/extender/code/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go @@ -0,0 +1,7 @@ +// +build !windows,!darwin,!linux + +package credentials + +func defaultCredentialsStore() string { + return "" +} diff --git a/extender/code/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go b/extender/code/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go new file mode 100644 index 000000000..bb799ca61 --- /dev/null +++ b/extender/code/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go @@ -0,0 +1,5 @@ +package credentials + +func defaultCredentialsStore() string { + return "wincred" +} diff --git a/extender/code/vendor/github.com/docker/cli/cli/config/credentials/file_store.go b/extender/code/vendor/github.com/docker/cli/cli/config/credentials/file_store.go new file mode 100644 index 000000000..e509820b7 --- /dev/null +++ b/extender/code/vendor/github.com/docker/cli/cli/config/credentials/file_store.go @@ -0,0 +1,81 @@ +package credentials + +import ( + "strings" + + "github.com/docker/cli/cli/config/types" +) + +type store interface { + Save() error + GetAuthConfigs() map[string]types.AuthConfig + GetFilename() string +} + +// fileStore implements a credentials store using +// the docker configuration file to keep the credentials in plain text. +type fileStore struct { + file store +} + +// NewFileStore creates a new file credentials store. +func NewFileStore(file store) Store { + return &fileStore{file: file} +} + +// Erase removes the given credentials from the file store. +func (c *fileStore) Erase(serverAddress string) error { + delete(c.file.GetAuthConfigs(), serverAddress) + return c.file.Save() +} + +// Get retrieves credentials for a specific server from the file store. 
+func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) { + authConfig, ok := c.file.GetAuthConfigs()[serverAddress] + if !ok { + // Maybe they have a legacy config file, we will iterate the keys converting + // them to the new format and testing + for r, ac := range c.file.GetAuthConfigs() { + if serverAddress == ConvertToHostname(r) { + return ac, nil + } + } + + authConfig = types.AuthConfig{} + } + return authConfig, nil +} + +func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) { + return c.file.GetAuthConfigs(), nil +} + +// Store saves the given credentials in the file store. +func (c *fileStore) Store(authConfig types.AuthConfig) error { + c.file.GetAuthConfigs()[authConfig.ServerAddress] = authConfig + return c.file.Save() +} + +func (c *fileStore) GetFilename() string { + return c.file.GetFilename() +} + +func (c *fileStore) IsFileStore() bool { + return true +} + +// ConvertToHostname converts a registry url which has http|https prepended +// to just an hostname. +// Copied from github.com/docker/docker/registry.ConvertToHostname to reduce dependencies. +func ConvertToHostname(url string) string { + stripped := url + if strings.HasPrefix(url, "http://") { + stripped = strings.TrimPrefix(url, "http://") + } else if strings.HasPrefix(url, "https://") { + stripped = strings.TrimPrefix(url, "https://") + } + + nameParts := strings.SplitN(stripped, "/", 2) + + return nameParts[0] +} diff --git a/extender/code/vendor/github.com/docker/cli/cli/config/credentials/native_store.go b/extender/code/vendor/github.com/docker/cli/cli/config/credentials/native_store.go new file mode 100644 index 000000000..afe542cc3 --- /dev/null +++ b/extender/code/vendor/github.com/docker/cli/cli/config/credentials/native_store.go @@ -0,0 +1,143 @@ +package credentials + +import ( + "github.com/docker/cli/cli/config/types" + "github.com/docker/docker-credential-helpers/client" + "github.com/docker/docker-credential-helpers/credentials" +) + +const ( + remoteCredentialsPrefix = "docker-credential-" + tokenUsername = "" +) + +// nativeStore implements a credentials store +// using native keychain to keep credentials secure. +// It piggybacks into a file store to keep users' emails. +type nativeStore struct { + programFunc client.ProgramFunc + fileStore Store +} + +// NewNativeStore creates a new native store that +// uses a remote helper program to manage credentials. +func NewNativeStore(file store, helperSuffix string) Store { + name := remoteCredentialsPrefix + helperSuffix + return &nativeStore{ + programFunc: client.NewShellProgramFunc(name), + fileStore: NewFileStore(file), + } +} + +// Erase removes the given credentials from the native store. +func (c *nativeStore) Erase(serverAddress string) error { + if err := client.Erase(c.programFunc, serverAddress); err != nil { + return err + } + + // Fallback to plain text store to remove email + return c.fileStore.Erase(serverAddress) +} + +// Get retrieves credentials for a specific server from the native store. +func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) { + // load user email if it exist or an empty auth config. + auth, _ := c.fileStore.Get(serverAddress) + + creds, err := c.getCredentialsFromStore(serverAddress) + if err != nil { + return auth, err + } + auth.Username = creds.Username + auth.IdentityToken = creds.IdentityToken + auth.Password = creds.Password + + return auth, nil +} + +// GetAll retrieves all the credentials from the native store. 
+func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) { + auths, err := c.listCredentialsInStore() + if err != nil { + return nil, err + } + + // Emails are only stored in the file store. + // This call can be safely eliminated when emails are removed. + fileConfigs, _ := c.fileStore.GetAll() + + authConfigs := make(map[string]types.AuthConfig) + for registry := range auths { + creds, err := c.getCredentialsFromStore(registry) + if err != nil { + return nil, err + } + ac := fileConfigs[registry] // might contain Email + ac.Username = creds.Username + ac.Password = creds.Password + ac.IdentityToken = creds.IdentityToken + authConfigs[registry] = ac + } + + return authConfigs, nil +} + +// Store saves the given credentials in the file store. +func (c *nativeStore) Store(authConfig types.AuthConfig) error { + if err := c.storeCredentialsInStore(authConfig); err != nil { + return err + } + authConfig.Username = "" + authConfig.Password = "" + authConfig.IdentityToken = "" + + // Fallback to old credential in plain text to save only the email + return c.fileStore.Store(authConfig) +} + +// storeCredentialsInStore executes the command to store the credentials in the native store. +func (c *nativeStore) storeCredentialsInStore(config types.AuthConfig) error { + creds := &credentials.Credentials{ + ServerURL: config.ServerAddress, + Username: config.Username, + Secret: config.Password, + } + + if config.IdentityToken != "" { + creds.Username = tokenUsername + creds.Secret = config.IdentityToken + } + + return client.Store(c.programFunc, creds) +} + +// getCredentialsFromStore executes the command to get the credentials from the native store. +func (c *nativeStore) getCredentialsFromStore(serverAddress string) (types.AuthConfig, error) { + var ret types.AuthConfig + + creds, err := client.Get(c.programFunc, serverAddress) + if err != nil { + if credentials.IsErrCredentialsNotFound(err) { + // do not return an error if the credentials are not + // in the keychain. Let docker ask for new credentials. + return ret, nil + } + return ret, err + } + + if creds.Username == tokenUsername { + ret.IdentityToken = creds.Secret + } else { + ret.Password = creds.Secret + ret.Username = creds.Username + } + + ret.ServerAddress = serverAddress + return ret, nil +} + +// listCredentialsInStore returns a listing of stored credentials as a map of +// URL -> username. +func (c *nativeStore) listCredentialsInStore() (map[string]string, error) { + return client.List(c.programFunc) +} diff --git a/extender/code/vendor/github.com/docker/cli/cli/config/types/authconfig.go b/extender/code/vendor/github.com/docker/cli/cli/config/types/authconfig.go new file mode 100644 index 000000000..056af6b84 --- /dev/null +++ b/extender/code/vendor/github.com/docker/cli/cli/config/types/authconfig.go @@ -0,0 +1,22 @@ +package types + +// AuthConfig contains authorization information for connecting to a Registry +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + // Email is an optional value associated with the username. + // This field is deprecated and will be removed in a later + // version of docker. + Email string `json:"email,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. 
+ IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} diff --git a/extender/code/vendor/github.com/docker/distribution/.gitignore b/extender/code/vendor/github.com/docker/distribution/.gitignore new file mode 100644 index 000000000..4cf7888e9 --- /dev/null +++ b/extender/code/vendor/github.com/docker/distribution/.gitignore @@ -0,0 +1,38 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# never checkin from the bin file (for now) +bin/* + +# Test key files +*.pem + +# Cover profiles +*.out + +# Editor/IDE specific files. +*.sublime-project +*.sublime-workspace +.idea/* diff --git a/extender/code/vendor/github.com/docker/distribution/.gometalinter.json b/extender/code/vendor/github.com/docker/distribution/.gometalinter.json new file mode 100644 index 000000000..9df5b14bc --- /dev/null +++ b/extender/code/vendor/github.com/docker/distribution/.gometalinter.json @@ -0,0 +1,16 @@ +{ + "Vendor": true, + "Deadline": "2m", + "Sort": ["linter", "severity", "path", "line"], + "EnableGC": true, + "Enable": [ + "structcheck", + "staticcheck", + "unconvert", + + "gofmt", + "goimports", + "golint", + "vet" + ] +} diff --git a/extender/code/vendor/github.com/docker/distribution/.mailmap b/extender/code/vendor/github.com/docker/distribution/.mailmap new file mode 100644 index 000000000..0f48321d4 --- /dev/null +++ b/extender/code/vendor/github.com/docker/distribution/.mailmap @@ -0,0 +1,32 @@ +Stephen J Day Stephen Day +Stephen J Day Stephen Day +Olivier Gambier Olivier Gambier +Brian Bland Brian Bland +Brian Bland Brian Bland +Josh Hawn Josh Hawn +Richard Scothern Richard +Richard Scothern Richard Scothern +Andrew Meredith Andrew Meredith +harche harche +Jessie Frazelle +Sharif Nassar Sharif Nassar +Sven Dowideit Sven Dowideit +Vincent Giersch Vincent Giersch +davidli davidli +Omer Cohen Omer Cohen +Eric Yang Eric Yang +Nikita Tarasov Nikita +Yu Wang yuwaMSFT2 +Yu Wang Yu Wang (UC) +Olivier Gambier dmp +Olivier Gambier Olivier +Olivier Gambier Olivier +Elsan Li 李楠 elsanli(李楠) +Rui Cao ruicao +Gwendolynne Barr gbarr01 +Haibing Zhou 周海兵 zhouhaibing089 +Feng Honglin tifayuki +Helen Xie Helen-xie +Mike Brown Mike Brown +Manish Tomar Manish Tomar +Sakeven Jiang sakeven diff --git a/extender/code/vendor/github.com/docker/distribution/.travis.yml b/extender/code/vendor/github.com/docker/distribution/.travis.yml new file mode 100644 index 000000000..44ced6045 --- /dev/null +++ b/extender/code/vendor/github.com/docker/distribution/.travis.yml @@ -0,0 +1,51 @@ +dist: trusty +sudo: required +# setup travis so that we can run containers for integration tests +services: + - docker + +language: go + +go: + - "1.11.x" + +go_import_path: github.com/docker/distribution + +addons: + apt: + packages: + - python-minimal + + +env: + - TRAVIS_GOOS=linux DOCKER_BUILDTAGS="include_oss include_gcs" TRAVIS_CGO_ENABLED=1 + +before_install: + - uname -r + - sudo apt-get -q update + +install: + - go get -u github.com/vbatts/git-validation + # TODO: Add enforcement of license + # - go get -u github.com/kunalkushwaha/ltag + - cd $TRAVIS_BUILD_DIR + +script: + - export GOOS=$TRAVIS_GOOS + - export CGO_ENABLED=$TRAVIS_CGO_ENABLED + - DCO_VERBOSITY=-q script/validate/dco + 
- GOOS=linux script/setup/install-dev-tools + - script/validate/vendor + - go build -i . + - make check + - make build + - make binaries + # Currently takes too long + #- if [ "$GOOS" = "linux" ]; then make test-race ; fi + - if [ "$GOOS" = "linux" ]; then make coverage ; fi + +after_success: + - bash <(curl -s https://codecov.io/bash) -F linux + +before_deploy: + # Run tests with storage driver configurations diff --git a/extender/code/vendor/github.com/docker/distribution/BUILDING.md b/extender/code/vendor/github.com/docker/distribution/BUILDING.md new file mode 100644 index 000000000..2981d016b --- /dev/null +++ b/extender/code/vendor/github.com/docker/distribution/BUILDING.md @@ -0,0 +1,117 @@ + +# Building the registry source + +## Use-case + +This is useful if you intend to actively work on the registry. + +### Alternatives + +Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/). + +People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`. + +OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md). + +### Gotchas + +You are expected to know your way around with go & git. + +If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you. + +## Build the development environment + +The first prerequisite of properly building distribution targets is to have a Go +development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html) +for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the +environment. + +If a Go development environment is setup, one can use `go get` to install the +`registry` command from the current latest: + + go get github.com/docker/distribution/cmd/registry + +The above will install the source repository into the `GOPATH`. + +Now create the directory for the registry data (this might require you to set permissions properly) + + mkdir -p /var/lib/registry + +... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location. + +The `registry` +binary can then be run with the following: + + $ $GOPATH/bin/registry --version + $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown + +> __NOTE:__ While you do not need to use `go get` to checkout the distribution +> project, for these build instructions to work, the project must be checked +> out in the correct location in the `GOPATH`. This should almost always be +> `$GOPATH/src/github.com/docker/distribution`. + +The registry can be run with the default config using the following +incantation: + + $ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml + INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] debug server listening localhost:5001 + +If it is working, one should see the above log messages. 
+ +### Repeatable Builds + +For the full development experience, one should `cd` into +`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go` +commands, such as `go test`, should work per package (please see +[Developing](#developing) if they don't work). + +A `Makefile` has been provided as a convenience to support repeatable builds. +Please install the following into `GOPATH` for it to work: + + go get github.com/golang/lint/golint + +Once these commands are available in the `GOPATH`, run `make` to get a full +build: + + $ make + + clean + + fmt + + vet + + lint + + build + github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar + github.com/sirupsen/logrus + github.com/docker/libtrust + ... + github.com/yvasiyarov/gorelic + github.com/docker/distribution/registry/handlers + github.com/docker/distribution/cmd/registry + + test + ... + ok github.com/docker/distribution/digest 7.875s + ok github.com/docker/distribution/manifest 0.028s + ok github.com/docker/distribution/notifications 17.322s + ? github.com/docker/distribution/registry [no test files] + ok github.com/docker/distribution/registry/api/v2 0.101s + ? github.com/docker/distribution/registry/auth [no test files] + ok github.com/docker/distribution/registry/auth/silly 0.011s + ... + + /Users/sday/go/src/github.com/docker/distribution/bin/registry + + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template + + binaries + +The above provides a repeatable build using the contents of the vendor +directory. This includes formatting, vetting, linting, building, +testing and generating tagged binaries. We can verify this worked by running +the registry binary generated in the "./bin" directory: + + $ ./bin/registry --version + ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m + +### Optional build tags + +Optional [build tags](http://golang.org/pkg/go/build/) can be provided using +the environment variable `DOCKER_BUILDTAGS`. diff --git a/extender/code/vendor/github.com/docker/distribution/CONTRIBUTING.md b/extender/code/vendor/github.com/docker/distribution/CONTRIBUTING.md new file mode 100644 index 000000000..4c067d9e7 --- /dev/null +++ b/extender/code/vendor/github.com/docker/distribution/CONTRIBUTING.md @@ -0,0 +1,148 @@ +# Contributing to the registry + +## Before reporting an issue... + +### If your problem is with... + + - automated builds + - your account on the [Docker Hub](https://hub.docker.com/) + - any other [Docker Hub](https://hub.docker.com/) issue + +Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com) + +### If you... + + - need help setting up your registry + - can't figure out something + - are not sure what's going on or what your problem is + +Then please do not open an issue here yet - you should first try one of the following support forums: + + - irc: #docker-distribution on freenode + - mailing-list: or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution + +### Reporting security issues + +The Docker maintainers take security seriously. If you discover a security +issue, please bring it to their attention right away! + +Please **DO NOT** file a public issue, instead send your report privately to +[security@docker.com](mailto:security@docker.com). + +## Reporting an issue properly + +By following these simple rules you will get better and faster feedback on your issue. 
+ + - search the bugtracker for an already reported issue + +### If you found an issue that describes your problem: + + - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments + - please refrain from adding "same thing here" or "+1" comments + - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button + - comment if you have some new, technical and relevant information to add to the case + - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue. + +### If you have not found an existing issue that describes your problem: + + 1. create a new issue, with a succinct title that describes your issue: + - bad title: "It doesn't work with my docker" + - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST" + 2. copy the output of: + - `docker version` + - `docker info` + - `docker exec registry --version` + 3. copy the command line you used to launch your Registry + 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments) + 5. reproduce your problem and get your docker daemon logs showing the error + 6. if relevant, copy your registry logs that show the error + 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used) + 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry + +## Contributing a patch for a known bug, or a small correction + +You should follow the basic GitHub workflow: + + 1. fork + 2. commit a change + 3. make sure the tests pass + 4. PR + +Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple: + + - configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com` + - sign your commits using `-s`: `git commit -s -m "My commit"` + +Some simple rules to ensure quick merge: + + - clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`) + - prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once + - if you need to amend your PR following comments, please squash instead of adding more commits + +## Contributing new features + +You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve. + +If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning. +If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work. + +Then you should submit your implementation, clearly linking to the issue (and possible proposal). + +Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged. 
+ +It's mandatory to: + + - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines) + - address maintainers' comments and modify your submission accordingly + - write tests for any new code + +Complying to these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry. + +Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493) + +## Coding Style + +Unless explicitly stated, we follow all coding guidelines from the Go +community. While some of these standards may seem arbitrary, they somehow seem +to result in a solid, consistent codebase. + +It is possible that the code base does not currently comply with these +guidelines. We are not looking for a massive PR that fixes this, since that +goes against the spirit of the guidelines. All new contributions should make a +best effort to clean up and make the code base better than they left it. +Obviously, apply your best judgement. Remember, the goal here is to make the +code base easier for humans to navigate and understand. Always keep that in +mind when nudging others to comply. + +The rules: + +1. All code should be formatted with `gofmt -s`. +2. All code should pass the default levels of + [`golint`](https://github.com/golang/lint). +3. All code should follow the guidelines covered in [Effective + Go](http://golang.org/doc/effective_go.html) and [Go Code Review + Comments](https://github.com/golang/go/wiki/CodeReviewComments). +4. Comment the code. Tell us the why, the history and the context. +5. Document _all_ declarations and methods, even private ones. Declare + expectations, caveats and anything else that may be important. If a type + gets exported, having the comments already there will ensure it's ready. +6. Variable name length should be proportional to its context and no longer. + `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. + In practice, short methods will have short variable names and globals will + have longer names. +7. No underscores in package names. If you need a compound name, step back, + and re-examine why you need a compound name. If you still think you need a + compound name, lose the underscore. +8. No utils or helpers packages. If a function is not general enough to + warrant its own package, it has not been written generally enough to be a + part of a util package. Just leave it unexported and well-documented. +9. All tests should run with `go test` and outside tooling should not be + required. No, we don't need another unit testing framework. Assertion + packages are acceptable if they provide _real_ incremental value. +10. Even though we call these "rules" above, they are actually just + guidelines. Since you've read all the rules, you now know that. + +If you are having trouble getting into the mood of idiomatic Go, we recommend +reading through [Effective Go](http://golang.org/doc/effective_go.html). The +[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the +kool-aid is a lot easier than going thirsty. 
diff --git a/extender/code/vendor/github.com/docker/distribution/Dockerfile b/extender/code/vendor/github.com/docker/distribution/Dockerfile new file mode 100644 index 000000000..9537817ca --- /dev/null +++ b/extender/code/vendor/github.com/docker/distribution/Dockerfile @@ -0,0 +1,23 @@ +FROM golang:1.11-alpine AS build + +ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution +ENV BUILDTAGS include_oss include_gcs + +ARG GOOS=linux +ARG GOARCH=amd64 +ARG GOARM=6 + +RUN set -ex \ + && apk add --no-cache make git file + +WORKDIR $DISTRIBUTION_DIR +COPY . $DISTRIBUTION_DIR +RUN CGO_ENABLED=0 make PREFIX=/go clean binaries && file ./bin/registry | grep "statically linked" + +FROM alpine +COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml +COPY --from=build /go/src/github.com/docker/distribution/bin/registry /bin/registry +VOLUME ["/var/lib/registry"] +EXPOSE 5000 +ENTRYPOINT ["registry"] +CMD ["serve", "/etc/docker/registry/config.yml"] diff --git a/extender/code/vendor/github.com/docker/distribution/LICENSE b/extender/code/vendor/github.com/docker/distribution/LICENSE new file mode 100644 index 000000000..e06d20818 --- /dev/null +++ b/extender/code/vendor/github.com/docker/distribution/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/extender/code/vendor/github.com/docker/distribution/MAINTAINERS b/extender/code/vendor/github.com/docker/distribution/MAINTAINERS new file mode 100644 index 000000000..3183620c5 --- /dev/null +++ b/extender/code/vendor/github.com/docker/distribution/MAINTAINERS @@ -0,0 +1,243 @@ +# Distribution maintainers file +# +# This file describes who runs the docker/distribution project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# + +[Rules] + + [Rules.maintainers] + + title = "What is a maintainer?" + + text = """ +There are different types of maintainers, with different responsibilities, but +all maintainers have 3 things in common: + +1) They share responsibility in the project's success. +2) They have made a long-term, recurring time investment to improve the project. 
+3) They spend that time doing whatever needs to be done, not necessarily what +is the most interesting or fun. + +Maintainers are often under-appreciated, because their work is harder to appreciate. +It's easy to appreciate a really cool and technically advanced feature. It's harder +to appreciate the absence of bugs, the slow but steady improvement in stability, +or the reliability of a release process. But those things distinguish a good +project from a great one. +""" + + [Rules.reviewer] + + title = "What is a reviewer?" + + text = """ +A reviewer is a core role within the project. +They share in reviewing issues and pull requests and their LGTM count towards the +required LGTM count to merge a code change into the project. + +Reviewers are part of the organization but do not have write access. +Becoming a reviewer is a core aspect in the journey to becoming a maintainer. +""" + + [Rules.adding-maintainers] + + title = "How are maintainers added?" + + text = """ +Maintainers are first and foremost contributors that have shown they are +committed to the long term success of a project. Contributors wanting to become +maintainers are expected to be deeply involved in contributing code, pull +request review, and triage of issues in the project for more than three months. + +Just contributing does not make you a maintainer, it is about building trust +with the current maintainers of the project and being a person that they can +depend on and trust to make decisions in the best interest of the project. + +Periodically, the existing maintainers curate a list of contributors that have +shown regular activity on the project over the prior months. From this list, +maintainer candidates are selected and proposed on the maintainers mailing list. + +After a candidate has been announced on the maintainers mailing list, the +existing maintainers are given five business days to discuss the candidate, +raise objections and cast their vote. Candidates must be approved by at least 66% of the current maintainers by adding their vote on the mailing +list. Only maintainers of the repository that the candidate is proposed for are +allowed to vote. + +If a candidate is approved, a maintainer will contact the candidate to invite +the candidate to open a pull request that adds the contributor to the +MAINTAINERS file. The candidate becomes a maintainer once the pull request is +merged. +""" + + [Rules.stepping-down-policy] + + title = "Stepping down policy" + + text = """ +Life priorities, interests, and passions can change. If you're a maintainer but +feel you must remove yourself from the list, inform other maintainers that you +intend to step down, and if possible, help find someone to pick up your work. +At the very least, ensure your work can be continued where you left off. + +After you've informed other maintainers, create a pull request to remove +yourself from the MAINTAINERS file. +""" + + [Rules.inactive-maintainers] + + title = "Removal of inactive maintainers" + + text = """ +Similar to the procedure for adding new maintainers, existing maintainers can +be removed from the list if they do not show significant activity on the +project. Periodically, the maintainers review the list of maintainers and their +activity over the last three months. + +If a maintainer has shown insufficient activity over this period, a neutral +person will contact the maintainer to ask if they want to continue being +a maintainer. 
If the maintainer decides to step down as a maintainer, they +open a pull request to be removed from the MAINTAINERS file. + +If the maintainer wants to remain a maintainer, but is unable to perform the +required duties they can be removed with a vote of at least 66% of +the current maintainers. An e-mail is sent to the +mailing list, inviting maintainers of the project to vote. The voting period is +five business days. Issues related to a maintainer's performance should be +discussed with them among the other maintainers so that they are not surprised +by a pull request removing them. +""" + + [Rules.decisions] + + title = "How are decisions made?" + + text = """ +Short answer: EVERYTHING IS A PULL REQUEST. + +distribution is an open-source project with an open design philosophy. This means +that the repository is the source of truth for EVERY aspect of the project, +including its philosophy, design, road map, and APIs. *If it's part of the +project, it's in the repo. If it's in the repo, it's part of the project.* + +As a result, all decisions can be expressed as changes to the repository. An +implementation change is a change to the source code. An API change is a change +to the API specification. A philosophy change is a change to the philosophy +manifesto, and so on. + +All decisions affecting distribution, big and small, follow the same 3 steps: + +* Step 1: Open a pull request. Anyone can do this. + +* Step 2: Discuss the pull request. Anyone can do this. + +* Step 3: Merge or refuse the pull request. Who does this depends on the nature +of the pull request and which areas of the project it affects. +""" + + [Rules.DCO] + + title = "Helping contributors with the DCO" + + text = """ +The [DCO or `Sign your work`]( +https://github.com/moby/moby/blob/master/CONTRIBUTING.md#sign-your-work) +requirement is not intended as a roadblock or speed bump. + +Some distribution contributors are not as familiar with `git`, or have used a web +based editor, and thus asking them to `git commit --amend -s` is not the best +way forward. + +In this case, maintainers can update the commits based on clause (c) of the DCO. +The most trivial way for a contributor to allow the maintainer to do this, is to +add a DCO signature in a pull requests's comment, or a maintainer can simply +note that the change is sufficiently trivial that it does not substantially +change the existing contribution - i.e., a spelling change. + +When you add someone's DCO, please also add your own to keep a log. +""" + + [Rules."no direct push"] + + title = "I'm a maintainer. Should I make pull requests too?" + + text = """ +Yes. Nobody should ever push to master directly. All changes should be +made through a pull request. +""" + + [Rules.tsc] + + title = "Conflict Resolution and technical disputes" + + text = """ +distribution defers to the [Technical Steering Committee](https://github.com/moby/tsc) for escalations and resolution on disputes for technical matters." + """ + + [Rules.meta] + + title = "How is this process changed?" + + text = "Just like everything else: by making a pull request :)" + +# Current project organization +[Org] + + [Org.Maintainers] + people = [ + "dmcgowan", + "dmp42", + "stevvooe", + ] + [Org.Reviewers] + people = [ + "manishtomar", + "caervs", + "davidswu", + "RobbKistler" + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. 
+ + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.caervs] + Name = "Ryan Abrams" + Email = "rdabrams@gmail.com" + GitHub = "caervs" + + [people.davidswu] + Name = "David Wu" + Email = "dwu7401@gmail.com" + GitHub = "davidswu" + + [people.dmcgowan] + Name = "Derek McGowan" + Email = "derek@mcgstyle.net" + GitHub = "dmcgowan" + + [people.dmp42] + Name = "Olivier Gambier" + Email = "olivier@docker.com" + GitHub = "dmp42" + + [people.manishtomar] + Name = "Manish Tomar" + Email = "manish.tomar@docker.com" + GitHub = "manishtomar" + + [people.RobbKistler] + Name = "Robb Kistler" + Email = "robb.kistler@docker.com" + GitHub = "RobbKistler" + + [people.stevvooe] + Name = "Stephen Day" + Email = "stephen.day@docker.com" + GitHub = "stevvooe" diff --git a/extender/code/vendor/github.com/docker/distribution/Makefile b/extender/code/vendor/github.com/docker/distribution/Makefile new file mode 100644 index 000000000..4635c6eca --- /dev/null +++ b/extender/code/vendor/github.com/docker/distribution/Makefile @@ -0,0 +1,102 @@ +# Root directory of the project (absolute path). +ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST)))) + +# Used to populate version variable in main package. +VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) +REVISION=$(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi) + + +PKG=github.com/docker/distribution + +# Project packages. +PACKAGES=$(shell go list -tags "${BUILDTAGS}" ./... | grep -v /vendor/) +INTEGRATION_PACKAGE=${PKG} +COVERAGE_PACKAGES=$(filter-out ${PKG}/registry/storage/driver/%,${PACKAGES}) + + +# Project binaries. +COMMANDS=registry digest registry-api-descriptor-template + +# Allow turning off function inlining and variable registerization +ifeq (${DISABLE_OPTIMIZATION},true) + GO_GCFLAGS=-gcflags "-N -l" + VERSION:="$(VERSION)-noopt" +endif + +WHALE = "+" + +# Go files +# +TESTFLAGS_RACE= +GOFILES=$(shell find . -type f -name '*.go') +GO_TAGS=$(if $(BUILDTAGS),-tags "$(BUILDTAGS)",) +GO_LDFLAGS=-ldflags '-s -w -X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) $(EXTRA_LDFLAGS)' + +BINARIES=$(addprefix bin/,$(COMMANDS)) + +# Flags passed to `go test` +TESTFLAGS ?= -v $(TESTFLAGS_RACE) +TESTFLAGS_PARALLEL ?= 8 + +.PHONY: all build binaries check clean test test-race test-full integration coverage +.DEFAULT: all + +all: binaries + +# This only needs to be generated by hand when cutting full releases. +version/version.go: + @echo "$(WHALE) $@" + ./version/version.sh > $@ + +check: ## run all linters (TODO: enable "unused", "varcheck", "ineffassign", "unconvert", "staticheck", "goimports", "structcheck") + @echo "$(WHALE) $@" + gometalinter --config .gometalinter.json ./... 
+ +test: ## run tests, except integration test with test.short + @echo "$(WHALE) $@" + @go test ${GO_TAGS} -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) + +test-race: ## run tests, except integration test with test.short and race + @echo "$(WHALE) $@" + @go test ${GO_TAGS} -race -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) + +test-full: ## run tests, except integration tests + @echo "$(WHALE) $@" + @go test ${GO_TAGS} ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) + +integration: ## run integration tests + @echo "$(WHALE) $@" + @go test ${TESTFLAGS} -parallel ${TESTFLAGS_PARALLEL} ${INTEGRATION_PACKAGE} + +coverage: ## generate coverprofiles from the unit tests + @echo "$(WHALE) $@" + @rm -f coverage.txt + @go test ${GO_TAGS} -i ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}) 2> /dev/null + @( for pkg in $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}); do \ + go test ${GO_TAGS} ${TESTFLAGS} \ + -cover \ + -coverprofile=profile.out \ + -covermode=atomic $$pkg || exit; \ + if [ -f profile.out ]; then \ + cat profile.out >> coverage.txt; \ + rm profile.out; \ + fi; \ + done ) + +FORCE: + +# Build a binary from a cmd. +bin/%: cmd/% FORCE + @echo "$(WHALE) $@${BINARY_SUFFIX}" + @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@${BINARY_SUFFIX} ${GO_LDFLAGS} ${GO_TAGS} ./$< + +binaries: $(BINARIES) ## build binaries + @echo "$(WHALE) $@" + +build: + @echo "$(WHALE) $@" + @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} ${GO_LDFLAGS} ${GO_TAGS} $(PACKAGES) + +clean: ## clean up binaries + @echo "$(WHALE) $@" + @rm -f $(BINARIES) diff --git a/extender/code/vendor/github.com/docker/distribution/README.md b/extender/code/vendor/github.com/docker/distribution/README.md new file mode 100644 index 000000000..998878850 --- /dev/null +++ b/extender/code/vendor/github.com/docker/distribution/README.md @@ -0,0 +1,130 @@ +# Distribution + +The Docker toolset to pack, ship, store, and deliver content. + +This repository's main product is the Docker Registry 2.0 implementation +for storing and distributing Docker images. It supersedes the +[docker/docker-registry](https://github.com/docker/docker-registry) +project with a new API design, focused around security and performance. + + + +[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master) +[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution) + +This repository contains the following components: + +|**Component** |Description | +|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. | +| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | +| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) | +| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. 
|
+
+### How does this integrate with Docker engine?
+
+This project should provide an implementation of a V2 API for use in the [Docker
+core project](https://github.com/docker/docker). The API should be embeddable
+and simplify the process of securely pulling and pushing content from `docker`
+daemons.
+
+### What are the long term goals of the Distribution project?
+
+The _Distribution_ project has the further long term goal of providing a
+secure tool chain for distributing content. The specifications, APIs and tools
+should be as useful with Docker as they are without.
+
+Our goal is to design a professional grade and extensible content distribution
+system that allows users to:
+
+* Enjoy an efficient, secure and reliable way to store, manage, package and
+  exchange content
+* Hack/roll their own on top of healthy open-source components
+* Implement their own homemade solution through good specs and a solid
+  extension mechanism.
+
+## More about Registry 2.0
+
+The new registry implementation provides the following benefits:
+
+- faster push and pull
+- new, more efficient implementation
+- simplified deployment
+- pluggable storage backend
+- webhook notifications
+
+For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md).
+
+### Who needs to deploy a registry?
+
+By default, Docker users pull images from Docker's public registry instance.
+[Installing Docker](https://docs.docker.com/engine/installation/) gives users this
+ability. Users can also push images to a repository on Docker's public registry,
+if they have a [Docker Hub](https://hub.docker.com/) account.
+
+For some users and even companies, this default behavior is sufficient. For
+others, it is not.
+
+For example, users with their own software products may want to maintain a
+registry for private, company images. Also, you may wish to deploy your own
+image repository for images used in testing or continuous integration. For these
+use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md)
+may be the better choice.
+
+### Migration to Registry 2.0
+
+For those who have previously deployed their own registry based on the Registry
+1.0 implementation and wish to deploy a Registry 2.0 while retaining images,
+data migration is required. A tool to assist with migration efforts has been
+created. For more information see [docker/migrator](https://github.com/docker/migrator).
+
+## Contribute
+
+Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
+issues, fixes, and patches to this project. If you are contributing code, see
+the instructions for [building a development environment](BUILDING.md).
+
+## Support
+
+If any issues are encountered while using the _Distribution_ project, several
+avenues are available for support:
+
+|**Channel**        |Where                                                                      |
+|-------------------|---------------------------------------------------------------------------|
+| **IRC**           | #docker-distribution on FreeNode                                          |
+| **Issue Tracker** | github.com/docker/distribution/issues                                     |
+| **Google Groups** | https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution  |
+| **Mailing List**  | docker@dockerproject.org                                                  |
+
+## License
+
+This project is distributed under [Apache License, Version 2.0](LICENSE).
diff --git a/extender/code/vendor/github.com/docker/distribution/ROADMAP.md b/extender/code/vendor/github.com/docker/distribution/ROADMAP.md
new file mode 100644
index 000000000..701127afe
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/distribution/ROADMAP.md
@@ -0,0 +1,267 @@
+# Roadmap
+
+The Distribution Project consists of several components, some of which are
+still being defined. This document defines the high-level goals of the
+project, identifies the current components, and defines the release
+relationship to the Docker Platform.
+
+* [Distribution Goals](#distribution-goals)
+* [Distribution Components](#distribution-components)
+* [Project Planning](#project-planning): release relationship to the Docker Platform.
+
+This road map is a living document, providing an overview of the goals and
+considerations made in respect of the future of the project.
+
+## Distribution Goals
+
+- Replace the existing [docker registry](https://github.com/docker/docker-registry)
+  implementation as the primary implementation.
+- Replace the existing push and pull code in the docker engine with the
+  distribution package.
+- Define a strong data model for distributing docker images.
+- Provide a flexible distribution tool kit for use in the docker platform.
+- Unlock new distribution models.
+
+## Distribution Components
+
+Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming
+features and bugfixes for a component will be added to the relevant milestone. If a feature or
+bugfix is not part of a milestone, it is currently unscheduled for
+implementation.
+
+* [Registry](#registry)
+* [Distribution Package](#distribution-package)
+
+***
+
+### Registry
+
+The new Docker registry is the main portion of the distribution repository.
+Registry 2.0 is the first release of the next-generation registry. This was
+primarily focused on implementing the [new registry
+API](https://github.com/docker/distribution/blob/master/docs/spec/api.md),
+with a focus on security and performance.
+
+Following from the Distribution project goals above, we have a set of goals
+for registry v2 that we would like to follow in the design. New features
+should be compared against these goals.
+
+#### Data Storage and Distribution First
+
+The registry's first goal is to provide a reliable, consistent storage
+location for Docker images. The registry should only provide the minimal
+amount of indexing required to fetch image data and no more.
+
+This means we should be selective in new features and API additions, including
+those that may require expensive, ever-growing indexes. Requests should be
+servable in "constant time".
+
+#### Content Addressability
+
+All data objects used in the registry API should be content addressable.
+Content identifiers should be secure and verifiable. This provides a secure,
+reliable base from which to build more advanced content distribution systems.
+
+#### Content Agnostic
+
+In the past, changes to the image format would require large changes in Docker
+and the Registry. By decoupling the distribution and image format, we can
+allow the formats to progress without having to coordinate between the two.
+This means that we should be focused on decoupling Docker from the registry
+just as much as decoupling the registry from Docker. Such an approach will
+allow us to unlock new distribution models that haven't been possible before.
+
+We can take this further by saying that the new registry should be content
+agnostic. The registry provides a model of names, tags, manifests and content
+addresses, and that model can be used to work with content.
+
+#### Simplicity
+
+The new registry should be closer to a microservice component than its
+predecessor. This means it should have a narrower API and a low number of
+service dependencies. It should be easy to deploy.
+
+This means that other solutions should be explored before changing the API or
+adding extra dependencies. If functionality is required, can it be added as an
+extension or companion service?
+
+#### Extensibility
+
+The registry should provide extension points for adding functionality,
+keeping the core scope narrow while still making new capabilities possible.
+
+Features like search, indexing, synchronization and registry explorers fall
+into this category. No such feature should be added unless we've found it
+impossible to do through an extension.
+
+#### Active Feature Discussions
+
+The following are feature discussions that are currently active.
+
+If you don't see your favorite, unimplemented feature, feel free to contact us
+via IRC or the mailing list and we can talk about adding it. The goal here is
+to make sure that new features go through a rigorous design process before
+landing in the registry.
+
+##### Proxying to other Registries
+
+A _pull-through caching_ mode exists for the registry, but is restricted from
+within the docker client to only mirror the official Docker Hub. This functionality
+can be expanded when image provenance has been specified and implemented in the
+distribution project.
+
+##### Metadata storage
+
+Metadata for the registry is currently stored with the manifest and layer data on
+the storage backend. While this is a big win for simplicity and reliably maintaining
+state, it comes with the cost of consistency and high latency. The mutable registry
+metadata operations should be abstracted behind an API which will allow ACID-compliant
+storage systems to handle metadata.
+
+##### Peer to Peer transfer
+
+Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit
+
+##### Indexing, Search and Discovery
+
+The original registry provided some implementation of search for use with
+private registries. Support has been elided from V2 since we'd like to
+decouple search functionality from the registry. This makes the registry
+simpler to deploy, especially in use cases where search is not needed, and
+lets us decouple the image format from the registry.
+
+There are explorations into using the catalog API and notification system to
+build external indexes. The current line of thought is that we will define a
+common search API to index and query docker images. Such a system could be run
+as a companion to a registry or set of registries to power discovery.
+
+The main issue with search and discovery is that there are so many ways to
+accomplish it. There are two aspects to this project. The first is deciding
+how it will be done, including an API definition that can work with changing
+data formats. The second is the process of integrating with `docker search`.
+We expect that someone will attempt to address the problem with existing tools,
+either proposing the result as a standard search API or using the experience
+to inform a standardization process.
+Once this has been explored, we will integrate with the docker client.
+
+Please see the following for more detail:
+
+- https://github.com/docker/distribution/issues/206
+
+##### Deletes
+
+> __NOTE:__ Deletes are a much-requested feature. Before requesting this
+feature or participating in discussion, we ask that you read this section in
+full and understand the problems behind deletes.
+
+While, at first glance, implementing deletes seems simple, there are a number
+of mitigating factors that make many solutions not ideal or even pathological
+in the context of a registry. The following paragraphs discuss the background
+and approaches that could be applied to arrive at a solution.
+
+The goal of deletes in any system is to remove unused or unneeded data. Only
+data requested for deletion should be removed and no other data. Removing
+unintended data is worse than _not_ removing data that was requested for
+removal, but ideally both are supported. Generally, according to this rule, we
+err on the side of holding data longer than needed, ensuring that it is only
+removed when we can be certain that it can be removed. With the current
+behavior, we opt to hold onto the data forever, ensuring that data cannot be
+incorrectly removed.
+
+To understand the problems with implementing deletes, one must understand the
+data model. All registry data is stored in a filesystem layout, implemented on
+a "storage driver", effectively a _virtual file system_ (VFS). The storage
+system must assume that this VFS layer will be eventually consistent and has
+poor read-after-write consistency, since this is the lowest common denominator
+among the storage drivers. This is mitigated by writing values in
+reverse-dependent order, but makes wider transactional operations unsafe.
+
+Layered on the VFS model is a content-addressable _directed, acyclic graph_
+(DAG) made up of blobs. Manifests reference layers. Tags reference manifests.
+Since the same data can be referenced by multiple manifests, we only store
+data once, even if it is in different repositories. Thus, we have a set of
+blobs, referenced by tags and manifests. If we want to delete a blob, we need
+to be certain that it is no longer referenced by another manifest or tag. When
+we delete a manifest, we can also try to delete the referenced blobs. Deciding
+whether or not a blob has an active reference is the crux of the problem.
+
+Conceptually, deleting a manifest and its resources is quite simple. Just find
+all the manifests, enumerate the referenced blobs and delete the blobs not in
+that set. An astute observer will recognize this as a garbage collection
+problem. As with garbage collection in programming languages, this is very
+simple when one always has a consistent view. When one adds parallelism and an
+inconsistent view of data, it becomes very challenging.
+
+A simple example can demonstrate this. Let's say we are deleting a manifest
+_A_ in one process. We scan the manifest and decide that all the blobs are
+ready for deletion. Concurrently, we have another process accepting a new
+manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_
+is accepted and all the blobs are considered present, so the operation
+proceeds. The original process then deletes the referenced blobs, assuming
+they were unreferenced. The manifest _B_, which we thought had all of its data
+present, can no longer be served by the registry, since the dependent data has
+been deleted.
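+
+As a minimal illustration of the garbage-collection view above (a sketch only;
+the `manifestStore` and `blobStore` interfaces are assumed for this example and
+are not part of the registry codebase), a naive "lock the world" collector
+might look like:
+
+```go
+package gc
+
+import (
+	"context"
+
+	"github.com/opencontainers/go-digest"
+)
+
+// Assumed interfaces, narrowed to exactly what the sketch needs.
+type manifestStore interface {
+	Enumerate(ctx context.Context, fn func(digest.Digest) error) error
+	References(ctx context.Context, m digest.Digest) ([]digest.Digest, error)
+}
+
+type blobStore interface {
+	Enumerate(ctx context.Context, fn func(digest.Digest) error) error
+	Delete(ctx context.Context, b digest.Digest) error
+}
+
+// markAndSweep deletes every blob not referenced by any manifest. It is only
+// correct if all writes are halted while it runs; run concurrently with
+// uploads, it suffers exactly the race described in the example above.
+func markAndSweep(ctx context.Context, manifests manifestStore, blobs blobStore) error {
+	marked := map[digest.Digest]struct{}{}
+	// Mark phase: record every blob reachable from some manifest.
+	if err := manifests.Enumerate(ctx, func(m digest.Digest) error {
+		refs, err := manifests.References(ctx, m)
+		if err != nil {
+			return err
+		}
+		marked[m] = struct{}{}
+		for _, r := range refs {
+			marked[r] = struct{}{}
+		}
+		return nil
+	}); err != nil {
+		return err
+	}
+	// Sweep phase: delete anything that was never marked.
+	return blobs.Enumerate(ctx, func(b digest.Digest) error {
+		if _, ok := marked[b]; !ok {
+			return blobs.Delete(ctx, b)
+		}
+		return nil
+	})
+}
+```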
+
+Deleting data from the registry safely requires some way to coordinate this
+operation. The following approaches are being considered:
+
+- _Reference Counting_ - Maintain a count of references to each blob. This is
+  challenging for a number of reasons: (1) maintaining a consistent consensus
+  of reference counts across a set of Registries and (2) building the initial
+  list of reference counts for an existing registry. These challenges can be
+  met with a consensus protocol like Paxos or Raft in the first case and a
+  necessary but simple scan in the second.
+- _Lock the World GC_ - Halt all writes to the data store. Walk the data store
+  and find all blob references. Delete all unreferenced blobs. This approach
+  is very simple but requires disabling writes for a period of time while the
+  service reads all data. This is slow and expensive but very accurate and
+  effective.
+- _Generational GC_ - Do something similar to the above, but instead of blocking
+  writes, writes are sent to another storage backend while reads are broadcast
+  to the new and old backends. GC is then performed on the read-only portion.
+  Because writes land in the new backend, the data in the read-only section
+  can be safely deleted. The main drawbacks of this approach are complexity
+  and coordination.
+- _Centralized Oracle_ - Using a centralized, transactional database, we can
+  know exactly which data is referenced at any given time. This avoids the
+  coordination problem by managing this data in a single location. We trade
+  off metadata scalability for simplicity and performance. This is a very good
+  option for most registry deployments, but it would create a bottleneck for
+  registry metadata. However, metadata is generally not the main bottleneck
+  when serving images.
+
+Please let us know if other solutions exist that we have yet to enumerate.
+Note that for any approach, implementation is a massive consideration. For
+example, a mark-sweep based solution may seem simple, but the amount of
+coordination work may offset the extra work it would take to build a
+_Centralized Oracle_. We'll accept proposals for any solution but please
+coordinate with us before dropping code.
+
+At this time, we have traded off simplicity and ease of deployment for disk
+space. Simplicity and ease of deployment tend to reduce developer involvement,
+which is currently the most expensive resource in software engineering. Taking
+on any solution for deletes will greatly affect these factors, trading off
+very cheap disk space for a complex deployment and operational story.
+
+Please see the following issues for more detail:
+
+- https://github.com/docker/distribution/issues/422
+- https://github.com/docker/distribution/issues/461
+- https://github.com/docker/distribution/issues/462
+
+### Distribution Package
+
+At its core, the Distribution Project is a set of Go packages that make up
+Distribution Components. At this time, most of these packages make up the
+Registry implementation.
+
+The package itself is considered unstable. If you're using it, please take
+care to vendor the version you depend on.
+
+For feature additions, please see the Registry section. In the future, we may break out a
+separate Roadmap for distribution-specific features that apply to more than
+just the registry.
+
+***
+
+### Project Planning
+
+An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap.
[Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress. + diff --git a/extender/code/vendor/github.com/docker/distribution/blobs.go b/extender/code/vendor/github.com/docker/distribution/blobs.go new file mode 100644 index 000000000..c0e9261be --- /dev/null +++ b/extender/code/vendor/github.com/docker/distribution/blobs.go @@ -0,0 +1,265 @@ +package distribution + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "time" + + "github.com/docker/distribution/reference" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go/v1" +) + +var ( + // ErrBlobExists returned when blob already exists + ErrBlobExists = errors.New("blob exists") + + // ErrBlobDigestUnsupported when blob digest is an unsupported version. + ErrBlobDigestUnsupported = errors.New("unsupported blob digest") + + // ErrBlobUnknown when blob is not found. + ErrBlobUnknown = errors.New("unknown blob") + + // ErrBlobUploadUnknown returned when upload is not found. + ErrBlobUploadUnknown = errors.New("blob upload unknown") + + // ErrBlobInvalidLength returned when the blob has an expected length on + // commit, meaning mismatched with the descriptor or an invalid value. + ErrBlobInvalidLength = errors.New("blob invalid length") +) + +// ErrBlobInvalidDigest returned when digest check fails. +type ErrBlobInvalidDigest struct { + Digest digest.Digest + Reason error +} + +func (err ErrBlobInvalidDigest) Error() string { + return fmt.Sprintf("invalid digest for referenced layer: %v, %v", + err.Digest, err.Reason) +} + +// ErrBlobMounted returned when a blob is mounted from another repository +// instead of initiating an upload session. +type ErrBlobMounted struct { + From reference.Canonical + Descriptor Descriptor +} + +func (err ErrBlobMounted) Error() string { + return fmt.Sprintf("blob mounted from: %v to: %v", + err.From, err.Descriptor) +} + +// Descriptor describes targeted content. Used in conjunction with a blob +// store, a descriptor can be used to fetch, store and target any kind of +// blob. The struct also describes the wire protocol format. Fields should +// only be added but never changed. +type Descriptor struct { + // MediaType describe the type of the content. All text based formats are + // encoded as utf-8. + MediaType string `json:"mediaType,omitempty"` + + // Size in bytes of content. + Size int64 `json:"size,omitempty"` + + // Digest uniquely identifies the content. A byte stream can be verified + // against this digest. + Digest digest.Digest `json:"digest,omitempty"` + + // URLs contains the source URLs of this content. + URLs []string `json:"urls,omitempty"` + + // Annotations contains arbitrary metadata relating to the targeted content. + Annotations map[string]string `json:"annotations,omitempty"` + + // Platform describes the platform which the image in the manifest runs on. + // This should only be used when referring to a manifest. + Platform *v1.Platform `json:"platform,omitempty"` + + // NOTE: Before adding a field here, please ensure that all + // other options have been exhausted. Much of the type relationships + // depend on the simplicity of this type. +} + +// Descriptor returns the descriptor, to make it satisfy the Describable +// interface. 
Note that implementations of Describable are generally objects +// which can be described, not simply descriptors; this exception is in place +// to make it more convenient to pass actual descriptors to functions that +// expect Describable objects. +func (d Descriptor) Descriptor() Descriptor { + return d +} + +// BlobStatter makes blob descriptors available by digest. The service may +// provide a descriptor of a different digest if the provided digest is not +// canonical. +type BlobStatter interface { + // Stat provides metadata about a blob identified by the digest. If the + // blob is unknown to the describer, ErrBlobUnknown will be returned. + Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error) +} + +// BlobDeleter enables deleting blobs from storage. +type BlobDeleter interface { + Delete(ctx context.Context, dgst digest.Digest) error +} + +// BlobEnumerator enables iterating over blobs from storage +type BlobEnumerator interface { + Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error +} + +// BlobDescriptorService manages metadata about a blob by digest. Most +// implementations will not expose such an interface explicitly. Such mappings +// should be maintained by interacting with the BlobIngester. Hence, this is +// left off of BlobService and BlobStore. +type BlobDescriptorService interface { + BlobStatter + + // SetDescriptor assigns the descriptor to the digest. The provided digest and + // the digest in the descriptor must map to identical content but they may + // differ on their algorithm. The descriptor must have the canonical + // digest of the content and the digest algorithm must match the + // annotators canonical algorithm. + // + // Such a facility can be used to map blobs between digest domains, with + // the restriction that the algorithm of the descriptor must match the + // canonical algorithm (ie sha256) of the annotator. + SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error + + // Clear enables descriptors to be unlinked + Clear(ctx context.Context, dgst digest.Digest) error +} + +// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService. +type BlobDescriptorServiceFactory interface { + BlobAccessController(svc BlobDescriptorService) BlobDescriptorService +} + +// ReadSeekCloser is the primary reader type for blob data, combining +// io.ReadSeeker with io.Closer. +type ReadSeekCloser interface { + io.ReadSeeker + io.Closer +} + +// BlobProvider describes operations for getting blob data. +type BlobProvider interface { + // Get returns the entire blob identified by digest along with the descriptor. + Get(ctx context.Context, dgst digest.Digest) ([]byte, error) + + // Open provides a ReadSeekCloser to the blob identified by the provided + // descriptor. If the blob is not known to the service, an error will be + // returned. + Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error) +} + +// BlobServer can serve blobs via http. +type BlobServer interface { + // ServeBlob attempts to serve the blob, identified by dgst, via http. The + // service may decide to redirect the client elsewhere or serve the data + // directly. + // + // This handler only issues successful responses, such as 2xx or 3xx, + // meaning it serves data or issues a redirect. If the blob is not + // available, an error will be returned and the caller may still issue a + // response. + // + // The implementation may serve the same blob from a different digest + // domain. 
The appropriate headers will be set for the blob, unless they + // have already been set by the caller. + ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error +} + +// BlobIngester ingests blob data. +type BlobIngester interface { + // Put inserts the content p into the blob service, returning a descriptor + // or an error. + Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error) + + // Create allocates a new blob writer to add a blob to this service. The + // returned handle can be written to and later resumed using an opaque + // identifier. With this approach, one can Close and Resume a BlobWriter + // multiple times until the BlobWriter is committed or cancelled. + Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error) + + // Resume attempts to resume a write to a blob, identified by an id. + Resume(ctx context.Context, id string) (BlobWriter, error) +} + +// BlobCreateOption is a general extensible function argument for blob creation +// methods. A BlobIngester may choose to honor any or none of the given +// BlobCreateOptions, which can be specific to the implementation of the +// BlobIngester receiving them. +// TODO (brianbland): unify this with ManifestServiceOption in the future +type BlobCreateOption interface { + Apply(interface{}) error +} + +// CreateOptions is a collection of blob creation modifiers relevant to general +// blob storage intended to be configured by the BlobCreateOption.Apply method. +type CreateOptions struct { + Mount struct { + ShouldMount bool + From reference.Canonical + // Stat allows to pass precalculated descriptor to link and return. + // Blob access check will be skipped if set. + Stat *Descriptor + } +} + +// BlobWriter provides a handle for inserting data into a blob store. +// Instances should be obtained from BlobWriteService.Writer and +// BlobWriteService.Resume. If supported by the store, a writer can be +// recovered with the id. +type BlobWriter interface { + io.WriteCloser + io.ReaderFrom + + // Size returns the number of bytes written to this blob. + Size() int64 + + // ID returns the identifier for this writer. The ID can be used with the + // Blob service to later resume the write. + ID() string + + // StartedAt returns the time this blob write was started. + StartedAt() time.Time + + // Commit completes the blob writer process. The content is verified + // against the provided provisional descriptor, which may result in an + // error. Depending on the implementation, written data may be validated + // against the provisional descriptor fields. If MediaType is not present, + // the implementation may reject the commit or assign "application/octet- + // stream" to the blob. The returned descriptor may have a different + // digest depending on the blob store, referred to as the canonical + // descriptor. + Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error) + + // Cancel ends the blob write without storing any data and frees any + // associated resources. Any data written thus far will be lost. Cancel + // implementations should allow multiple calls even after a commit that + // result in a no-op. This allows use of Cancel in a defer statement, + // increasing the assurance that it is correctly called. + Cancel(ctx context.Context) error +} + +// BlobService combines the operations to access, read and write blobs. This +// can be used to describe remote blob services. 
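+//
+// A hypothetical usage sketch (illustrative only, not from the original
+// package docs), assuming svc is a BlobService and payload a []byte: stat a
+// digest and upload the payload only when it is unknown:
+//
+//	if _, err := svc.Stat(ctx, dgst); err == ErrBlobUnknown {
+//		desc, err := svc.Put(ctx, "application/octet-stream", payload)
+//		// use desc and handle err here
+//	}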
+type BlobService interface {
+	BlobStatter
+	BlobProvider
+	BlobIngester
+}
+
+// BlobStore represents the entire suite of blob-related operations. Such an
+// implementation can access, read, write, delete and serve blobs.
+type BlobStore interface {
+	BlobService
+	BlobServer
+	BlobDeleter
+}
diff --git a/extender/code/vendor/github.com/docker/distribution/digestset/set.go b/extender/code/vendor/github.com/docker/distribution/digestset/set.go
new file mode 100644
index 000000000..71327dca7
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/distribution/digestset/set.go
@@ -0,0 +1,247 @@
+package digestset
+
+import (
+	"errors"
+	"sort"
+	"strings"
+	"sync"
+
+	digest "github.com/opencontainers/go-digest"
+)
+
+var (
+	// ErrDigestNotFound is used when a matching digest
+	// could not be found in a set.
+	ErrDigestNotFound = errors.New("digest not found")
+
+	// ErrDigestAmbiguous is used when multiple digests
+	// are found in a set. None of the matching digests
+	// should be considered valid matches.
+	ErrDigestAmbiguous = errors.New("ambiguous digest string")
+)
+
+// Set is used to hold a unique set of digests which
+// may be easily referenced by a string representation
+// of the digest, as well as by a short representation.
+// The uniqueness of the short representation is based on other
+// digests in the set. If digests are omitted from this set,
+// collisions in a larger set may not be detected, therefore it
+// is important to always do short representation lookups on
+// the complete set of digests. To mitigate collisions, an
+// appropriately long short code should be used.
+type Set struct {
+	mutex   sync.RWMutex
+	entries digestEntries
+}
+
+// NewSet creates an empty set of digests
+// which may have digests added.
+func NewSet() *Set {
+	return &Set{
+		entries: digestEntries{},
+	}
+}
+
+// checkShortMatch checks whether two digests match as either whole
+// values or short values. This function does not test equality,
+// rather whether the second value could match against the first
+// value.
+func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool {
+	if len(hex) == len(shortHex) {
+		if hex != shortHex {
+			return false
+		}
+		if len(shortAlg) > 0 && string(alg) != shortAlg {
+			return false
+		}
+	} else if !strings.HasPrefix(hex, shortHex) {
+		return false
+	} else if len(shortAlg) > 0 && string(alg) != shortAlg {
+		return false
+	}
+	return true
+}
+
+// Lookup looks for a digest matching the given string representation.
+// If no digests could be found, ErrDigestNotFound will be returned
+// with an empty digest value. If multiple matches are found,
+// ErrDigestAmbiguous will be returned with an empty digest value.
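+//
+// Illustrative example (assumed values, not from the original docs): with a
+// set containing only sha256:9f86d0..., both Lookup("9f86") and a lookup of
+// the full digest string return that digest, while Lookup("ffff") returns
+// ErrDigestNotFound.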
+func (dst *Set) Lookup(d string) (digest.Digest, error) { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + if len(dst.entries) == 0 { + return "", ErrDigestNotFound + } + var ( + searchFunc func(int) bool + alg digest.Algorithm + hex string + ) + dgst, err := digest.Parse(d) + if err == digest.ErrDigestInvalidFormat { + hex = d + searchFunc = func(i int) bool { + return dst.entries[i].val >= d + } + } else { + hex = dgst.Hex() + alg = dgst.Algorithm() + searchFunc = func(i int) bool { + if dst.entries[i].val == hex { + return dst.entries[i].alg >= alg + } + return dst.entries[i].val >= hex + } + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { + return "", ErrDigestNotFound + } + if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { + return dst.entries[idx].digest, nil + } + if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { + return "", ErrDigestAmbiguous + } + + return dst.entries[idx].digest, nil +} + +// Add adds the given digest to the set. An error will be returned +// if the given digest is invalid. If the digest already exists in the +// set, this operation will be a no-op. +func (dst *Set) Add(d digest.Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) { + dst.entries = append(dst.entries, entry) + return nil + } else if dst.entries[idx].digest == d { + return nil + } + + entries := append(dst.entries, nil) + copy(entries[idx+1:], entries[idx:len(entries)-1]) + entries[idx] = entry + dst.entries = entries + return nil +} + +// Remove removes the given digest from the set. An err will be +// returned if the given digest is invalid. If the digest does +// not exist in the set, this operation will be a no-op. +func (dst *Set) Remove(d digest.Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + // Not found if idx is after or value at idx is not digest + if idx == len(dst.entries) || dst.entries[idx].digest != d { + return nil + } + + entries := dst.entries + copy(entries[idx:], entries[idx+1:]) + entries = entries[:len(entries)-1] + dst.entries = entries + + return nil +} + +// All returns all the digests in the set +func (dst *Set) All() []digest.Digest { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + retValues := make([]digest.Digest, len(dst.entries)) + for i := range dst.entries { + retValues[i] = dst.entries[i].digest + } + + return retValues +} + +// ShortCodeTable returns a map of Digest to unique short codes. The +// length represents the minimum value, the maximum length may be the +// entire value of digest if uniqueness cannot be achieved without the +// full value. This function will attempt to make short codes as short +// as possible to be unique. 
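+//
+// For example (illustrative values): with entries sha256:1234... and
+// sha256:1249... and length 2, the returned codes would be "123" and "124",
+// since the shared two-character prefix "12" is ambiguous and must be
+// extended.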
+func ShortCodeTable(dst *Set, length int) map[digest.Digest]string { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + m := make(map[digest.Digest]string, len(dst.entries)) + l := length + resetIdx := 0 + for i := 0; i < len(dst.entries); i++ { + var short string + extended := true + for extended { + extended = false + if len(dst.entries[i].val) <= l { + short = dst.entries[i].digest.String() + } else { + short = dst.entries[i].val[:l] + for j := i + 1; j < len(dst.entries); j++ { + if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { + if j > resetIdx { + resetIdx = j + } + extended = true + } else { + break + } + } + if extended { + l++ + } + } + } + m[dst.entries[i].digest] = short + if i >= resetIdx { + l = length + } + } + return m +} + +type digestEntry struct { + alg digest.Algorithm + val string + digest digest.Digest +} + +type digestEntries []*digestEntry + +func (d digestEntries) Len() int { + return len(d) +} + +func (d digestEntries) Less(i, j int) bool { + if d[i].val != d[j].val { + return d[i].val < d[j].val + } + return d[i].alg < d[j].alg +} + +func (d digestEntries) Swap(i, j int) { + d[i], d[j] = d[j], d[i] +} diff --git a/extender/code/vendor/github.com/docker/distribution/doc.go b/extender/code/vendor/github.com/docker/distribution/doc.go new file mode 100644 index 000000000..bdd8cb708 --- /dev/null +++ b/extender/code/vendor/github.com/docker/distribution/doc.go @@ -0,0 +1,7 @@ +// Package distribution will define the interfaces for the components of +// docker distribution. The goal is to allow users to reliably package, ship +// and store content related to docker images. +// +// This is currently a work in progress. More details are available in the +// README.md. +package distribution diff --git a/extender/code/vendor/github.com/docker/distribution/errors.go b/extender/code/vendor/github.com/docker/distribution/errors.go new file mode 100644 index 000000000..8e0b788d6 --- /dev/null +++ b/extender/code/vendor/github.com/docker/distribution/errors.go @@ -0,0 +1,119 @@ +package distribution + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +// ErrAccessDenied is returned when an access to a requested resource is +// denied. +var ErrAccessDenied = errors.New("access denied") + +// ErrManifestNotModified is returned when a conditional manifest GetByTag +// returns nil due to the client indicating it has the latest version +var ErrManifestNotModified = errors.New("manifest not modified") + +// ErrUnsupported is returned when an unimplemented or unsupported action is +// performed +var ErrUnsupported = errors.New("operation unsupported") + +// ErrSchemaV1Unsupported is returned when a client tries to upload a schema v1 +// manifest but the registry is configured to reject it +var ErrSchemaV1Unsupported = errors.New("manifest schema v1 unsupported") + +// ErrTagUnknown is returned if the given tag is not known by the tag service +type ErrTagUnknown struct { + Tag string +} + +func (err ErrTagUnknown) Error() string { + return fmt.Sprintf("unknown tag=%s", err.Tag) +} + +// ErrRepositoryUnknown is returned if the named repository is not known by +// the registry. +type ErrRepositoryUnknown struct { + Name string +} + +func (err ErrRepositoryUnknown) Error() string { + return fmt.Sprintf("unknown repository name=%s", err.Name) +} + +// ErrRepositoryNameInvalid should be used to denote an invalid repository +// name. Reason may set, indicating the cause of invalidity. 
+type ErrRepositoryNameInvalid struct {
+	Name   string
+	Reason error
+}
+
+func (err ErrRepositoryNameInvalid) Error() string {
+	return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason)
+}
+
+// ErrManifestUnknown is returned if the manifest is not known by the
+// registry.
+type ErrManifestUnknown struct {
+	Name string
+	Tag  string
+}
+
+func (err ErrManifestUnknown) Error() string {
+	return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag)
+}
+
+// ErrManifestUnknownRevision is returned when a manifest cannot be found by
+// revision within a repository.
+type ErrManifestUnknownRevision struct {
+	Name     string
+	Revision digest.Digest
+}
+
+func (err ErrManifestUnknownRevision) Error() string {
+	return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision)
+}
+
+// ErrManifestUnverified is returned when the registry is unable to verify
+// the manifest.
+type ErrManifestUnverified struct{}
+
+func (ErrManifestUnverified) Error() string {
+	return "unverified manifest"
+}
+
+// ErrManifestVerification provides a type to collect errors encountered
+// during manifest verification. Currently, it accepts errors of all types,
+// but it may be narrowed to those involving manifest verification.
+type ErrManifestVerification []error
+
+func (errs ErrManifestVerification) Error() string {
+	var parts []string
+	for _, err := range errs {
+		parts = append(parts, err.Error())
+	}
+
+	return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ","))
+}
+
+// ErrManifestBlobUnknown is returned when a referenced blob cannot be found.
+type ErrManifestBlobUnknown struct {
+	Digest digest.Digest
+}
+
+func (err ErrManifestBlobUnknown) Error() string {
+	return fmt.Sprintf("unknown blob %v on manifest", err.Digest)
+}
+
+// ErrManifestNameInvalid should be used to denote an invalid manifest
+// name. Reason may be set, indicating the cause of invalidity.
+type ErrManifestNameInvalid struct {
+	Name   string
+	Reason error
+}
+
+func (err ErrManifestNameInvalid) Error() string {
+	return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason)
+}
diff --git a/extender/code/vendor/github.com/docker/distribution/manifests.go b/extender/code/vendor/github.com/docker/distribution/manifests.go
new file mode 100644
index 000000000..1816baea1
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/distribution/manifests.go
@@ -0,0 +1,125 @@
+package distribution
+
+import (
+	"context"
+	"fmt"
+	"mime"
+
+	"github.com/opencontainers/go-digest"
+)
+
+// Manifest represents a registry object specifying a set of
+// references and an optional target.
+type Manifest interface {
+	// References returns a list of objects which make up this manifest.
+	// A reference is anything which can be represented by a
+	// distribution.Descriptor. These can consist of layers, resources or other
+	// manifests.
+	//
+	// While no particular order is required, implementations should return
+	// them from highest to lowest priority. For example, one might want to
+	// return the base layer before the top layer.
+	References() []Descriptor
+
+	// Payload provides the serialized format of the manifest, in addition to
+	// the media type.
+	Payload() (mediaType string, payload []byte, err error)
+}
+
+// ManifestBuilder creates a manifest allowing one to include dependencies.
+// Instances can be obtained from a version-specific manifest package.
+// Manifest-specific data is passed into the function which creates the builder.
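+//
+// A hypothetical flow (assumed for illustration; concrete constructors live
+// in the version-specific manifest packages):
+//
+//	var b ManifestBuilder // obtained from a schema-specific package
+//	if err := b.AppendReference(layerDescriptor); err != nil {
+//		return err
+//	}
+//	m, err := b.Build(ctx)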
+type ManifestBuilder interface {
+	// Build creates the manifest from this builder.
+	Build(ctx context.Context) (Manifest, error)
+
+	// References returns a list of objects which have been added to this
+	// builder. The dependencies are returned in the order they were added,
+	// which should be from base to head.
+	References() []Descriptor
+
+	// AppendReference includes the given object in the manifest after any
+	// existing dependencies. If the add fails, such as when adding an
+	// unsupported dependency, an error may be returned.
+	//
+	// The destination of the reference is dependent on the manifest type and
+	// the dependency type.
+	AppendReference(dependency Describable) error
+}
+
+// ManifestService describes operations on image manifests.
+type ManifestService interface {
+	// Exists returns true if the manifest exists.
+	Exists(ctx context.Context, dgst digest.Digest) (bool, error)
+
+	// Get retrieves the manifest specified by the given digest
+	Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error)
+
+	// Put creates or updates the given manifest returning the manifest digest
+	Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error)
+
+	// Delete removes the manifest specified by the given digest. Deleting
+	// a manifest that doesn't exist will return ErrManifestNotFound
+	Delete(ctx context.Context, dgst digest.Digest) error
+}
+
+// ManifestEnumerator enables iterating over manifests
+type ManifestEnumerator interface {
+	// Enumerate calls ingester for each manifest.
+	Enumerate(ctx context.Context, ingester func(digest.Digest) error) error
+}
+
+// Describable is an interface for descriptors
+type Describable interface {
+	Descriptor() Descriptor
+}
+
+// ManifestMediaTypes returns the supported media types for manifests.
+func ManifestMediaTypes() (mediaTypes []string) {
+	for t := range mappings {
+		if t != "" {
+			mediaTypes = append(mediaTypes, t)
+		}
+	}
+	return
+}
+
+// UnmarshalFunc implements manifest unmarshalling for a given MediaType
+type UnmarshalFunc func([]byte) (Manifest, Descriptor, error)
+
+var mappings = make(map[string]UnmarshalFunc, 0)
+
+// UnmarshalManifest looks up manifest unmarshal functions based on
+// MediaType
+func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) {
+	// Need to look up by the actual media type, not the raw contents of
+	// the header. Strip semicolons and anything following them.
+	var mediaType string
+	if ctHeader != "" {
+		var err error
+		mediaType, _, err = mime.ParseMediaType(ctHeader)
+		if err != nil {
+			return nil, Descriptor{}, err
+		}
+	}
+
+	unmarshalFunc, ok := mappings[mediaType]
+	if !ok {
+		unmarshalFunc, ok = mappings[""]
+		if !ok {
+			return nil, Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType)
+		}
+	}
+
+	return unmarshalFunc(p)
+}
+
+// RegisterManifestSchema registers an UnmarshalFunc for a given schema type.
+// This should be called from specific manifest packages at init time.
+func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error {
+	if _, ok := mappings[mediaType]; ok {
+		return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType)
+	}
+	mappings[mediaType] = u
+	return nil
+}
diff --git a/extender/code/vendor/github.com/docker/distribution/reference/helpers.go b/extender/code/vendor/github.com/docker/distribution/reference/helpers.go
new file mode 100644
index 000000000..978df7eab
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/distribution/reference/helpers.go
@@ -0,0 +1,42 @@
+package reference
+
+import "path"
+
+// IsNameOnly returns true if reference only contains a repo name.
+func IsNameOnly(ref Named) bool {
+	if _, ok := ref.(NamedTagged); ok {
+		return false
+	}
+	if _, ok := ref.(Canonical); ok {
+		return false
+	}
+	return true
+}
+
+// FamiliarName returns the familiar name string
+// for the given named, familiarizing if needed.
+func FamiliarName(ref Named) string {
+	if nn, ok := ref.(normalizedNamed); ok {
+		return nn.Familiar().Name()
+	}
+	return ref.Name()
+}
+
+// FamiliarString returns the familiar string representation
+// for the given reference, familiarizing if needed.
+func FamiliarString(ref Reference) string {
+	if nn, ok := ref.(normalizedNamed); ok {
+		return nn.Familiar().String()
+	}
+	return ref.String()
+}
+
+// FamiliarMatch reports whether ref matches the specified pattern.
+// See https://godoc.org/path#Match for supported patterns.
+func FamiliarMatch(pattern string, ref Reference) (bool, error) {
+	matched, err := path.Match(pattern, FamiliarString(ref))
+	if namedRef, isNamed := ref.(Named); isNamed && !matched {
+		matched, _ = path.Match(pattern, FamiliarName(namedRef))
+	}
+	return matched, err
+}
diff --git a/extender/code/vendor/github.com/docker/distribution/reference/normalize.go b/extender/code/vendor/github.com/docker/distribution/reference/normalize.go
new file mode 100644
index 000000000..2d71fc5e9
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/distribution/reference/normalize.go
@@ -0,0 +1,170 @@
+package reference
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/docker/distribution/digestset"
+	"github.com/opencontainers/go-digest"
+)
+
+var (
+	legacyDefaultDomain = "index.docker.io"
+	defaultDomain       = "docker.io"
+	officialRepoName    = "library"
+	defaultTag          = "latest"
+)
+
+// normalizedNamed represents a name which has been
+// normalized and has a familiar form. A familiar name
+// is what is used in Docker UI. An example normalized
+// name is "docker.io/library/ubuntu" and corresponding
+// familiar name of "ubuntu".
+type normalizedNamed interface {
+	Named
+	Familiar() Named
+}
+
+// ParseNormalizedNamed parses a string into a named reference
+// transforming a familiar name from Docker UI to a fully
+// qualified reference. If the value may be an identifier
+// use ParseAnyReference.
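+//
+// A minimal illustration (editor's sketch, not upstream documentation):
+//
+//	named, _ := ParseNormalizedNamed("ubuntu")
+//	named.Name()        // "docker.io/library/ubuntu"
+//	FamiliarName(named) // "ubuntu"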
+func ParseNormalizedNamed(s string) (Named, error) {
+	if ok := anchoredIdentifierRegexp.MatchString(s); ok {
+		return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
+	}
+	domain, remainder := splitDockerDomain(s)
+	var remoteName string
+	if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
+		remoteName = remainder[:tagSep]
+	} else {
+		remoteName = remainder
+	}
+	if strings.ToLower(remoteName) != remoteName {
+		return nil, errors.New("invalid reference format: repository name must be lowercase")
+	}
+
+	ref, err := Parse(domain + "/" + remainder)
+	if err != nil {
+		return nil, err
+	}
+	named, isNamed := ref.(Named)
+	if !isNamed {
+		return nil, fmt.Errorf("reference %s has no name", ref.String())
+	}
+	return named, nil
+}
+
+// splitDockerDomain splits a repository name to domain and remotename string.
+// If no valid domain is found, the default domain is used. Repository name
+// needs to be already validated before.
+func splitDockerDomain(name string) (domain, remainder string) {
+	i := strings.IndexRune(name, '/')
+	if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
+		domain, remainder = defaultDomain, name
+	} else {
+		domain, remainder = name[:i], name[i+1:]
+	}
+	if domain == legacyDefaultDomain {
+		domain = defaultDomain
+	}
+	if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
+		remainder = officialRepoName + "/" + remainder
+	}
+	return
+}
+
+// familiarizeName returns a shortened version of the name familiar
+// to the Docker UI. Familiar names have the default domain
+// "docker.io" and "library/" repository prefix removed.
+// For example, "docker.io/library/redis" will have the familiar
+// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
+// Returns a familiarized named only reference.
+func familiarizeName(named namedRepository) repository {
+	repo := repository{
+		domain: named.Domain(),
+		path:   named.Path(),
+	}
+
+	if repo.domain == defaultDomain {
+		repo.domain = ""
+		// Handle official repositories which have the pattern "library/<official repo name>"
+		if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
+			repo.path = split[1]
+		}
+	}
+	return repo
+}
+
+func (r reference) Familiar() Named {
+	return reference{
+		namedRepository: familiarizeName(r.namedRepository),
+		tag:             r.tag,
+		digest:          r.digest,
+	}
+}
+
+func (r repository) Familiar() Named {
+	return familiarizeName(r)
+}
+
+func (t taggedReference) Familiar() Named {
+	return taggedReference{
+		namedRepository: familiarizeName(t.namedRepository),
+		tag:             t.tag,
+	}
+}
+
+func (c canonicalReference) Familiar() Named {
+	return canonicalReference{
+		namedRepository: familiarizeName(c.namedRepository),
+		digest:          c.digest,
+	}
+}
+
+// TagNameOnly adds the default tag "latest" to a reference if it only has
+// a repo name.
+func TagNameOnly(ref Named) Named {
+	if IsNameOnly(ref) {
+		namedTagged, err := WithTag(ref, defaultTag)
+		if err != nil {
+			// Default tag must be valid, to create a NamedTagged
+			// type with non-validated input the WithTag function
+			// should be used instead
+			panic(err)
+		}
+		return namedTagged
+	}
+	return ref
+}
+
+// ParseAnyReference parses a reference string as a possible identifier,
+// full digest, or familiar name.
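+//
+// The three accepted forms, sketched with illustrative inputs (editor's
+// note; the hex values are truncated for display):
+//
+//	ParseAnyReference("b94d27b9934d...")        // 64-char hex identifier -> digest reference
+//	ParseAnyReference("sha256:b94d27b9934d...") // full digest -> digest reference
+//	ParseAnyReference("ubuntu:18.04")           // familiar name -> normalized named reference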
+func ParseAnyReference(ref string) (Reference, error) {
+	if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
+		return digestReference("sha256:" + ref), nil
+	}
+	if dgst, err := digest.Parse(ref); err == nil {
+		return digestReference(dgst), nil
+	}
+
+	return ParseNormalizedNamed(ref)
+}
+
+// ParseAnyReferenceWithSet parses a reference string as a possible short
+// identifier to be matched in a digest set, a full digest, or familiar name.
+func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
+	if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok {
+		dgst, err := ds.Lookup(ref)
+		if err == nil {
+			return digestReference(dgst), nil
+		}
+	} else {
+		if dgst, err := digest.Parse(ref); err == nil {
+			return digestReference(dgst), nil
+		}
+	}
+
+	return ParseNormalizedNamed(ref)
+}
diff --git a/extender/code/vendor/github.com/docker/distribution/reference/reference.go b/extender/code/vendor/github.com/docker/distribution/reference/reference.go
new file mode 100644
index 000000000..2f66cca87
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/distribution/reference/reference.go
@@ -0,0 +1,433 @@
+// Package reference provides a general type to represent any way of referencing images within the registry.
+// Its main purpose is to abstract tags and digests (content-addressable hash).
+//
+// Grammar
+//
+//	reference                  := name [ ":" tag ] [ "@" digest ]
+//	name                       := [domain '/'] path-component ['/' path-component]*
+//	domain                     := domain-component ['.' domain-component]* [':' port-number]
+//	domain-component           := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
+//	port-number                := /[0-9]+/
+//	path-component             := alpha-numeric [separator alpha-numeric]*
+//	alpha-numeric              := /[a-z0-9]+/
+//	separator                  := /[_.]|__|[-]*/
+//
+//	tag                        := /[\w][\w.-]{0,127}/
+//
+//	digest                     := digest-algorithm ":" digest-hex
+//	digest-algorithm           := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
+//	digest-algorithm-separator := /[+.-_]/
+//	digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
+//	digest-hex                 := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
+//
+//	identifier                 := /[a-f0-9]{64}/
+//	short-identifier           := /[a-f0-9]{6,64}/
+package reference
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/opencontainers/go-digest"
+)
+
+const (
+	// NameTotalLengthMax is the maximum total number of characters in a repository name.
+	NameTotalLengthMax = 255
+)
+
+var (
+	// ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
+	ErrReferenceInvalidFormat = errors.New("invalid reference format")
+
+	// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
+	ErrTagInvalidFormat = errors.New("invalid tag format")
+
+	// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
+	ErrDigestInvalidFormat = errors.New("invalid digest format")
+
+	// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
+	ErrNameContainsUppercase = errors.New("repository name must be lowercase")
+
+	// ErrNameEmpty is returned for empty, invalid repository names.
+	ErrNameEmpty = errors.New("repository name must have at least one component")
+
+	// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
+	ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
+
+	// ErrNameNotCanonical is returned when a name is not canonical.
+ ErrNameNotCanonical = errors.New("repository name must be canonical") +) + +// Reference is an opaque object reference identifier that may include +// modifiers such as a hostname, name, tag, and digest. +type Reference interface { + // String returns the full reference + String() string +} + +// Field provides a wrapper type for resolving correct reference types when +// working with encoding. +type Field struct { + reference Reference +} + +// AsField wraps a reference in a Field for encoding. +func AsField(reference Reference) Field { + return Field{reference} +} + +// Reference unwraps the reference type from the field to +// return the Reference object. This object should be +// of the appropriate type to further check for different +// reference types. +func (f Field) Reference() Reference { + return f.reference +} + +// MarshalText serializes the field to byte text which +// is the string of the reference. +func (f Field) MarshalText() (p []byte, err error) { + return []byte(f.reference.String()), nil +} + +// UnmarshalText parses text bytes by invoking the +// reference parser to ensure the appropriately +// typed reference object is wrapped by field. +func (f *Field) UnmarshalText(p []byte) error { + r, err := Parse(string(p)) + if err != nil { + return err + } + + f.reference = r + return nil +} + +// Named is an object with a full name +type Named interface { + Reference + Name() string +} + +// Tagged is an object which has a tag +type Tagged interface { + Reference + Tag() string +} + +// NamedTagged is an object including a name and tag. +type NamedTagged interface { + Named + Tag() string +} + +// Digested is an object which has a digest +// in which it can be referenced by +type Digested interface { + Reference + Digest() digest.Digest +} + +// Canonical reference is an object with a fully unique +// name including a name with domain and digest +type Canonical interface { + Named + Digest() digest.Digest +} + +// namedRepository is a reference to a repository with a name. +// A namedRepository has both domain and path components. +type namedRepository interface { + Named + Domain() string + Path() string +} + +// Domain returns the domain part of the Named reference +func Domain(named Named) string { + if r, ok := named.(namedRepository); ok { + return r.Domain() + } + domain, _ := splitDomain(named.Name()) + return domain +} + +// Path returns the name without the domain part of the Named reference +func Path(named Named) (name string) { + if r, ok := named.(namedRepository); ok { + return r.Path() + } + _, path := splitDomain(named.Name()) + return path +} + +func splitDomain(name string) (string, string) { + match := anchoredNameRegexp.FindStringSubmatch(name) + if len(match) != 3 { + return "", name + } + return match[1], match[2] +} + +// SplitHostname splits a named reference into a +// hostname and name string. If no valid hostname is +// found, the hostname is empty and the full value +// is returned as name +// DEPRECATED: Use Domain or Path +func SplitHostname(named Named) (string, string) { + if r, ok := named.(namedRepository); ok { + return r.Domain(), r.Path() + } + return splitDomain(named.Name()) +} + +// Parse parses s and returns a syntactically valid Reference. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: Parse will not handle short digests. 
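+//
+// For example (editor's illustration; Parse does not normalize, so the
+// domain must be given explicitly):
+//
+//	ref, _ := Parse("example.com/foo/bar:v1")
+//	// ref implements NamedTagged: Name() == "example.com/foo/bar", Tag() == "v1"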
+func Parse(s string) (Reference, error) { + matches := ReferenceRegexp.FindStringSubmatch(s) + if matches == nil { + if s == "" { + return nil, ErrNameEmpty + } + if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { + return nil, ErrNameContainsUppercase + } + return nil, ErrReferenceInvalidFormat + } + + if len(matches[1]) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + var repo repository + + nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) + if nameMatch != nil && len(nameMatch) == 3 { + repo.domain = nameMatch[1] + repo.path = nameMatch[2] + } else { + repo.domain = "" + repo.path = matches[1] + } + + ref := reference{ + namedRepository: repo, + tag: matches[2], + } + if matches[3] != "" { + var err error + ref.digest, err = digest.Parse(matches[3]) + if err != nil { + return nil, err + } + } + + r := getBestReferenceType(ref) + if r == nil { + return nil, ErrNameEmpty + } + + return r, nil +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name and be in the canonical +// form, otherwise an error is returned. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: ParseNamed will not handle short digests. +func ParseNamed(s string) (Named, error) { + named, err := ParseNormalizedNamed(s) + if err != nil { + return nil, err + } + if named.String() != s { + return nil, ErrNameNotCanonical + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + match := anchoredNameRegexp.FindStringSubmatch(name) + if match == nil || len(match) != 3 { + return nil, ErrReferenceInvalidFormat + } + return repository{ + domain: match[1], + path: match[2], + }, nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if canonical, ok := name.(Canonical); ok { + return reference{ + namedRepository: repo, + tag: tag, + digest: canonical.Digest(), + }, nil + } + return taggedReference{ + namedRepository: repo, + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if tagged, ok := name.(Tagged); ok { + return reference{ + namedRepository: repo, + tag: tagged.Tag(), + digest: digest, + }, nil + } + return canonicalReference{ + namedRepository: repo, + digest: digest, + }, nil +} + +// TrimNamed removes any tag or digest from the named reference. 
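+//
+// For example (editor's illustration):
+//
+//	tagged, _ := Parse("example.com/foo:v1")
+//	TrimNamed(tagged.(Named)).String() // "example.com/foo"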
+func TrimNamed(ref Named) Named { + domain, path := SplitHostname(ref) + return repository{ + domain: domain, + path: path, + } +} + +func getBestReferenceType(ref reference) Reference { + if ref.Name() == "" { + // Allow digest only references + if ref.digest != "" { + return digestReference(ref.digest) + } + return nil + } + if ref.tag == "" { + if ref.digest != "" { + return canonicalReference{ + namedRepository: ref.namedRepository, + digest: ref.digest, + } + } + return ref.namedRepository + } + if ref.digest == "" { + return taggedReference{ + namedRepository: ref.namedRepository, + tag: ref.tag, + } + } + + return ref +} + +type reference struct { + namedRepository + tag string + digest digest.Digest +} + +func (r reference) String() string { + return r.Name() + ":" + r.tag + "@" + r.digest.String() +} + +func (r reference) Tag() string { + return r.tag +} + +func (r reference) Digest() digest.Digest { + return r.digest +} + +type repository struct { + domain string + path string +} + +func (r repository) String() string { + return r.Name() +} + +func (r repository) Name() string { + if r.domain == "" { + return r.path + } + return r.domain + "/" + r.path +} + +func (r repository) Domain() string { + return r.domain +} + +func (r repository) Path() string { + return r.path +} + +type digestReference digest.Digest + +func (d digestReference) String() string { + return digest.Digest(d).String() +} + +func (d digestReference) Digest() digest.Digest { + return digest.Digest(d) +} + +type taggedReference struct { + namedRepository + tag string +} + +func (t taggedReference) String() string { + return t.Name() + ":" + t.tag +} + +func (t taggedReference) Tag() string { + return t.tag +} + +type canonicalReference struct { + namedRepository + digest digest.Digest +} + +func (c canonicalReference) String() string { + return c.Name() + "@" + c.digest.String() +} + +func (c canonicalReference) Digest() digest.Digest { + return c.digest +} diff --git a/extender/code/vendor/github.com/docker/distribution/reference/regexp.go b/extender/code/vendor/github.com/docker/distribution/reference/regexp.go new file mode 100644 index 000000000..786034932 --- /dev/null +++ b/extender/code/vendor/github.com/docker/distribution/reference/regexp.go @@ -0,0 +1,143 @@ +package reference + +import "regexp" + +var ( + // alphaNumericRegexp defines the alpha numeric atom, typically a + // component of names. This only allows lower case characters and digits. + alphaNumericRegexp = match(`[a-z0-9]+`) + + // separatorRegexp defines the separators allowed to be embedded in name + // components. This allow one period, one or two underscore and multiple + // dashes. + separatorRegexp = match(`(?:[._]|__|[-]*)`) + + // nameComponentRegexp restricts registry path component names to start + // with at least one letter or number, with following parts able to be + // separated by one period, one or two underscore and multiple dashes. + nameComponentRegexp = expression( + alphaNumericRegexp, + optional(repeated(separatorRegexp, alphaNumericRegexp))) + + // domainComponentRegexp restricts the registry domain component of a + // repository name to start with a component as defined by DomainRegexp + // and followed by an optional port. + domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) + + // DomainRegexp defines the structure of potential domain components + // that may be part of image names. 
This is purposely a subset of what is + // allowed by DNS to ensure backwards compatibility with Docker image + // names. + DomainRegexp = expression( + domainComponentRegexp, + optional(repeated(literal(`.`), domainComponentRegexp)), + optional(literal(`:`), match(`[0-9]+`))) + + // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. + TagRegexp = match(`[\w][\w.-]{0,127}`) + + // anchoredTagRegexp matches valid tag names, anchored at the start and + // end of the matched string. + anchoredTagRegexp = anchored(TagRegexp) + + // DigestRegexp matches valid digests. + DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) + + // anchoredDigestRegexp matches valid digests, anchored at the start and + // end of the matched string. + anchoredDigestRegexp = anchored(DigestRegexp) + + // NameRegexp is the format for the name component of references. The + // regexp has capturing groups for the domain and name part omitting + // the separating forward slash from either. + NameRegexp = expression( + optional(DomainRegexp, literal(`/`)), + nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp))) + + // anchoredNameRegexp is used to parse a name value, capturing the + // domain and trailing components. + anchoredNameRegexp = anchored( + optional(capture(DomainRegexp), literal(`/`)), + capture(nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp)))) + + // ReferenceRegexp is the full supported format of a reference. The regexp + // is anchored and has capturing groups for name, tag, and digest + // components. + ReferenceRegexp = anchored(capture(NameRegexp), + optional(literal(":"), capture(TagRegexp)), + optional(literal("@"), capture(DigestRegexp))) + + // IdentifierRegexp is the format for string identifier used as a + // content addressable identifier using sha256. These identifiers + // are like digests without the algorithm, since sha256 is used. + IdentifierRegexp = match(`([a-f0-9]{64})`) + + // ShortIdentifierRegexp is the format used to represent a prefix + // of an identifier. A prefix may be used to match a sha256 identifier + // within a list of trusted identifiers. + ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) + + // anchoredIdentifierRegexp is used to check or match an + // identifier value, anchored at start and end of string. + anchoredIdentifierRegexp = anchored(IdentifierRegexp) + + // anchoredShortIdentifierRegexp is used to check if a value + // is a possible identifier prefix, anchored at start and end + // of string. + anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) +) + +// match compiles the string to a regular expression. +var match = regexp.MustCompile + +// literal compiles s into a literal regular expression, escaping any regexp +// reserved characters. +func literal(s string) *regexp.Regexp { + re := match(regexp.QuoteMeta(s)) + + if _, complete := re.LiteralPrefix(); !complete { + panic("must be a literal") + } + + return re +} + +// expression defines a full expression, where each regular expression must +// follow the previous. +func expression(res ...*regexp.Regexp) *regexp.Regexp { + var s string + for _, re := range res { + s += re.String() + } + + return match(s) +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. 
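+//
+// For example (editor's note), optional(match(`[0-9]+`)) yields the
+// expression `(?:[0-9]+)?`.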
+func optional(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `?`) +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. +func repeated(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `+`) +} + +// group wraps the regexp in a non-capturing group. +func group(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(?:` + expression(res...).String() + `)`) +} + +// capture wraps the expression in a capturing group. +func capture(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(` + expression(res...).String() + `)`) +} + +// anchored anchors the regular expression by adding start and end delimiters. +func anchored(res ...*regexp.Regexp) *regexp.Regexp { + return match(`^` + expression(res...).String() + `$`) +} diff --git a/extender/code/vendor/github.com/docker/distribution/registry.go b/extender/code/vendor/github.com/docker/distribution/registry.go new file mode 100644 index 000000000..6c3210989 --- /dev/null +++ b/extender/code/vendor/github.com/docker/distribution/registry.go @@ -0,0 +1,118 @@ +package distribution + +import ( + "context" + + "github.com/docker/distribution/reference" +) + +// Scope defines the set of items that match a namespace. +type Scope interface { + // Contains returns true if the name belongs to the namespace. + Contains(name string) bool +} + +type fullScope struct{} + +func (f fullScope) Contains(string) bool { + return true +} + +// GlobalScope represents the full namespace scope which contains +// all other scopes. +var GlobalScope = Scope(fullScope{}) + +// Namespace represents a collection of repositories, addressable by name. +// Generally, a namespace is backed by a set of one or more services, +// providing facilities such as registry access, trust, and indexing. +type Namespace interface { + // Scope describes the names that can be used with this Namespace. The + // global namespace will have a scope that matches all names. The scope + // effectively provides an identity for the namespace. + Scope() Scope + + // Repository should return a reference to the named repository. The + // registry may or may not have the repository but should always return a + // reference. + Repository(ctx context.Context, name reference.Named) (Repository, error) + + // Repositories fills 'repos' with a lexicographically sorted catalog of repositories + // up to the size of 'repos' and returns the value 'n' for the number of entries + // which were filled. 'last' contains an offset in the catalog, and 'err' will be + // set to io.EOF if there are no more entries to obtain. 
+	Repositories(ctx context.Context, repos []string, last string) (n int, err error)
+
+	// Blobs returns a blob enumerator to access all blobs
+	Blobs() BlobEnumerator
+
+	// BlobStatter returns a BlobStatter to control blob access.
+	BlobStatter() BlobStatter
+}
+
+// RepositoryEnumerator describes an operation to enumerate repositories
+type RepositoryEnumerator interface {
+	Enumerate(ctx context.Context, ingester func(string) error) error
+}
+
+// RepositoryRemover removes given repository
+type RepositoryRemover interface {
+	Remove(ctx context.Context, name reference.Named) error
+}
+
+// ManifestServiceOption is a function argument for Manifest Service methods
+type ManifestServiceOption interface {
+	Apply(ManifestService) error
+}
+
+// WithTag allows a tag to be passed into Put
+func WithTag(tag string) ManifestServiceOption {
+	return WithTagOption{tag}
+}
+
+// WithTagOption holds a tag
+type WithTagOption struct{ Tag string }
+
+// Apply conforms to the ManifestServiceOption interface
+func (o WithTagOption) Apply(m ManifestService) error {
+	// no implementation
+	return nil
+}
+
+// WithManifestMediaTypes lists the media types the client wishes
+// the server to provide.
+func WithManifestMediaTypes(mediaTypes []string) ManifestServiceOption {
+	return WithManifestMediaTypesOption{mediaTypes}
+}
+
+// WithManifestMediaTypesOption holds a list of accepted media types
+type WithManifestMediaTypesOption struct{ MediaTypes []string }
+
+// Apply conforms to the ManifestServiceOption interface
+func (o WithManifestMediaTypesOption) Apply(m ManifestService) error {
+	// no implementation
+	return nil
+}
+
+// Repository is a named collection of manifests and layers.
+type Repository interface {
+	// Named returns the name of the repository.
+	Named() reference.Named
+
+	// Manifests returns a reference to this repository's manifest service,
+	// with the supplied options applied.
+	Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error)
+
+	// Blobs returns a reference to this repository's blob service.
+	Blobs(ctx context.Context) BlobStore
+
+	// TODO(stevvooe): The above BlobStore return can probably be relaxed to
+	// be a BlobService for use with clients. This will allow such
+	// implementations to avoid implementing ServeBlob.
+
+	// Tags returns a reference to this repository's tag service
+	Tags(ctx context.Context) TagService
+}
+
+// TODO(stevvooe): Must add close methods to all these. May want to change the
+// way instances are created to better reflect internal dependency
+// relationships.
diff --git a/extender/code/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/extender/code/vendor/github.com/docker/distribution/registry/api/errcode/errors.go
new file mode 100644
index 000000000..6d9bb4b62
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/distribution/registry/api/errcode/errors.go
@@ -0,0 +1,267 @@
+package errcode
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// ErrorCoder is the base interface for ErrorCode and Error allowing
+// users of each to just call ErrorCode to get the real ID of each
+type ErrorCoder interface {
+	ErrorCode() ErrorCode
+}
+
+// ErrorCode represents the error type. The errors are serialized via strings
+// and the integer format may change and should *never* be exported.
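+//
+// New codes are defined by registering a descriptor (editor's sketch; the
+// group and values below are illustrative only):
+//
+//	var ErrorCodeExample = Register("mygroup", ErrorDescriptor{
+//		Value:          "EXAMPLE",
+//		Message:        "an example error",
+//		Description:    "Shown here only to illustrate registration.",
+//		HTTPStatusCode: http.StatusBadRequest,
+//	})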
+type ErrorCode int
+
+var _ error = ErrorCode(0)
+
+// ErrorCode just returns itself
+func (ec ErrorCode) ErrorCode() ErrorCode {
+	return ec
+}
+
+// Error returns the ID/Value
+func (ec ErrorCode) Error() string {
+	// NOTE(stevvooe): Cannot use message here since it may have unpopulated args.
+	return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1))
+}
+
+// Descriptor returns the descriptor for the error code.
+func (ec ErrorCode) Descriptor() ErrorDescriptor {
+	d, ok := errorCodeToDescriptors[ec]
+
+	if !ok {
+		return ErrorCodeUnknown.Descriptor()
+	}
+
+	return d
+}
+
+// String returns the canonical identifier for this error code.
+func (ec ErrorCode) String() string {
+	return ec.Descriptor().Value
+}
+
+// Message returns the human-readable error message for this error code.
+func (ec ErrorCode) Message() string {
+	return ec.Descriptor().Message
+}
+
+// MarshalText encodes the receiver into UTF-8-encoded text and returns the
+// result.
+func (ec ErrorCode) MarshalText() (text []byte, err error) {
+	return []byte(ec.String()), nil
+}
+
+// UnmarshalText decodes the form generated by MarshalText.
+func (ec *ErrorCode) UnmarshalText(text []byte) error {
+	desc, ok := idToDescriptors[string(text)]
+
+	if !ok {
+		desc = ErrorCodeUnknown.Descriptor()
+	}
+
+	*ec = desc.Code
+
+	return nil
+}
+
+// WithMessage creates a new Error struct based on the passed-in info and
+// overrides the Message property.
+func (ec ErrorCode) WithMessage(message string) Error {
+	return Error{
+		Code:    ec,
+		Message: message,
+	}
+}
+
+// WithDetail creates a new Error struct based on the passed-in info and
+// set the Detail property appropriately
+func (ec ErrorCode) WithDetail(detail interface{}) Error {
+	return Error{
+		Code:    ec,
+		Message: ec.Message(),
+	}.WithDetail(detail)
+}
+
+// WithArgs creates a new Error struct and sets the Args slice
+func (ec ErrorCode) WithArgs(args ...interface{}) Error {
+	return Error{
+		Code:    ec,
+		Message: ec.Message(),
+	}.WithArgs(args...)
+}
+
+// Error provides a wrapper around ErrorCode with extra Details provided.
+type Error struct {
+	Code    ErrorCode   `json:"code"`
+	Message string      `json:"message"`
+	Detail  interface{} `json:"detail,omitempty"`
+
+	// TODO(duglin): See if we need an "args" property so we can do the
+	// variable substitution right before showing the message to the user
+}
+
+var _ error = Error{}
+
+// ErrorCode returns the ID/Value of this Error
+func (e Error) ErrorCode() ErrorCode {
+	return e.Code
+}
+
+// Error returns a human readable representation of the error.
+func (e Error) Error() string {
+	return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message)
+}
+
+// WithDetail will return a new Error, based on the current one, but with
+// some Detail info added
+func (e Error) WithDetail(detail interface{}) Error {
+	return Error{
+		Code:    e.Code,
+		Message: e.Message,
+		Detail:  detail,
+	}
+}
+
+// WithArgs uses the passed-in list of interface{} as the substitution
+// variables in the Error's Message string, but returns a new Error
+func (e Error) WithArgs(args ...interface{}) Error {
+	return Error{
+		Code:    e.Code,
+		Message: fmt.Sprintf(e.Code.Message(), args...),
+		Detail:  e.Detail,
+	}
+}
+
+// ErrorDescriptor provides relevant information about a given error code.
+type ErrorDescriptor struct {
+	// Code is the error code that this descriptor describes.
+	Code ErrorCode
+
+	// Value provides a unique, string key, often capitalized with
+	// underscores, to identify the error code. This value is used as the
+	// keyed value when serializing api errors.
+	Value string
+
+	// Message is a short, human readable description of the error condition
+	// included in API responses.
+	Message string
+
+	// Description provides a complete account of the error's purpose, suitable
+	// for use in documentation.
+	Description string
+
+	// HTTPStatusCode provides the http status code that is associated with
+	// this error condition.
+	HTTPStatusCode int
+}
+
+// ParseErrorCode returns the value by the string error code.
+// `ErrorCodeUnknown` will be returned if the error is not known.
+func ParseErrorCode(value string) ErrorCode {
+	ed, ok := idToDescriptors[value]
+	if ok {
+		return ed.Code
+	}
+
+	return ErrorCodeUnknown
+}
+
+// Errors provides the envelope for multiple errors and a few sugar methods
+// for use within the application.
+type Errors []error
+
+var _ error = Errors{}
+
+func (errs Errors) Error() string {
+	switch len(errs) {
+	case 0:
+		return ""
+	case 1:
+		return errs[0].Error()
+	default:
+		msg := "errors:\n"
+		for _, err := range errs {
+			msg += err.Error() + "\n"
+		}
+		return msg
+	}
+}
+
+// Len returns the current number of errors.
+func (errs Errors) Len() int {
+	return len(errs)
+}
+
+// MarshalJSON converts slice of error, ErrorCode or Error into a
+// slice of Error - then serializes
+func (errs Errors) MarshalJSON() ([]byte, error) {
+	var tmpErrs struct {
+		Errors []Error `json:"errors,omitempty"`
+	}
+
+	for _, daErr := range errs {
+		var err Error
+
+		switch daErr.(type) {
+		case ErrorCode:
+			err = daErr.(ErrorCode).WithDetail(nil)
+		case Error:
+			err = daErr.(Error)
+		default:
+			err = ErrorCodeUnknown.WithDetail(daErr)
+
+		}
+
+		// If the Error struct was set up and they forgot to set the
+		// Message field (meaning it's "") then grab it from the ErrCode
+		msg := err.Message
+		if msg == "" {
+			msg = err.Code.Message()
+		}
+
+		tmpErrs.Errors = append(tmpErrs.Errors, Error{
+			Code:    err.Code,
+			Message: msg,
+			Detail:  err.Detail,
+		})
+	}
+
+	return json.Marshal(tmpErrs)
+}
+
+// UnmarshalJSON deserializes []Error and then converts it into slice of
+// Error or ErrorCode
+func (errs *Errors) UnmarshalJSON(data []byte) error {
+	var tmpErrs struct {
+		Errors []Error
+	}
+
+	if err := json.Unmarshal(data, &tmpErrs); err != nil {
+		return err
+	}
+
+	var newErrs Errors
+	for _, daErr := range tmpErrs.Errors {
+		// If Message is empty or exactly matches the Code's message string
+		// then just use the Code, no need for a full Error struct
+		if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) {
+			// Errors w/o details get converted to ErrorCode
+			newErrs = append(newErrs, daErr.Code)
+		} else {
+			// Errors w/ details are untouched
+			newErrs = append(newErrs, Error{
+				Code:    daErr.Code,
+				Message: daErr.Message,
+				Detail:  daErr.Detail,
+			})
+		}
+	}
+
+	*errs = newErrs
+	return nil
+}
diff --git a/extender/code/vendor/github.com/docker/distribution/registry/api/errcode/handler.go b/extender/code/vendor/github.com/docker/distribution/registry/api/errcode/handler.go
new file mode 100644
index 000000000..d77e70473
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/distribution/registry/api/errcode/handler.go
@@ -0,0 +1,40 @@
+package errcode
+
+import (
+	"encoding/json"
+	"net/http"
+)
+
+// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err
+// and sets the content-type header to 'application/json'. It will handle
+// ErrorCoder and Errors, and if necessary will create an envelope.
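+//
+// Typical use inside an HTTP handler (editor's sketch; doSomething is a
+// hypothetical application function):
+//
+//	if err := doSomething(); err != nil {
+//		_ = ServeJSON(w, ErrorCodeUnknown.WithDetail(err))
+//		return
+//	}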
+func ServeJSON(w http.ResponseWriter, err error) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + var sc int + + switch errs := err.(type) { + case Errors: + if len(errs) < 1 { + break + } + + if err, ok := errs[0].(ErrorCoder); ok { + sc = err.ErrorCode().Descriptor().HTTPStatusCode + } + case ErrorCoder: + sc = errs.ErrorCode().Descriptor().HTTPStatusCode + err = Errors{err} // create an envelope. + default: + // We just have an unhandled error type, so just place in an envelope + // and move along. + err = Errors{err} + } + + if sc == 0 { + sc = http.StatusInternalServerError + } + + w.WriteHeader(sc) + + return json.NewEncoder(w).Encode(err) +} diff --git a/extender/code/vendor/github.com/docker/distribution/registry/api/errcode/register.go b/extender/code/vendor/github.com/docker/distribution/registry/api/errcode/register.go new file mode 100644 index 000000000..d1e8826c6 --- /dev/null +++ b/extender/code/vendor/github.com/docker/distribution/registry/api/errcode/register.go @@ -0,0 +1,138 @@ +package errcode + +import ( + "fmt" + "net/http" + "sort" + "sync" +) + +var ( + errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} + idToDescriptors = map[string]ErrorDescriptor{} + groupToDescriptors = map[string][]ErrorDescriptor{} +) + +var ( + // ErrorCodeUnknown is a generic error that can be used as a last + // resort if there is no situation-specific error message that can be used + ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ + Value: "UNKNOWN", + Message: "unknown error", + Description: `Generic error returned when the error does not have an + API classification.`, + HTTPStatusCode: http.StatusInternalServerError, + }) + + // ErrorCodeUnsupported is returned when an operation is not supported. + ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ + Value: "UNSUPPORTED", + Message: "The operation is unsupported.", + Description: `The operation was unsupported due to a missing + implementation or invalid set of parameters.`, + HTTPStatusCode: http.StatusMethodNotAllowed, + }) + + // ErrorCodeUnauthorized is returned if a request requires + // authentication. + ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ + Value: "UNAUTHORIZED", + Message: "authentication required", + Description: `The access controller was unable to authenticate + the client. Often this will be accompanied by a + Www-Authenticate HTTP response header indicating how to + authenticate.`, + HTTPStatusCode: http.StatusUnauthorized, + }) + + // ErrorCodeDenied is returned if a client does not have sufficient + // permission to perform an action. + ErrorCodeDenied = Register("errcode", ErrorDescriptor{ + Value: "DENIED", + Message: "requested access to the resource is denied", + Description: `The access controller denied access for the + operation on a resource.`, + HTTPStatusCode: http.StatusForbidden, + }) + + // ErrorCodeUnavailable provides a common error to report unavailability + // of a service or endpoint. + ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ + Value: "UNAVAILABLE", + Message: "service unavailable", + Description: "Returned when a service is not available", + HTTPStatusCode: http.StatusServiceUnavailable, + }) + + // ErrorCodeTooManyRequests is returned if a client attempts too many + // times to contact a service endpoint. 
+ ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ + Value: "TOOMANYREQUESTS", + Message: "too many requests", + Description: `Returned when a client attempts to contact a + service too many times`, + HTTPStatusCode: http.StatusTooManyRequests, + }) +) + +var nextCode = 1000 +var registerLock sync.Mutex + +// Register will make the passed-in error known to the environment and +// return a new ErrorCode +func Register(group string, descriptor ErrorDescriptor) ErrorCode { + registerLock.Lock() + defer registerLock.Unlock() + + descriptor.Code = ErrorCode(nextCode) + + if _, ok := idToDescriptors[descriptor.Value]; ok { + panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) + } + if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { + panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) + } + + groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) + errorCodeToDescriptors[descriptor.Code] = descriptor + idToDescriptors[descriptor.Value] = descriptor + + nextCode++ + return descriptor.Code +} + +type byValue []ErrorDescriptor + +func (a byValue) Len() int { return len(a) } +func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } + +// GetGroupNames returns the list of Error group names that are registered +func GetGroupNames() []string { + keys := []string{} + + for k := range groupToDescriptors { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// GetErrorCodeGroup returns the named group of error descriptors +func GetErrorCodeGroup(name string) []ErrorDescriptor { + desc := groupToDescriptors[name] + sort.Sort(byValue(desc)) + return desc +} + +// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are +// registered, irrespective of what group they're in +func GetErrorAllDescriptors() []ErrorDescriptor { + result := []ErrorDescriptor{} + + for _, group := range GetGroupNames() { + result = append(result, GetErrorCodeGroup(group)...) + } + sort.Sort(byValue(result)) + return result +} diff --git a/extender/code/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go b/extender/code/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go new file mode 100644 index 000000000..2c3ebe165 --- /dev/null +++ b/extender/code/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go @@ -0,0 +1,27 @@ +package challenge + +import ( + "net/url" + "strings" +) + +// FROM: https://golang.org/src/net/http/http.go +// Given a string of the form "host", "host:port", or "[ipv6::address]:port", +// return true if the string includes a port. 
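+//
+// For example (editor's illustration): hasPort("example.com") == false,
+// hasPort("example.com:443") == true, hasPort("[::1]:8080") == true.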
+func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
+
+// FROM: http://golang.org/src/net/http/transport.go
+var portMap = map[string]string{
+	"http":  "80",
+	"https": "443",
+}
+
+// canonicalAddr returns url.Host but always with a ":port" suffix
+// FROM: http://golang.org/src/net/http/transport.go
+func canonicalAddr(url *url.URL) string {
+	addr := url.Host
+	if !hasPort(addr) {
+		return addr + ":" + portMap[url.Scheme]
+	}
+	return addr
+}
diff --git a/extender/code/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/extender/code/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
new file mode 100644
index 000000000..6e3f1ccc4
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
@@ -0,0 +1,237 @@
+package challenge
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+	"sync"
+)
+
+// Challenge carries information from a WWW-Authenticate response header.
+// See RFC 2617.
+type Challenge struct {
+	// Scheme is the auth-scheme according to RFC 2617
+	Scheme string
+
+	// Parameters are the auth-params according to RFC 2617
+	Parameters map[string]string
+}
+
+// Manager manages the challenges for endpoints.
+// The challenges are pulled out of HTTP responses. Only
+// responses which expect challenges should be added to
+// the manager, since a non-unauthorized request will be
+// viewed as not requiring challenges.
+type Manager interface {
+	// GetChallenges returns the challenges for the given
+	// endpoint URL.
+	GetChallenges(endpoint url.URL) ([]Challenge, error)
+
+	// AddResponse adds the response to the challenge
+	// manager. The challenges will be parsed out of
+	// the WWW-Authenticate headers and added to the
+	// URL which produced the response. If the
+	// response was authorized, any challenges for the
+	// endpoint will be cleared.
+	AddResponse(resp *http.Response) error
+}
+
+// NewSimpleManager returns an instance of
+// Manager which only maps endpoints to challenges
+// based on the responses which have been added to the
+// manager. The simple manager will make no attempt to
+// perform requests on the endpoints or cache the responses
+// to a backend.
+func NewSimpleManager() Manager {
+	return &simpleManager{
+		Challenges: make(map[string][]Challenge),
+	}
+}
+
+type simpleManager struct {
+	sync.RWMutex
+	Challenges map[string][]Challenge
+}
+
+func normalizeURL(endpoint *url.URL) {
+	endpoint.Host = strings.ToLower(endpoint.Host)
+	endpoint.Host = canonicalAddr(endpoint)
+}
+
+func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
+	normalizeURL(&endpoint)
+
+	m.RLock()
+	defer m.RUnlock()
+	challenges := m.Challenges[endpoint.String()]
+	return challenges, nil
+}
+
+func (m *simpleManager) AddResponse(resp *http.Response) error {
+	challenges := ResponseChallenges(resp)
+	if resp.Request == nil {
+		return fmt.Errorf("missing request reference")
+	}
+	urlCopy := url.URL{
+		Path:   resp.Request.URL.Path,
+		Host:   resp.Request.URL.Host,
+		Scheme: resp.Request.URL.Scheme,
+	}
+	normalizeURL(&urlCopy)
+
+	m.Lock()
+	defer m.Unlock()
+	m.Challenges[urlCopy.String()] = challenges
+	return nil
+}
+
+// Octet types from RFC 2616.
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
+		if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
+
+// ResponseChallenges returns a list of authorization challenges
+// for the given http Response. Challenges are only checked if
+// the response status code was a 401.
+func ResponseChallenges(resp *http.Response) []Challenge {
+	if resp.StatusCode == http.StatusUnauthorized {
+		// Parse the WWW-Authenticate Header and store the challenges
+		// on this endpoint object.
+		return parseAuthHeader(resp.Header)
+	}
+
+	return nil
+}
+
+func parseAuthHeader(header http.Header) []Challenge {
+	challenges := []Challenge{}
+	for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
+		v, p := parseValueAndParams(h)
+		if v != "" {
+			challenges = append(challenges, Challenge{Scheme: v, Parameters: p})
+		}
+	}
+	return challenges
+}
+
+func parseValueAndParams(header string) (value string, params map[string]string) {
+	params = make(map[string]string)
+	value, s := expectToken(header)
+	if value == "" {
+		return
+	}
+	value = strings.ToLower(value)
+	s = "," + skipSpace(s)
+	for strings.HasPrefix(s, ",") {
+		var pkey string
+		pkey, s = expectToken(skipSpace(s[1:]))
+		if pkey == "" {
+			return
+		}
+		if !strings.HasPrefix(s, "=") {
+			return
+		}
+		var pvalue string
+		pvalue, s = expectTokenOrQuoted(s[1:])
+		if pvalue == "" {
+			return
+		}
+		pkey = strings.ToLower(pkey)
+		params[pkey] = pvalue
+		s = skipSpace(s)
+	}
+	return
+}
+
+func skipSpace(s string) (rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isSpace == 0 {
+			break
+		}
+	}
+	return s[i:]
+}
+
+func expectToken(s string) (token, rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isToken == 0 {
+			break
+		}
+	}
+	return s[:i], s[i:]
+}
+
+func expectTokenOrQuoted(s string) (value string, rest string) {
+	if !strings.HasPrefix(s, "\"") {
+		return expectToken(s)
+	}
+	s = s[1:]
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '"':
+			return s[:i], s[i+1:]
+		case '\\':
+			p := make([]byte, len(s)-1)
+			j := copy(p, s[:i])
+			escape := true
+			for i = i + 1; i < len(s); i++ {
+				b := s[i]
+				switch {
+				case escape:
+					escape = false
+					p[j] = b
+					j++
+				case b == '\\':
+					escape = true
+				case b == '"':
+					return string(p[:j]), s[i+1:]
+				default:
+					p[j] = b
+					j++
+				}
+			}
+			return "", ""
+		}
+	}
+	return "", ""
}
diff --git a/extender/code/vendor/github.com/docker/distribution/tags.go b/extender/code/vendor/github.com/docker/distribution/tags.go
new file mode 100644
index 000000000..f22df2b85
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/distribution/tags.go
@@ -0,0 +1,27 @@
+package distribution
+
+import (
+	"context"
+)
+
+// TagService provides access to information about tagged objects.
+type TagService interface {
+	// Get retrieves the descriptor identified by the tag.
Some + // implementations may differentiate between "trusted" tags and + // "untrusted" tags. If a tag is "untrusted", the mapping will be returned + // as an ErrTagUntrusted error, with the target descriptor. + Get(ctx context.Context, tag string) (Descriptor, error) + + // Tag associates the tag with the provided descriptor, updating the + // current association, if needed. + Tag(ctx context.Context, tag string, desc Descriptor) error + + // Untag removes the given tag association + Untag(ctx context.Context, tag string) error + + // All returns the set of tags managed by this tag service + All(ctx context.Context) ([]string, error) + + // Lookup returns the set of tags referencing the given digest. + Lookup(ctx context.Context, digest Descriptor) ([]string, error) +} diff --git a/extender/code/vendor/github.com/docker/distribution/vendor.conf b/extender/code/vendor/github.com/docker/distribution/vendor.conf new file mode 100644 index 000000000..a249caf26 --- /dev/null +++ b/extender/code/vendor/github.com/docker/distribution/vendor.conf @@ -0,0 +1,51 @@ +github.com/Azure/azure-sdk-for-go 4650843026a7fdec254a8d9cf893693a254edd0b +github.com/Azure/go-autorest eaa7994b2278094c904d31993d26f56324db3052 +github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4 +github.com/aws/aws-sdk-go f831d5a0822a1ad72420ab18c6269bca1ddaf490 +github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a +github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 +github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274 +github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 +github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 +github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2 +github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04 +github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab +github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 +github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257 +github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c +github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3 +github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b +github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604 +github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d +github.com/marstr/guid 8bd9a64bf37eb297b492a4101fb28e80ac0b290f +github.com/satori/go.uuid f58768cc1a7a7e77a3bd49e98cdd21419399b6a3 +github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c +github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39 +github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef +github.com/ncw/swift a0320860b16212c2b59b4912bb6508cda1d7cee6 +github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564 +github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c +github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563 +github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd +github.com/Shopify/logrus-bugsnag 577dee27f20dd8f1a529f82210094af593be12bd +github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064 +github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842 +github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985 +github.com/yvasiyarov/go-metrics 
57bccd1ccd43f94bb17fdd8bf3007059b802f85e +github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128 +github.com/yvasiyarov/newrelic_platform_go b21fdbd4370f3717f3bbd2bf41c223bc273068e6 +golang.org/x/crypto c10c31b5e94b6f7a0283272dc2bb27163dcea24b +golang.org/x/net 4876518f9e71663000c348837735820161a42df7 +golang.org/x/oauth2 045497edb6234273d67dbc25da3f2ddbc4c4cacf +golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb +google.golang.org/api 9bf6e6e569ff057f75d9604a46c52928f17d2b54 +google.golang.org/appengine 12d5545dc1cfa6047a286d5e853841b6471f4c19 +google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2 +google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994 +gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673 +gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b +gopkg.in/yaml.v2 v2.2.1 +rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git +github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb +github.com/opencontainers/image-spec ab7389ef9f50030c9b245bc16b981c7ddf192882 diff --git a/extender/code/vendor/github.com/docker/docker-credential-helpers/LICENSE b/extender/code/vendor/github.com/docker/docker-credential-helpers/LICENSE new file mode 100644 index 000000000..1ea555e2a --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker-credential-helpers/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2016 David Calavera + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/extender/code/vendor/github.com/docker/docker-credential-helpers/client/client.go b/extender/code/vendor/github.com/docker/docker-credential-helpers/client/client.go new file mode 100644 index 000000000..d1d0434cb --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker-credential-helpers/client/client.go @@ -0,0 +1,121 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" + + "github.com/docker/docker-credential-helpers/credentials" +) + +// isValidCredsMessage checks if 'msg' contains invalid credentials error message. +// It returns whether the logs are free of invalid credentials errors and the error if it isn't. +// error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername. 
+func isValidCredsMessage(msg string) error { + if credentials.IsCredentialsMissingServerURLMessage(msg) { + return credentials.NewErrCredentialsMissingServerURL() + } + + if credentials.IsCredentialsMissingUsernameMessage(msg) { + return credentials.NewErrCredentialsMissingUsername() + } + + return nil +} + +// Store uses an external program to save credentials. +func Store(program ProgramFunc, creds *credentials.Credentials) error { + cmd := program("store") + + buffer := new(bytes.Buffer) + if err := json.NewEncoder(buffer).Encode(creds); err != nil { + return err + } + cmd.Input(buffer) + + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t) + } + + return nil +} + +// Get executes an external program to get the credentials from a native store. +func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) { + cmd := program("get") + cmd.Input(strings.NewReader(serverURL)) + + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + + if credentials.IsErrCredentialsNotFoundMessage(t) { + return nil, credentials.NewErrCredentialsNotFound() + } + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t) + } + + resp := &credentials.Credentials{ + ServerURL: serverURL, + } + + if err := json.NewDecoder(bytes.NewReader(out)).Decode(resp); err != nil { + return nil, err + } + + return resp, nil +} + +// Erase executes a program to remove the server credentials from the native store. +func Erase(program ProgramFunc, serverURL string) error { + cmd := program("erase") + cmd.Input(strings.NewReader(serverURL)) + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return fmt.Errorf("error erasing credentials - err: %v, out: `%s`", err, t) + } + + return nil +} + +// List executes a program to list server credentials in the native store. +func List(program ProgramFunc) (map[string]string, error) { + cmd := program("list") + cmd.Input(strings.NewReader("unused")) + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return nil, fmt.Errorf("error listing credentials - err: %v, out: `%s`", err, t) + } + + var resp map[string]string + if err = json.NewDecoder(bytes.NewReader(out)).Decode(&resp); err != nil { + return nil, err + } + + return resp, nil +} diff --git a/extender/code/vendor/github.com/docker/docker-credential-helpers/client/command.go b/extender/code/vendor/github.com/docker/docker-credential-helpers/client/command.go new file mode 100644 index 000000000..8da334306 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker-credential-helpers/client/command.go @@ -0,0 +1,56 @@ +package client + +import ( + "fmt" + "io" + "os" + "os/exec" +) + +// Program is an interface to execute external programs. +type Program interface { + Output() ([]byte, error) + Input(in io.Reader) +} + +// ProgramFunc is a type of function that initializes programs based on arguments. +type ProgramFunc func(args ...string) Program + +// NewShellProgramFunc creates programs that are executed in a Shell. 
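+//
+// A rough usage sketch (the helper binary name is only an example; any
+// docker-credential-* helper on PATH behaves the same way):
+//
+//	p := NewShellProgramFunc("docker-credential-pass")
+//	creds, err := Get(p, "https://index.docker.io/v1/")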
+func NewShellProgramFunc(name string) ProgramFunc {
+	return NewShellProgramFuncWithEnv(name, nil)
+}
+
+// NewShellProgramFuncWithEnv creates programs that are executed in a Shell with environment variables.
+func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc {
+	return func(args ...string) Program {
+		return &Shell{cmd: createProgramCmdRedirectErr(name, args, env)}
+	}
+}
+
+func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd {
+	programCmd := exec.Command(commandName, args...)
+	programCmd.Env = os.Environ()
+	if env != nil {
+		for k, v := range *env {
+			programCmd.Env = append(programCmd.Env, fmt.Sprintf("%s=%s", k, v))
+		}
+	}
+	programCmd.Stderr = os.Stderr
+	return programCmd
+}
+
+// Shell invokes shell commands to talk with a remote credentials helper.
+type Shell struct {
+	cmd *exec.Cmd
+}
+
+// Output returns responses from the remote credentials helper.
+func (s *Shell) Output() ([]byte, error) {
+	return s.cmd.Output()
+}
+
+// Input sets the input to send to a remote credentials helper.
+func (s *Shell) Input(in io.Reader) {
+	s.cmd.Stdin = in
+}
diff --git a/extender/code/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go b/extender/code/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
new file mode 100644
index 000000000..da8b594e7
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
@@ -0,0 +1,186 @@
+package credentials
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+)
+
+// Credentials holds the information shared between docker and the credentials store.
+type Credentials struct {
+	ServerURL string
+	Username  string
+	Secret    string
+}
+
+// isValid checks the integrity of a Credentials object, ensuring it lacks
+// neither a server URL nor a username.
+// It returns whether the credentials are valid and, if not, the corresponding
+// error: either errCredentialsMissingServerURL or errCredentialsMissingUsername.
+func (c *Credentials) isValid() (bool, error) {
+	if len(c.ServerURL) == 0 {
+		return false, NewErrCredentialsMissingServerURL()
+	}
+
+	if len(c.Username) == 0 {
+		return false, NewErrCredentialsMissingUsername()
+	}
+
+	return true, nil
+}
+
+// CredsLabel holds the way Docker credentials should be labeled in credentials stores that allow labelling.
+// That label allows filtering out non-Docker credentials at lookup/search time in the macOS keychain,
+// Windows credentials manager, and Linux libsecret. The default value is "Docker Credentials".
+var CredsLabel = "Docker Credentials"
+
+// SetCredsLabel is a simple setter for CredsLabel.
+func SetCredsLabel(label string) {
+	CredsLabel = label
+}
+
+// Serve initializes the credentials helper and parses the action argument.
+// This function is designed to be called from a command line interface.
+// It uses os.Args[1] as the key for the action.
+// It uses os.Stdin as input and os.Stdout as output.
+// This function terminates the program with os.Exit(1) if there is an error.
+func Serve(helper Helper) {
+	var err error
+	if len(os.Args) != 2 {
+		err = fmt.Errorf("Usage: %s <store|get|erase|list|version>", os.Args[0])
+	}
+
+	if err == nil {
+		err = HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout)
+	}
+
+	if err != nil {
+		fmt.Fprintf(os.Stdout, "%v\n", err)
+		os.Exit(1)
+	}
+}
+
+// HandleCommand uses a helper and a key to run a credential action.
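+// A minimal sketch (myHelper stands for any Helper implementation and is
+// hypothetical here):
+//
+//	if err := HandleCommand(myHelper, "get", os.Stdin, os.Stdout); err != nil {
+//		fmt.Fprintln(os.Stderr, err)
+//	}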
+func HandleCommand(helper Helper, key string, in io.Reader, out io.Writer) error { + switch key { + case "store": + return Store(helper, in) + case "get": + return Get(helper, in, out) + case "erase": + return Erase(helper, in) + case "list": + return List(helper, out) + case "version": + return PrintVersion(out) + } + return fmt.Errorf("Unknown credential action `%s`", key) +} + +// Store uses a helper and an input reader to save credentials. +// The reader must contain the JSON serialization of a Credentials struct. +func Store(helper Helper, reader io.Reader) error { + scanner := bufio.NewScanner(reader) + + buffer := new(bytes.Buffer) + for scanner.Scan() { + buffer.Write(scanner.Bytes()) + } + + if err := scanner.Err(); err != nil && err != io.EOF { + return err + } + + var creds Credentials + if err := json.NewDecoder(buffer).Decode(&creds); err != nil { + return err + } + + if ok, err := creds.isValid(); !ok { + return err + } + + return helper.Add(&creds) +} + +// Get retrieves the credentials for a given server url. +// The reader must contain the server URL to search. +// The writer is used to write the JSON serialization of the credentials. +func Get(helper Helper, reader io.Reader, writer io.Writer) error { + scanner := bufio.NewScanner(reader) + + buffer := new(bytes.Buffer) + for scanner.Scan() { + buffer.Write(scanner.Bytes()) + } + + if err := scanner.Err(); err != nil && err != io.EOF { + return err + } + + serverURL := strings.TrimSpace(buffer.String()) + if len(serverURL) == 0 { + return NewErrCredentialsMissingServerURL() + } + + username, secret, err := helper.Get(serverURL) + if err != nil { + return err + } + + resp := Credentials{ + ServerURL: serverURL, + Username: username, + Secret: secret, + } + + buffer.Reset() + if err := json.NewEncoder(buffer).Encode(resp); err != nil { + return err + } + + fmt.Fprint(writer, buffer.String()) + return nil +} + +// Erase removes credentials from the store. +// The reader must contain the server URL to remove. +func Erase(helper Helper, reader io.Reader) error { + scanner := bufio.NewScanner(reader) + + buffer := new(bytes.Buffer) + for scanner.Scan() { + buffer.Write(scanner.Bytes()) + } + + if err := scanner.Err(); err != nil && err != io.EOF { + return err + } + + serverURL := strings.TrimSpace(buffer.String()) + if len(serverURL) == 0 { + return NewErrCredentialsMissingServerURL() + } + + return helper.Delete(serverURL) +} + +//List returns all the serverURLs of keys in +//the OS store as a list of strings +func List(helper Helper, writer io.Writer) error { + accts, err := helper.List() + if err != nil { + return err + } + return json.NewEncoder(writer).Encode(accts) +} + +//PrintVersion outputs the current version. +func PrintVersion(writer io.Writer) error { + fmt.Fprintln(writer, Version) + return nil +} diff --git a/extender/code/vendor/github.com/docker/docker-credential-helpers/credentials/error.go b/extender/code/vendor/github.com/docker/docker-credential-helpers/credentials/error.go new file mode 100644 index 000000000..fe6a5aef4 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker-credential-helpers/credentials/error.go @@ -0,0 +1,102 @@ +package credentials + +const ( + // ErrCredentialsNotFound standardizes the not found error, so every helper returns + // the same message and docker can handle it properly. 
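+	// (Callers match on the exact string a helper prints to stdout; see
+	// IsErrCredentialsNotFoundMessage below. The text must therefore stay stable.)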
+ errCredentialsNotFoundMessage = "credentials not found in native keychain" + + // ErrCredentialsMissingServerURL and ErrCredentialsMissingUsername standardize + // invalid credentials or credentials management operations + errCredentialsMissingServerURLMessage = "no credentials server URL" + errCredentialsMissingUsernameMessage = "no credentials username" +) + +// errCredentialsNotFound represents an error +// raised when credentials are not in the store. +type errCredentialsNotFound struct{} + +// Error returns the standard error message +// for when the credentials are not in the store. +func (errCredentialsNotFound) Error() string { + return errCredentialsNotFoundMessage +} + +// NewErrCredentialsNotFound creates a new error +// for when the credentials are not in the store. +func NewErrCredentialsNotFound() error { + return errCredentialsNotFound{} +} + +// IsErrCredentialsNotFound returns true if the error +// was caused by not having a set of credentials in a store. +func IsErrCredentialsNotFound(err error) bool { + _, ok := err.(errCredentialsNotFound) + return ok +} + +// IsErrCredentialsNotFoundMessage returns true if the error +// was caused by not having a set of credentials in a store. +// +// This function helps to check messages returned by an +// external program via its standard output. +func IsErrCredentialsNotFoundMessage(err string) bool { + return err == errCredentialsNotFoundMessage +} + +// errCredentialsMissingServerURL represents an error raised +// when the credentials object has no server URL or when no +// server URL is provided to a credentials operation requiring +// one. +type errCredentialsMissingServerURL struct{} + +func (errCredentialsMissingServerURL) Error() string { + return errCredentialsMissingServerURLMessage +} + +// errCredentialsMissingUsername represents an error raised +// when the credentials object has no username or when no +// username is provided to a credentials operation requiring +// one. +type errCredentialsMissingUsername struct{} + +func (errCredentialsMissingUsername) Error() string { + return errCredentialsMissingUsernameMessage +} + +// NewErrCredentialsMissingServerURL creates a new error for +// errCredentialsMissingServerURL. +func NewErrCredentialsMissingServerURL() error { + return errCredentialsMissingServerURL{} +} + +// NewErrCredentialsMissingUsername creates a new error for +// errCredentialsMissingUsername. +func NewErrCredentialsMissingUsername() error { + return errCredentialsMissingUsername{} +} + +// IsCredentialsMissingServerURL returns true if the error +// was an errCredentialsMissingServerURL. +func IsCredentialsMissingServerURL(err error) bool { + _, ok := err.(errCredentialsMissingServerURL) + return ok +} + +// IsCredentialsMissingServerURLMessage checks for an +// errCredentialsMissingServerURL in the error message. +func IsCredentialsMissingServerURLMessage(err string) bool { + return err == errCredentialsMissingServerURLMessage +} + +// IsCredentialsMissingUsername returns true if the error +// was an errCredentialsMissingUsername. +func IsCredentialsMissingUsername(err error) bool { + _, ok := err.(errCredentialsMissingUsername) + return ok +} + +// IsCredentialsMissingUsernameMessage checks for an +// errCredentialsMissingUsername in the error message. 
+func IsCredentialsMissingUsernameMessage(err string) bool { + return err == errCredentialsMissingUsernameMessage +} diff --git a/extender/code/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go b/extender/code/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go new file mode 100644 index 000000000..135acd254 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go @@ -0,0 +1,14 @@ +package credentials + +// Helper is the interface a credentials store helper must implement. +type Helper interface { + // Add appends credentials to the store. + Add(*Credentials) error + // Delete removes credentials from the store. + Delete(serverURL string) error + // Get retrieves credentials from the store. + // It returns username and secret as strings. + Get(serverURL string) (string, string, error) + // List returns the stored serverURLs and their associated usernames. + List() (map[string]string, error) +} diff --git a/extender/code/vendor/github.com/docker/docker-credential-helpers/credentials/version.go b/extender/code/vendor/github.com/docker/docker-credential-helpers/credentials/version.go new file mode 100644 index 000000000..c2cc3e2e0 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker-credential-helpers/credentials/version.go @@ -0,0 +1,4 @@ +package credentials + +// Version holds a string describing the current version +const Version = "0.6.3" diff --git a/extender/code/vendor/github.com/docker/docker/AUTHORS b/extender/code/vendor/github.com/docker/docker/AUTHORS new file mode 100644 index 000000000..c6c8fb40e --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/AUTHORS @@ -0,0 +1,2016 @@ +# This file lists all individuals having contributed content to the repository. +# For how it is generated, see `hack/generate-authors.sh`. + +Aanand Prasad +Aaron Davidson +Aaron Feng +Aaron Huslage +Aaron L. 
Xu +Aaron Lehmann +Aaron Welch +Aaron.L.Xu +Abel Muiño +Abhijeet Kasurde +Abhinandan Prativadi +Abhinav Ajgaonkar +Abhishek Chanda +Abhishek Sharma +Abin Shahab +Adam Avilla +Adam Eijdenberg +Adam Kunk +Adam Miller +Adam Mills +Adam Pointer +Adam Singer +Adam Walz +Addam Hardy +Aditi Rajagopal +Aditya +Adnan Khan +Adolfo Ochagavía +Adria Casas +Adrian Moisey +Adrian Mouat +Adrian Oprea +Adrien Folie +Adrien Gallouët +Ahmed Kamal +Ahmet Alp Balkan +Aidan Feldman +Aidan Hobson Sayers +AJ Bowen +Ajey Charantimath +ajneu +Akash Gupta +Akihiro Matsushima +Akihiro Suda +Akim Demaille +Akira Koyasu +Akshay Karle +Al Tobey +alambike +Alan Scherger +Alan Thompson +Albert Callarisa +Albert Zhang +Alejandro González Hevia +Aleksa Sarai +Aleksandrs Fadins +Alena Prokharchyk +Alessandro Boch +Alessio Biancalana +Alex Chan +Alex Chen +Alex Coventry +Alex Crawford +Alex Ellis +Alex Gaynor +Alex Goodman +Alex Olshansky +Alex Samorukov +Alex Warhawk +Alexander Artemenko +Alexander Boyd +Alexander Larsson +Alexander Midlash +Alexander Morozov +Alexander Shopov +Alexandre Beslic +Alexandre Garnier +Alexandre González +Alexandre Jomin +Alexandru Sfirlogea +Alexey Guskov +Alexey Kotlyarov +Alexey Shamrin +Alexis THOMAS +Alfred Landrum +Ali Dehghani +Alicia Lauerman +Alihan Demir +Allen Madsen +Allen Sun +almoehi +Alvaro Saurin +Alvin Deng +Alvin Richards +amangoel +Amen Belayneh +Amir Goldstein +Amit Bakshi +Amit Krishnan +Amit Shukla +Amr Gawish +Amy Lindburg +Anand Patil +AnandkumarPatel +Anatoly Borodin +Anchal Agrawal +Anda Xu +Anders Janmyr +Andre Dublin <81dublin@gmail.com> +Andre Granovsky +Andrea Luzzardi +Andrea Turli +Andreas Elvers +Andreas Köhler +Andreas Savvides +Andreas Tiefenthaler +Andrei Gherzan +Andrew C. Bodine +Andrew Clay Shafer +Andrew Duckworth +Andrew France +Andrew Gerrand +Andrew Guenther +Andrew He +Andrew Hsu +Andrew Kuklewicz +Andrew Macgregor +Andrew Macpherson +Andrew Martin +Andrew McDonnell +Andrew Munsell +Andrew Pennebaker +Andrew Po +Andrew Weiss +Andrew Williams +Andrews Medina +Andrey Petrov +Andrey Stolbovsky +André Martins +andy +Andy Chambers +andy diller +Andy Goldstein +Andy Kipp +Andy Rothfusz +Andy Smith +Andy Wilson +Anes Hasicic +Anil Belur +Anil Madhavapeddy +Ankush Agarwal +Anonmily +Anran Qiao +Anshul Pundir +Anthon van der Neut +Anthony Baire +Anthony Bishopric +Anthony Dahanne +Anthony Sottile +Anton Löfgren +Anton Nikitin +Anton Polonskiy +Anton Tiurin +Antonio Murdaca +Antonis Kalipetis +Antony Messerli +Anuj Bahuguna +Anusha Ragunathan +apocas +Arash Deshmeh +ArikaChen +Arnaud Lefebvre +Arnaud Porterie +Arthur Barr +Arthur Gautier +Artur Meyster +Arun Gupta +Asad Saeeduddin +Asbjørn Enge +averagehuman +Avi Das +Avi Miller +Avi Vaid +ayoshitake +Azat Khuyiyakhmetov +Bardia Keyoumarsi +Barnaby Gray +Barry Allard +Bartłomiej Piotrowski +Bastiaan Bakker +bdevloed +Ben Bonnefoy +Ben Firshman +Ben Golub +Ben Hall +Ben Sargent +Ben Severson +Ben Toews +Ben Wiklund +Benjamin Atkin +Benjamin Baker +Benjamin Boudreau +Benjamin Yolken +Benoit Chesneau +Bernerd Schaefer +Bernhard M. 
Wiedemann +Bert Goethals +Bharath Thiruveedula +Bhiraj Butala +Bhumika Bayani +Bilal Amarni +Bill Wang +Bin Liu +Bingshen Wang +Blake Geno +Boaz Shuster +bobby abbott +Boris Pruessmann +Boshi Lian +Bouke Haarsma +Boyd Hemphill +boynux +Bradley Cicenas +Bradley Wright +Brandon Liu +Brandon Philips +Brandon Rhodes +Brendan Dixon +Brent Salisbury +Brett Higgins +Brett Kochendorfer +Brett Randall +Brian (bex) Exelbierd +Brian Bland +Brian DeHamer +Brian Dorsey +Brian Flad +Brian Goff +Brian McCallister +Brian Olsen +Brian Schwind +Brian Shumate +Brian Torres-Gil +Brian Trump +Brice Jaglin +Briehan Lombaard +Brielle Broder +Bruno Bigras +Bruno Binet +Bruno Gazzera +Bruno Renié +Bruno Tavares +Bryan Bess +Bryan Boreham +Bryan Matsuo +Bryan Murphy +Burke Libbey +Byung Kang +Caleb Spare +Calen Pennington +Cameron Boehmer +Cameron Spear +Campbell Allen +Candid Dauth +Cao Weiwei +Carl Henrik Lunde +Carl Loa Odin +Carl X. Su +Carlo Mion +Carlos Alexandro Becker +Carlos Sanchez +Carol Fager-Higgins +Cary +Casey Bisson +Catalin Pirvu +Ce Gao +Cedric Davies +Cezar Sa Espinola +Chad Swenson +Chance Zibolski +Chander Govindarajan +Chanhun Jeong +Chao Wang +Charles Chan +Charles Hooper +Charles Law +Charles Lindsay +Charles Merriam +Charles Sarrazin +Charles Smith +Charlie Drage +Charlie Lewis +Chase Bolt +ChaYoung You +Chen Chao +Chen Chuanliang +Chen Hanxiao +Chen Min +Chen Mingjie +Chen Qiu +Cheng-mean Liu +Chengguang Xu +chenyuzhu +Chetan Birajdar +Chewey +Chia-liang Kao +chli +Cholerae Hu +Chris Alfonso +Chris Armstrong +Chris Dias +Chris Dituri +Chris Fordham +Chris Gavin +Chris Gibson +Chris Khoo +Chris McKinnel +Chris McKinnel +Chris Seto +Chris Snow +Chris St. Pierre +Chris Stivers +Chris Swan +Chris Telfer +Chris Wahl +Chris Weyl +Chris White +Christian Berendt +Christian Brauner +Christian Böhme +Christian Muehlhaeuser +Christian Persson +Christian Rotzoll +Christian Simon +Christian Stefanescu +Christophe Mehay +Christophe Troestler +Christophe Vidal +Christopher Biscardi +Christopher Crone +Christopher Currie +Christopher Jones +Christopher Latham +Christopher Rigor +Christy Perez +Chun Chen +Ciro S. Costa +Clayton Coleman +Clinton Kitson +Cody Roseborough +Coenraad Loubser +Colin Dunklau +Colin Hebert +Colin Rice +Colin Walters +Collin Guarino +Colm Hally +companycy +Corbin Coleman +Corey Farrell +Cory Forsyth +cressie176 +CrimsonGlory +Cristian Staretu +cristiano balducci +Cruceru Calin-Cristian +CUI Wei +Cyprian Gracz +Cyril F +Daan van Berkel +Daehyeok Mun +Dafydd Crosby +dalanlan +Damian Smyth +Damien Nadé +Damien Nozay +Damjan Georgievski +Dan Anolik +Dan Buch +Dan Cotora +Dan Feldman +Dan Griffin +Dan Hirsch +Dan Keder +Dan Levy +Dan McPherson +Dan Stine +Dan Williams +Dani Louca +Daniel Antlinger +Daniel Dao +Daniel Exner +Daniel Farrell +Daniel Garcia +Daniel Gasienica +Daniel Grunwell +Daniel Hiltgen +Daniel J Walsh +Daniel Menet +Daniel Mizyrycki +Daniel Nephin +Daniel Norberg +Daniel Nordberg +Daniel Robinson +Daniel S +Daniel Von Fange +Daniel Watkins +Daniel X Moore +Daniel YC Lin +Daniel Zhang +Danny Berger +Danny Yates +Danyal Khaliq +Darren Coxall +Darren Shepherd +Darren Stahl +Dattatraya Kumbhar +Davanum Srinivas +Dave Barboza +Dave Goodchild +Dave Henderson +Dave MacDonald +Dave Tucker +David Anderson +David Calavera +David Chung +David Corking +David Cramer +David Currie +David Davis +David Dooling +David Gageot +David Gebler +David Glasser +David Lawrence +David Lechner +David M. Karr +David Mackey +David Mat +David Mcanulty +David McKay +David Pelaez +David R. 
Jenni +David Röthlisberger +David Sheets +David Sissitka +David Trott +David Wang <00107082@163.com> +David Williamson +David Xia +David Young +Davide Ceretti +Dawn Chen +dbdd +dcylabs +Debayan De +Deborah Gertrude Digges +deed02392 +Deng Guangxing +Deni Bertovic +Denis Defreyne +Denis Gladkikh +Denis Ollier +Dennis Chen +Dennis Chen +Dennis Docter +Derek +Derek +Derek Ch +Derek McGowan +Deric Crago +Deshi Xiao +devmeyster +Devvyn Murphy +Dharmit Shah +Dhawal Yogesh Bhanushali +Diego Romero +Diego Siqueira +Dieter Reuter +Dillon Dixon +Dima Stopel +Dimitri John Ledkov +Dimitris Rozakis +Dimitry Andric +Dinesh Subhraveti +Ding Fei +Diogo Monica +DiuDiugirl +Djibril Koné +dkumor +Dmitri Logvinenko +Dmitri Shuralyov +Dmitry Demeshchuk +Dmitry Gusev +Dmitry Kononenko +Dmitry Shyshkin +Dmitry Smirnov +Dmitry V. Krivenok +Dmitry Vorobev +Dolph Mathews +Dominik Dingel +Dominik Finkbeiner +Dominik Honnef +Don Kirkby +Don Kjer +Don Spaulding +Donald Huang +Dong Chen +Donghwa Kim +Donovan Jones +Doron Podoleanu +Doug Davis +Doug MacEachern +Doug Tangren +Douglas Curtis +Dr Nic Williams +dragon788 +Dražen Lučanin +Drew Erny +Drew Hubl +Dustin Sallings +Ed Costello +Edmund Wagner +Eiichi Tsukata +Eike Herzbach +Eivin Giske Skaaren +Eivind Uggedal +Elan Ruusamäe +Elango Sivanandam +Elena Morozova +Eli Uriegas +Elias Faxö +Elias Probst +Elijah Zupancic +eluck +Elvir Kuric +Emil Davtyan +Emil Hernvall +Emily Maier +Emily Rose +Emir Ozer +Enguerran +Eohyung Lee +epeterso +Eric Barch +Eric Curtin +Eric G. Noriega +Eric Hanchrow +Eric Lee +Eric Myhre +Eric Paris +Eric Rafaloff +Eric Rosenberg +Eric Sage +Eric Soderstrom +Eric Yang +Eric-Olivier Lamey +Erica Windisch +Erik Bray +Erik Dubbelboer +Erik Hollensbe +Erik Inge Bolsø +Erik Kristensen +Erik St. Martin +Erik Weathers +Erno Hopearuoho +Erwin van der Koogh +Ethan Bell +Euan Kemp +Eugen Krizo +Eugene Yakubovich +Evan Allrich +Evan Carmi +Evan Hazlett +Evan Krall +Evan Phoenix +Evan Wies +Evelyn Xu +Everett Toews +Evgeny Shmarnev +Evgeny Vereshchagin +Ewa Czechowska +Eystein Måløy Stenberg +ezbercih +Ezra Silvera +Fabian Lauer +Fabian Raetz +Fabiano Rosas +Fabio Falci +Fabio Kung +Fabio Rapposelli +Fabio Rehm +Fabrizio Regini +Fabrizio Soppelsa +Faiz Khan +falmp +Fangming Fang +Fangyuan Gao <21551127@zju.edu.cn> +fanjiyun +Fareed Dudhia +Fathi Boudra +Federico Gimenez +Felipe Oliveira +Felix Abecassis +Felix Geisendörfer +Felix Hupfeld +Felix Rabe +Felix Ruess +Felix Schindler +Feng Yan +Fengtu Wang +Ferenc Szabo +Fernando +Fero Volar +Ferran Rodenas +Filipe Brandenburger +Filipe Oliveira +Flavio Castelli +Flavio Crisciani +Florian +Florian Klein +Florian Maier +Florian Noeding +Florian Weingarten +Florin Asavoaie +Florin Patan +fonglh +Foysal Iqbal +Francesc Campoy +Francesco Mari +Francis Chuang +Francisco Carriedo +Francisco Souza +Frank Groeneveld +Frank Herrmann +Frank Macreery +Frank Rosquin +Fred Lifton +Frederick F. 
Kautz IV +Frederik Loeffert +Frederik Nordahl Jul Sabroe +Freek Kalter +Frieder Bluemle +Félix Baylac-Jacqué +Félix Cantournet +Gabe Rosenhouse +Gabor Nagy +Gabriel Linder +Gabriel Monroy +Gabriel Nicolas Avellaneda +Gaetan de Villele +Galen Sampson +Gang Qiao +Gareth Rushgrove +Garrett Barboza +Gary Schaetz +Gaurav +gautam, prasanna +Gaël PORTAY +Genki Takiuchi +GennadySpb +Geoffrey Bachelet +Geon Kim +George Kontridze +George MacRorie +George Xie +Georgi Hristozov +Gereon Frey +German DZ +Gert van Valkenhoef +Gerwim Feiken +Ghislain Bourgeois +Giampaolo Mancini +Gianluca Borello +Gildas Cuisinier +gissehel +Giuseppe Mazzotta +Gleb Fotengauer-Malinovskiy +Gleb M Borisov +Glyn Normington +GoBella +Goffert van Gool +Gopikannan Venugopalsamy +Gosuke Miyashita +Gou Rao +Govinda Fichtner +Grant Millar +Grant Reaber +Graydon Hoare +Greg Fausak +Greg Pflaum +Greg Stephens +Greg Thornton +Grzegorz Jaśkiewicz +Guilhem Lettron +Guilherme Salgado +Guillaume Dufour +Guillaume J. Charmes +guoxiuyan +Guri +Gurjeet Singh +Guruprasad +Gustav Sinder +gwx296173 +Günter Zöchbauer +haikuoliu +Hakan Özler +Hans Kristian Flaatten +Hans Rødtang +Hao Shu Wei +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harley Laue +Harold Cooper +Harry Zhang +Harshal Patil +Harshal Patil +He Simei +He Xiaoxi +He Xin +heartlock <21521209@zju.edu.cn> +Hector Castro +Helen Xie +Henning Sprang +Hiroshi Hatake +Hobofan +Hollie Teal +Hong Xu +Hongbin Lu +hsinko <21551195@zju.edu.cn> +Hu Keping +Hu Tao +Huanzhong Zhang +Huayi Zhang +Hugo Duncan +Hugo Marisco <0x6875676f@gmail.com> +Hunter Blanks +huqun +Huu Nguyen +hyeongkyu.lee +Hyzhou Zhy +Iago López Galeiras +Ian Babrou +Ian Bishop +Ian Bull +Ian Calvert +Ian Campbell +Ian Chen +Ian Lee +Ian Main +Ian Philpot +Ian Truslove +Iavael +Icaro Seara +Ignacio Capurro +Igor Dolzhikov +Igor Karpovich +Iliana Weller +Ilkka Laukkanen +Ilya Dmitrichenko +Ilya Gusev +Ilya Khlopotov +imre Fitos +inglesp +Ingo Gottwald +Isaac Dupree +Isabel Jimenez +Isao Jonas +Iskander Sharipov +Ivan Babrou +Ivan Fraixedes +Ivan Grcic +Ivan Markin +J Bruni +J. Nunn +Jack Danger Canty +Jack Laxson +Jacob Atzen +Jacob Edelman +Jacob Tomlinson +Jacob Vallejo +Jacob Wen +Jaivish Kothari +Jake Champlin +Jake Moshenko +Jake Sanders +jakedt +James Allen +James Carey +James Carr +James DeFelice +James Harrison Fisher +James Kyburz +James Kyle +James Lal +James Mills +James Nesbitt +James Nugent +James Turnbull +Jamie Hannaford +Jamshid Afshar +Jan Keromnes +Jan Koprowski +Jan Pazdziora +Jan Toebes +Jan-Gerd Tenberge +Jan-Jaap Driessen +Jana Radhakrishnan +Jannick Fahlbusch +Januar Wayong +Jared Biel +Jared Hocutt +Jaroslaw Zabiello +jaseg +Jasmine Hegman +Jason Divock +Jason Giedymin +Jason Green +Jason Hall +Jason Heiss +Jason Livesay +Jason McVetta +Jason Plum +Jason Shepherd +Jason Smith +Jason Sommer +Jason Stangroome +jaxgeller +Jay +Jay +Jay Kamat +Jean-Baptiste Barth +Jean-Baptiste Dalido +Jean-Christophe Berthon +Jean-Paul Calderone +Jean-Pierre Huynh +Jean-Tiare Le Bigot +Jeeva S. 
Chelladhurai +Jeff Anderson +Jeff Hajewski +Jeff Johnston +Jeff Lindsay +Jeff Mickey +Jeff Minard +Jeff Nickoloff +Jeff Silberman +Jeff Welch +Jeffrey Bolle +Jeffrey Morgan +Jeffrey van Gogh +Jenny Gebske +Jeremy Chambers +Jeremy Grosser +Jeremy Price +Jeremy Qian +Jeremy Unruh +Jeremy Yallop +Jeroen Franse +Jeroen Jacobs +Jesse Dearing +Jesse Dubay +Jessica Frazelle +Jezeniel Zapanta +Jhon Honce +Ji.Zhilong +Jian Zhang +Jie Luo +Jihyun Hwang +Jilles Oldenbeuving +Jim Alateras +Jim Galasyn +Jim Minter +Jim Perrin +Jimmy Cuadra +Jimmy Puckett +Jimmy Song +jimmyxian +Jinsoo Park +Jiri Popelka +Jiuyue Ma +Jiří Župka +jjy +jmzwcn +Joao Fernandes +Joe Beda +Joe Doliner +Joe Ferguson +Joe Gordon +Joe Shaw +Joe Van Dyk +Joel Friedly +Joel Handwell +Joel Hansson +Joel Wurtz +Joey Geiger +Joey Geiger +Joey Gibson +Joffrey F +Johan Euphrosine +Johan Rydberg +Johanan Lieberman +Johannes 'fish' Ziemke +John Costa +John Feminella +John Gardiner Myers +John Gossman +John Harris +John Howard (VM) +John Laswell +John Maguire +John Mulhausen +John OBrien III +John Starks +John Stephens +John Tims +John V. Martinez +John Warwick +John Willis +Jon Johnson +Jon Surrell +Jon Wedaman +Jonas Pfenniger +Jonathan A. Sternberg +Jonathan Boulle +Jonathan Camp +Jonathan Choy +Jonathan Dowland +Jonathan Lebon +Jonathan Lomas +Jonathan McCrohan +Jonathan Mueller +Jonathan Pares +Jonathan Rudenberg +Jonathan Stoppani +Jonh Wendell +Joni Sar +Joost Cassee +Jordan Arentsen +Jordan Jennings +Jordan Sissel +Jorge Marin +Jorit Kleine-Möllhoff +Jose Diaz-Gonzalez +Joseph Anthony Pasquale Holsten +Joseph Hager +Joseph Kern +Joseph Rothrock +Josh +Josh Bodah +Josh Bonczkowski +Josh Chorlton +Josh Eveleth +Josh Hawn +Josh Horwitz +Josh Poimboeuf +Josh Soref +Josh Wilson +Josiah Kiehl +José Tomás Albornoz +Joyce Jang +JP +Julian Taylor +Julien Barbier +Julien Bisconti +Julien Bordellier +Julien Dubois +Julien Kassar +Julien Maitrehenry +Julien Pervillé +Julio Montes +Jun-Ru Chang +Jussi Nummelin +Justas Brazauskas +Justin Cormack +Justin Force +Justin Menga +Justin Plock +Justin Simonelis +Justin Terry +Justyn Temme +Jyrki Puttonen +Jérôme Petazzoni +Jörg Thalheim +K. Heller +Kai Blin +Kai Qiang Wu (Kennan) +Kamil Domański +Kamjar Gerami +Kanstantsin Shautsou +Kara Alexandra +Karan Lyons +Kareem Khazem +kargakis +Karl Grzeszczak +Karol Duleba +Karthik Karanth +Karthik Nayak +Kasper Fabæch Brandt +Kate Heddleston +Katie McLaughlin +Kato Kazuyoshi +Katrina Owen +Kawsar Saiyeed +Kay Yan +kayrus +Kazuhiro Sera +Ke Li +Ke Xu +Kei Ohmura +Keith Hudgins +Keli Hu +Ken Cochrane +Ken Herner +Ken ICHIKAWA +Ken Reese +Kenfe-Mickaël Laventure +Kenjiro Nakayama +Kent Johnson +Kevin "qwazerty" Houdebert +Kevin Burke +Kevin Clark +Kevin Feyrer +Kevin J. Lynagh +Kevin Jing Qiu +Kevin Kern +Kevin Menard +Kevin Meredith +Kevin P. Kucharczyk +Kevin Richardson +Kevin Shi +Kevin Wallace +Kevin Yap +Keyvan Fatehi +kies +Kim BKC Carlbacker +Kim Eik +Kimbro Staken +Kir Kolyshkin +Kiran Gangadharan +Kirill SIbirev +knappe +Kohei Tsuruta +Koichi Shiraishi +Konrad Kleine +Konstantin Gribov +Konstantin L +Konstantin Pelykh +Krasi Georgiev +Krasimir Georgiev +Kris-Mikael Krister +Kristian Haugene +Kristina Zabunova +Kun Zhang +Kunal Kushwaha +Kunal Tyagi +Kyle Conroy +Kyle Linden +kyu +Lachlan Coote +Lai Jiangshan +Lajos Papp +Lakshan Perera +Lalatendu Mohanty +Lance Chen +Lance Kinley +Lars Butler +Lars Kellogg-Stedman +Lars R. 
Damerow +Lars-Magnus Skog +Laszlo Meszaros +Laura Frank +Laurent Erignoux +Laurie Voss +Leandro Siqueira +Lee Chao <932819864@qq.com> +Lee, Meng-Han +leeplay +Lei Gong +Lei Jitang +Len Weincier +Lennie +Leo Gallucci +Leszek Kowalski +Levi Blackstone +Levi Gross +Lewis Daly +Lewis Marshall +Lewis Peckover +Li Yi +Liam Macgillavry +Liana Lo +Liang Mingqiang +Liang-Chi Hsieh +Liao Qingwei +Lily Guo +limsy +Lin Lu +LingFaKe +Linus Heckemann +Liran Tal +Liron Levin +Liu Bo +Liu Hua +liwenqi +lixiaobing10051267 +Liz Zhang +LIZAO LI +Lizzie Dixon <_@lizzie.io> +Lloyd Dewolf +Lokesh Mandvekar +longliqiang88 <394564827@qq.com> +Lorenz Leutgeb +Lorenzo Fontana +Lotus Fenn +Louis Opter +Luca Favatella +Luca Marturana +Luca Orlandi +Luca-Bogdan Grigorescu +Lucas Chan +Lucas Chi +Lucas Molas +Luciano Mores +Luis Martínez de Bartolomé Izquierdo +Luiz Svoboda +Lukas Waslowski +lukaspustina +Lukasz Zajaczkowski +Luke Marsden +Lyn +Lynda O'Leary +Lénaïc Huard +Ma Müller +Ma Shimiao +Mabin +Madhan Raj Mookkandy +Madhav Puri +Madhu Venugopal +Mageee +Mahesh Tiyyagura +malnick +Malte Janduda +Manfred Touron +Manfred Zabarauskas +Manjunath A Kumatagi +Mansi Nahar +Manuel Meurer +Manuel Rüger +Manuel Woelker +mapk0y +Marc Abramowitz +Marc Kuo +Marc Tamsky +Marcel Edmund Franke +Marcelo Horacio Fortino +Marcelo Salazar +Marco Hennings +Marcus Cobden +Marcus Farkas +Marcus Linke +Marcus Martins +Marcus Ramberg +Marek Goldmann +Marian Marinov +Marianna Tessel +Mario Loriedo +Marius Gundersen +Marius Sturm +Marius Voila +Mark Allen +Mark McGranaghan +Mark McKinstry +Mark Milstein +Mark Oates +Mark Parker +Mark West +Markan Patel +Marko Mikulicic +Marko Tibold +Markus Fix +Markus Kortlang +Martijn Dwars +Martijn van Oosterhout +Martin Honermeyer +Martin Kelly +Martin Mosegaard Amdisen +Martin Muzatko +Martin Redmond +Mary Anthony +Masahito Zembutsu +Masato Ohba +Masayuki Morita +Mason Malone +Mateusz Sulima +Mathias Monnerville +Mathieu Champlon +Mathieu Le Marec - Pasquet +Mathieu Parent +Matt Apperson +Matt Bachmann +Matt Bentley +Matt Haggard +Matt Hoyle +Matt McCormick +Matt Moore +Matt Richardson +Matt Rickard +Matt Robenolt +Matt Schurenko +Matt Williams +Matthew Heon +Matthew Lapworth +Matthew Mayer +Matthew Mosesohn +Matthew Mueller +Matthew Riley +Matthias Klumpp +Matthias Kühnle +Matthias Rampke +Matthieu Hauglustaine +Mauricio Garavaglia +mauriyouth +Max Shytikov +Maxim Fedchyshyn +Maxim Ivanov +Maxim Kulkin +Maxim Treskin +Maxime Petazzoni +Meaglith Ma +meejah +Megan Kostick +Mehul Kar +Mei ChunTao +Mengdi Gao +Mert Yazıcıoğlu +mgniu +Micah Zoltu +Michael A. 
Smith +Michael Bridgen +Michael Brown +Michael Chiang +Michael Crosby +Michael Currie +Michael Friis +Michael Gorsuch +Michael Grauer +Michael Holzheu +Michael Hudson-Doyle +Michael Huettermann +Michael Irwin +Michael Käufl +Michael Neale +Michael Nussbaum +Michael Prokop +Michael Scharf +Michael Spetsiotis +Michael Stapelberg +Michael Steinert +Michael Thies +Michael West +Michal Fojtik +Michal Gebauer +Michal Jemala +Michal Minář +Michal Wieczorek +Michaël Pailloncy +Michał Czeraszkiewicz +Michał Gryko +Michiel de Jong +Mickaël Fortunato +Mickaël Remars +Miguel Angel Fernández +Miguel Morales +Mihai Borobocea +Mihuleacc Sergiu +Mike Brown +Mike Casas +Mike Chelen +Mike Danese +Mike Dillon +Mike Dougherty +Mike Estes +Mike Gaffney +Mike Goelzer +Mike Leone +Mike Lundy +Mike MacCana +Mike Naberezny +Mike Snitzer +mikelinjie <294893458@qq.com> +Mikhail Sobolev +Miklos Szegedi +Milind Chawre +Miloslav Trmač +mingqing +Mingzhen Feng +Misty Stanley-Jones +Mitch Capper +Mizuki Urushida +mlarcher +Mohammad Banikazemi +Mohammed Aaqib Ansari +Mohit Soni +Moorthy RS +Morgan Bauer +Morgante Pell +Morgy93 +Morten Siebuhr +Morton Fox +Moysés Borges +mrfly +Mrunal Patel +Muayyad Alsadi +Mustafa Akın +Muthukumar R +Máximo Cuadros +Médi-Rémi Hashim +Nace Oroz +Nahum Shalman +Nakul Pathak +Nalin Dahyabhai +Nan Monnand Deng +Naoki Orii +Natalie Parker +Natanael Copa +Nate Brennand +Nate Eagleson +Nate Jones +Nathan Hsieh +Nathan Kleyn +Nathan LeClaire +Nathan McCauley +Nathan Williams +Naveed Jamil +Neal McBurnett +Neil Horman +Neil Peterson +Nelson Chen +Neyazul Haque +Nghia Tran +Niall O'Higgins +Nicholas E. Rabenau +Nick DeCoursin +Nick Irvine +Nick Neisen +Nick Parker +Nick Payne +Nick Russo +Nick Stenning +Nick Stinemates +NickrenREN +Nicola Kabar +Nicolas Borboën +Nicolas De Loof +Nicolas Dudebout +Nicolas Goy +Nicolas Kaiser +Nicolas Sterchele +Nicolas V Castet +Nicolás Hock Isaza +Nigel Poulton +Nik Nyby +Nikhil Chawla +NikolaMandic +Nikolas Garofil +Nikolay Milovanov +Nirmal Mehta +Nishant Totla +NIWA Hideyuki +Noah Meyerhans +Noah Treuhaft +NobodyOnSE +noducks +Nolan Darilek +nponeccop +Nuutti Kotivuori +nzwsch +O.S. Tezer +objectified +Oguz Bilgic +Oh Jinkyun +Ohad Schneider +ohmystack +Ole Reifschneider +Oliver Neal +Olivier Gambier +Olle Jonsson +Oriol Francès +Oskar Niburski +Otto Kekäläinen +Ouyang Liduo +Ovidio Mallo +Panagiotis Moustafellos +Paolo G. 
Giarrusso +Pascal +Pascal Borreli +Pascal Hartig +Patrick Böänziger +Patrick Devine +Patrick Hemmer +Patrick Stapleton +Patrik Cyvoct +pattichen +Paul +paul +Paul Annesley +Paul Bellamy +Paul Bowsher +Paul Furtado +Paul Hammond +Paul Jimenez +Paul Kehrer +Paul Lietar +Paul Liljenberg +Paul Morie +Paul Nasrat +Paul Weaver +Paulo Ribeiro +Pavel Lobashov +Pavel Pletenev +Pavel Pospisil +Pavel Sutyrin +Pavel Tikhomirov +Pavlos Ratis +Pavol Vargovcik +Pawel Konczalski +Peeyush Gupta +Peggy Li +Pei Su +Peng Tao +Penghan Wang +Per Weijnitz +perhapszzy@sina.com +Peter Bourgon +Peter Braden +Peter Bücker +Peter Choi +Peter Dave Hello +Peter Edge +Peter Ericson +Peter Esbensen +Peter Jaffe +Peter Malmgren +Peter Salvatore +Peter Volpe +Peter Waller +Petr Švihlík +Phil +Phil Estes +Phil Spitler +Philip Alexander Etling +Philip Monroe +Philipp Gillé +Philipp Wahala +Philipp Weissensteiner +Phillip Alexander +phineas +pidster +Piergiuliano Bossi +Pierre +Pierre Carrier +Pierre Dal-Pra +Pierre Wacrenier +Pierre-Alain RIVIERE +Piotr Bogdan +pixelistik +Porjo +Poul Kjeldager Sørensen +Pradeep Chhetri +Pradip Dhara +Prasanna Gautam +Pratik Karki +Prayag Verma +Priya Wadhwa +Projjol Banerji +Przemek Hejman +Pure White +pysqz +Qiang Huang +Qinglan Peng +qudongfang +Quentin Brossard +Quentin Perez +Quentin Tayssier +r0n22 +Rafal Jeczalik +Rafe Colton +Raghavendra K T +Raghuram Devarakonda +Raja Sami +Rajat Pandit +Rajdeep Dua +Ralf Sippl +Ralle +Ralph Bean +Ramkumar Ramachandra +Ramon Brooker +Ramon van Alteren +Ray Tsang +ReadmeCritic +Recursive Madman +Reficul +Regan McCooey +Remi Rampin +Remy Suen +Renato Riccieri Santos Zannon +Renaud Gaubert +Rhys Hiltner +Ri Xu +Ricardo N Feliciano +Rich Moyse +Rich Seymour +Richard +Richard Burnison +Richard Harvey +Richard Mathie +Richard Metzler +Richard Scothern +Richo Healey +Rick Bradley +Rick van de Loo +Rick Wieman +Rik Nijessen +Riku Voipio +Riley Guerin +Ritesh H Shukla +Riyaz Faizullabhoy +Rob Vesse +Robert Bachmann +Robert Bittle +Robert Obryk +Robert Schneider +Robert Stern +Robert Terhaar +Robert Wallis +Roberto G. Hashioka +Roberto Muñoz Fernández +Robin Naundorf +Robin Schneider +Robin Speekenbrink +robpc +Rodolfo Carvalho +Rodrigo Vaz +Roel Van Nyen +Roger Peppe +Rohit Jnagal +Rohit Kadam +Rojin George +Roland Huß +Roland Kammerer +Roland Moriz +Roma Sokolov +Roman Dudin +Roman Strashkin +Ron Smits +Ron Williams +root +root +root +root +Rory Hunter +Rory McCune +Ross Boucher +Rovanion Luckey +Royce Remer +Rozhnov Alexandr +Rudolph Gottesheim +Rui Lopes +Runshen Zhu +Russ Magee +Ryan Abrams +Ryan Anderson +Ryan Aslett +Ryan Belgrave +Ryan Detzel +Ryan Fowler +Ryan Liu +Ryan McLaughlin +Ryan O'Donnell +Ryan Seto +Ryan Simmen +Ryan Stelly +Ryan Thomas +Ryan Trauntvein +Ryan Wallner +Ryan Zhang +ryancooper7 +RyanDeng +Rémy Greinhofer +s. rannou +s00318865 +Sabin Basyal +Sachin Joshi +Sagar Hani +Sainath Grandhi +Sakeven Jiang +Salahuddin Khan +Sally O'Malley +Sam Abed +Sam Alba +Sam Bailey +Sam J Sharpe +Sam Neirinck +Sam Reis +Sam Rijs +Sambuddha Basu +Sami Wagiaalla +Samuel Andaya +Samuel Dion-Girardeau +Samuel Karp +Samuel PHAN +Sandeep Bansal +Sankar சங்கர் +Sanket Saurav +Santhosh Manohar +sapphiredev +Sargun Dhillon +Sascha Andres +Satnam Singh +Satoshi Amemiya +Satoshi Tagomori +Scott Bessler +Scott Collier +Scott Johnston +Scott Stamp +Scott Walls +sdreyesg +Sean Christopherson +Sean Cronin +Sean Lee +Sean McIntyre +Sean OMeara +Sean P. 
Kane +Sean Rodman +Sebastiaan van Steenis +Sebastiaan van Stijn +Senthil Kumar Selvaraj +Senthil Kumaran +SeongJae Park +Seongyeol Lim +Serge Hallyn +Sergey Alekseev +Sergey Evstifeev +Sergii Kabashniuk +Serhat Gülçiçek +SeungUkLee +Sevki Hasirci +Shane Canon +Shane da Silva +Shaun Kaasten +shaunol +Shawn Landden +Shawn Siefkas +shawnhe +Shayne Wang +Shekhar Gulati +Sheng Yang +Shengbo Song +Shev Yan +Shih-Yuan Lee +Shijiang Wei +Shijun Qin +Shishir Mahajan +Shoubhik Bose +Shourya Sarcar +shuai-z +Shukui Yang +Shuwei Hao +Sian Lerk Lau +Sidhartha Mani +sidharthamani +Silas Sewell +Silvan Jegen +Simei He +Simon Eskildsen +Simon Ferquel +Simon Leinen +Simon Menke +Simon Taranto +Simon Vikstrom +Sindhu S +Sjoerd Langkemper +Solganik Alexander +Solomon Hykes +Song Gao +Soshi Katsuta +Soulou +Spencer Brown +Spencer Smith +Sridatta Thatipamala +Sridhar Ratnakumar +Srini Brahmaroutu +Srinivasan Srivatsan +Stanislav Bondarenko +Steeve Morin +Stefan Berger +Stefan J. Wernli +Stefan Praszalowicz +Stefan S. +Stefan Scherer +Stefan Staudenmeyer +Stefan Weil +Stephan Spindler +Stephen Crosby +Stephen Day +Stephen Drake +Stephen Rust +Steve Desmond +Steve Dougherty +Steve Durrheimer +Steve Francia +Steve Koch +Steven Burgess +Steven Erenst +Steven Hartland +Steven Iveson +Steven Merrill +Steven Richards +Steven Taylor +Subhajit Ghosh +Sujith Haridasan +Sun Gengze <690388648@qq.com> +Sun Jianbo +Sunny Gogoi +Suryakumar Sudar +Sven Dowideit +Swapnil Daingade +Sylvain Baubeau +Sylvain Bellemare +Sébastien +Sébastien HOUZÉ +Sébastien Luttringer +Sébastien Stormacq +Tabakhase +Tadej Janež +TAGOMORI Satoshi +tang0th +Tangi Colin +Tatsuki Sugiura +Tatsushi Inagaki +Taylan Isikdemir +Taylor Jones +Ted M. Young +Tehmasp Chaudhri +Tejaswini Duggaraju +Tejesh Mehta +terryding77 <550147740@qq.com> +tgic +Thatcher Peskens +theadactyl +Thell 'Bo' Fowler +Thermionix +Thijs Terlouw +Thomas Bikeev +Thomas Frössman +Thomas Gazagnaire +Thomas Grainger +Thomas Hansen +Thomas Leonard +Thomas Léveil +Thomas Orozco +Thomas Riccardi +Thomas Schroeter +Thomas Sjögren +Thomas Swift +Thomas Tanaka +Thomas Texier +Ti Zhou +Tianon Gravi +Tianyi Wang +Tibor Vass +Tiffany Jernigan +Tiffany Low +Tim Bart +Tim Bosse +Tim Dettrick +Tim Düsterhus +Tim Hockin +Tim Potter +Tim Ruffles +Tim Smith +Tim Terhorst +Tim Wang +Tim Waugh +Tim Wraight +Tim Zju <21651152@zju.edu.cn> +timfeirg +Timothy Hobbs +tjwebb123 +tobe +Tobias Bieniek +Tobias Bradtke +Tobias Gesellchen +Tobias Klauser +Tobias Munk +Tobias Schmidt +Tobias Schwab +Todd Crane +Todd Lunter +Todd Whiteman +Toli Kuznets +Tom Barlow +Tom Booth +Tom Denham +Tom Fotherby +Tom Howe +Tom Hulihan +Tom Maaswinkel +Tom Sweeney +Tom Wilkie +Tom X. Tobin +Tomas Tomecek +Tomasz Kopczynski +Tomasz Lipinski +Tomasz Nurkiewicz +Tommaso Visconti +Tomáš Hrčka +Tonny Xu +Tony Abboud +Tony Daws +Tony Miller +toogley +Torstein Husebø +Tõnis Tiigi +tpng +tracylihui <793912329@qq.com> +Trapier Marshall +Travis Cline +Travis Thieman +Trent Ogren +Trevor +Trevor Pounds +Trevor Sullivan +Trishna Guha +Tristan Carel +Troy Denton +Tycho Andersen +Tyler Brock +Tyler Brown +Tzu-Jung Lee +uhayate +Ulysse Carion +Umesh Yadav +Utz Bacher +vagrant +Vaidas Jablonskis +vanderliang +Veres Lajos +Victor Algaze +Victor Coisne +Victor Costan +Victor I. 
Wood +Victor Lyuboslavsky +Victor Marmol +Victor Palma +Victor Vieux +Victoria Bialas +Vijaya Kumar K +Viktor Stanchev +Viktor Vojnovski +VinayRaghavanKS +Vincent Batts +Vincent Bernat +Vincent Demeester +Vincent Giersch +Vincent Mayers +Vincent Woo +Vinod Kulkarni +Vishal Doshi +Vishnu Kannan +Vitaly Ostrosablin +Vitor Monteiro +Vivek Agarwal +Vivek Dasgupta +Vivek Goyal +Vladimir Bulyga +Vladimir Kirillov +Vladimir Pouzanov +Vladimir Rutsky +Vladimir Varankin +VladimirAus +Vlastimil Zeman +Vojtech Vitek (V-Teq) +waitingkuo +Walter Leibbrandt +Walter Stanish +Wang Chao +Wang Guoliang +Wang Jie +Wang Long +Wang Ping +Wang Xing +Wang Yuexiao +Ward Vandewege +WarheadsSE +Wassim Dhif +Wayne Chang +Wayne Song +Weerasak Chongnguluam +Wei Wu +Wei-Ting Kuo +weipeng +weiyan +Weiyang Zhu +Wen Cheng Ma +Wendel Fleming +Wenjun Tang +Wenkai Yin +Wentao Zhang +Wenxuan Zhao +Wenyu You <21551128@zju.edu.cn> +Wenzhi Liang +Wes Morgan +Wewang Xiaorenfine +Will Dietz +Will Rouesnel +Will Weaver +willhf +William Delanoue +William Henry +William Hubbs +William Martin +William Riancho +William Thurston +WiseTrem +Wolfgang Powisch +Wonjun Kim +xamyzhao +Xianglin Gao +Xianlu Bird +XiaoBing Jiang +Xiaoxu Chen +Xiaoyu Zhang +xiekeyang +Ximo Guanter Gonzálbez +Xinbo Weng +Xinzi Zhou +Xiuming Chen +Xuecong Liao +xuzhaokui +Yadnyawalkya Tale +Yahya +YAMADA Tsuyoshi +Yamasaki Masahide +Yan Feng +Yang Bai +Yang Pengfei +yangchenliang +Yanqiang Miao +Yao Zaiyong +Yassine Tijani +Yasunori Mahata +Yazhong Liu +Yestin Sun +Yi EungJun +Yibai Zhang +Yihang Ho +Ying Li +Yohei Ueda +Yong Tang +Yongzhi Pan +Yosef Fertel +You-Sheng Yang (楊有勝) +Youcef YEKHLEF +Yu Changchun +Yu Chengxia +Yu Peng +Yu-Ju Hong +Yuan Sun +Yuanhong Peng +Yuhao Fang +Yuichiro Kaneko +Yunxiang Huang +Yurii Rashkovskii +Yves Junqueira +Zac Dover +Zach Borboa +Zachary Jaffee +Zain Memon +Zaiste! +Zane DeGraffenried +Zefan Li +Zen Lin(Zhinan Lin) +Zhang Kun +Zhang Wei +Zhang Wentao +ZhangHang +zhangxianwei +Zhenan Ye <21551168@zju.edu.cn> +zhenghenghuo +Zhenkun Bi +Zhou Hao +Zhu Guihua +Zhu Kunjia +Zhuoyun Wei +Zilin Du +zimbatm +Ziming Dong +ZJUshuaizhou <21551191@zju.edu.cn> +zmarouf +Zoltan Tombol +Zou Yu +zqh +Zuhayr Elahi +Zunayed Ali +Álex González +Álvaro Lázaro +Átila Camurça Alves +尹吉峰 +徐俊杰 +慕陶 +搏通 +黄艳红00139573 diff --git a/extender/code/vendor/github.com/docker/docker/LICENSE b/extender/code/vendor/github.com/docker/docker/LICENSE new file mode 100644 index 000000000..6d8d58fb6 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2018 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/extender/code/vendor/github.com/docker/docker/NOTICE b/extender/code/vendor/github.com/docker/docker/NOTICE new file mode 100644 index 000000000..0c74e15b0 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2017 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/kr/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. 
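For reference, the docker-credential-helpers packages vendored above define the whole contract a credential helper must satisfy: the Helper interface (Add, Delete, Get, List) plus Serve, which dispatches store/get/erase/list/version from os.Args[1] over stdin/stdout. A minimal sketch of a helper built on that contract, assuming an in-memory store (illustrative only, not part of this changeset):

package main

import (
	"sync"

	"github.com/docker/docker-credential-helpers/credentials"
)

// memoryHelper is a toy credentials.Helper that keeps secrets in memory.
type memoryHelper struct {
	mu    sync.Mutex
	creds map[string]*credentials.Credentials // keyed by ServerURL
}

// Add stores the given credentials, overwriting any previous entry.
func (h *memoryHelper) Add(c *credentials.Credentials) error {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.creds[c.ServerURL] = c
	return nil
}

// Delete removes the credentials for serverURL, if present.
func (h *memoryHelper) Delete(serverURL string) error {
	h.mu.Lock()
	defer h.mu.Unlock()
	delete(h.creds, serverURL)
	return nil
}

// Get returns the username and secret for serverURL, or the standard
// not-found error so callers can match on its message.
func (h *memoryHelper) Get(serverURL string) (string, string, error) {
	h.mu.Lock()
	defer h.mu.Unlock()
	if c, ok := h.creds[serverURL]; ok {
		return c.Username, c.Secret, nil
	}
	return "", "", credentials.NewErrCredentialsNotFound()
}

// List returns the stored server URLs and their associated usernames.
func (h *memoryHelper) List() (map[string]string, error) {
	h.mu.Lock()
	defer h.mu.Unlock()
	out := make(map[string]string, len(h.creds))
	for url, c := range h.creds {
		out[url] = c.Username
	}
	return out, nil
}

func main() {
	// Serve reads the action from os.Args[1] and the payload from stdin.
	credentials.Serve(&memoryHelper{creds: map[string]*credentials.Credentials{}})
}

Since every helper invocation is a separate process, a real helper would back these methods with an OS keychain or another persistent store; the in-memory map only keeps the example self-contained.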
diff --git a/extender/code/vendor/github.com/docker/docker/api/README.md b/extender/code/vendor/github.com/docker/docker/api/README.md new file mode 100644 index 000000000..f136c3433 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/README.md @@ -0,0 +1,42 @@ +# Working on the Engine API + +The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon. + +It consists of various components in this repository: + +- `api/swagger.yaml` A Swagger definition of the API. +- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this. +- `cli/` The command-line client. +- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs. +- `daemon/` The daemon, which serves the API. + +## Swagger definition + +The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to: + +1. Automatically generate documentation. +2. Automatically generate the Go server and client. (A work-in-progress.) +3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc. + +## Updating the API documentation + +The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, edit this file to represent the change in the documentation. + +The file is split into two main sections: + +- `definitions`, which defines re-usable objects used in requests and responses +- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable) + +To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section. + +There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](https://github.com/docker/docker/issues/27919). + +`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when making edits to ensure you are doing the right thing. + +## Viewing the API documentation + +When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly. + +Run `make swagger-docs` and a preview will be running at `http://localhost`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation. + +The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io). diff --git a/extender/code/vendor/github.com/docker/docker/api/common.go b/extender/code/vendor/github.com/docker/docker/api/common.go new file mode 100644 index 000000000..aa146cdae --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/common.go @@ -0,0 +1,11 @@ +package api // import "github.com/docker/docker/api" + +// Common constants for daemon and client. 
+const ( + // DefaultVersion of Current REST API + DefaultVersion = "1.40" + + // NoBaseImageSpecifier is the symbol used by the FROM + // command to specify that no base image is to be used. + NoBaseImageSpecifier = "scratch" +) diff --git a/extender/code/vendor/github.com/docker/docker/api/common_unix.go b/extender/code/vendor/github.com/docker/docker/api/common_unix.go new file mode 100644 index 000000000..504b0c90d --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/common_unix.go @@ -0,0 +1,6 @@ +// +build !windows + +package api // import "github.com/docker/docker/api" + +// MinVersion represents Minimum REST API version supported +const MinVersion = "1.12" diff --git a/extender/code/vendor/github.com/docker/docker/api/common_windows.go b/extender/code/vendor/github.com/docker/docker/api/common_windows.go new file mode 100644 index 000000000..590ba5479 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/common_windows.go @@ -0,0 +1,8 @@ +package api // import "github.com/docker/docker/api" + +// MinVersion represents Minimum REST API version supported +// Technically the first daemon API version released on Windows is v1.25 in +// engine version 1.13. However, some clients are explicitly using downlevel +// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive. +// Hence also allowing 1.24 on Windows. +const MinVersion string = "1.24" diff --git a/extender/code/vendor/github.com/docker/docker/api/swagger-gen.yaml b/extender/code/vendor/github.com/docker/docker/api/swagger-gen.yaml new file mode 100644 index 000000000..f07a02737 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/swagger-gen.yaml @@ -0,0 +1,12 @@ + +layout: + models: + - name: definition + source: asset:model + target: "{{ joinFilePath .Target .ModelPackage }}" + file_name: "{{ (snakize (pascalize .Name)) }}.go" + operations: + - name: handler + source: asset:serverOperation + target: "{{ joinFilePath .Target .APIPackage .Package }}" + file_name: "{{ (snakize (pascalize .Name)) }}.go" diff --git a/extender/code/vendor/github.com/docker/docker/api/swagger.yaml b/extender/code/vendor/github.com/docker/docker/api/swagger.yaml new file mode 100644 index 000000000..78c576cef --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/swagger.yaml @@ -0,0 +1,10437 @@ +# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. +# +# This is used for generating API documentation and the types used by the +# client/server. See api/README.md for more information. +# +# Some style notes: +# - This file is used by ReDoc, which allows GitHub Flavored Markdown in +# descriptions. +# - There is no maximum line length, for ease of editing and pretty diffs. +# - operationIds are in the format "NounVerb", with a singular noun. + +swagger: "2.0" +schemes: + - "http" + - "https" +produces: + - "application/json" + - "text/plain" +consumes: + - "application/json" + - "text/plain" +basePath: "/v1.40" +info: + title: "Docker Engine API" + version: "1.40" + x-logo: + url: "https://docs.docker.com/images/logo-docker-main.png" + description: | + The Engine API is an HTTP API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API. + + Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls. 
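A hedged aside on the description above: the `docker ps` to `GET /containers/json` mapping it mentions can be exercised from Go with the client vendored in this change. This is a minimal sketch, assuming the vendored `github.com/docker/docker/client` and `github.com/docker/docker/api/types` packages, not code from this diff:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	// FromEnv honors DOCKER_HOST and friends; WithAPIVersionNegotiation lets
	// the client drop below api.DefaultVersion ("1.40") if the daemon is older.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}

	// Equivalent of `docker ps`: GET /containers/json.
	containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
	if err != nil {
		panic(err)
	}
	for _, c := range containers {
		fmt.Println(c.ID[:12], c.Image)
	}
}
```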
+ + # Errors + + The API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format: + + ``` + { + "message": "page not found" + } + ``` + + # Versioning + + The API is usually changed in each release, so API calls are versioned to + ensure that clients don't break. To lock to a specific version of the API, + you prefix the URL with its version, for example, call `/v1.30/info` to use + the v1.30 version of the `/info` endpoint. If the API version specified in + the URL is not supported by the daemon, an HTTP `400 Bad Request` error message + is returned. + + If you omit the version-prefix, the current version of the API (v1.40) is used. + For example, calling `/info` is the same as calling `/v1.40/info`. Using the + API without a version-prefix is deprecated and will be removed in a future release. + + Engine releases in the near future should support this version of the API, + so your client will continue to work even if it is talking to a newer Engine. + + The API uses an open schema model, which means a server may add extra properties + to responses. Likewise, the server will ignore any extra query parameters and + request body properties. When you write clients, you need to ignore additional + properties in responses to ensure they do not break when talking to newer + daemons. + + + # Authentication + + Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as the `X-Registry-Auth` header as a Base64 encoded (JSON) string with the following structure: + + ``` + { + "username": "string", + "password": "string", + "email": "string", + "serveraddress": "string" + } + ``` + + The `serveraddress` is a domain/IP without a protocol. Throughout this structure, double quotes are required. + + If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), you can just pass this instead of credentials: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +# The tags on paths define the menu sections in the ReDoc documentation, so +# the usage of tags must make sense for that: +# - They should be singular, not plural. +# - There should not be too many tags, or the menu becomes unwieldy. For +# example, it is preferable to add a path to the "System" tag instead of +# creating a tag with a single path in it. +# - The order of tags in this list defines the order in the menu. +tags: + # Primary objects + - name: "Container" + x-displayName: "Containers" + description: | + Create and manage containers. + - name: "Image" + x-displayName: "Images" + - name: "Network" + x-displayName: "Networks" + description: | + Networks are user-defined networks that containers can be attached to. See the [networking documentation](https://docs.docker.com/engine/userguide/networking/) for more information. + - name: "Volume" + x-displayName: "Volumes" + description: | + Create and manage persistent storage that can be attached to containers. + - name: "Exec" + x-displayName: "Exec" + description: | + Run new commands inside running containers. See the [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) for more information. + + To exec a command in a container, you first need to create an exec instance, then start it. These two API endpoints are wrapped up in a single command-line command, `docker exec`.
+ # Swarm things + - name: "Swarm" + x-displayName: "Swarm" + description: | + Engines can be clustered together in a swarm. See [the swarm mode documentation](https://docs.docker.com/engine/swarm/) for more information. + - name: "Node" + x-displayName: "Nodes" + description: | + Nodes are instances of the Engine participating in a swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Service" + x-displayName: "Services" + description: | + Services are the definitions of tasks to run on a swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Task" + x-displayName: "Tasks" + description: | + A task is a container running on a swarm. It is the atomic scheduling unit of swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Secret" + x-displayName: "Secrets" + description: | + Secrets are sensitive data that can be used by services. Swarm mode must be enabled for these endpoints to work. + - name: "Config" + x-displayName: "Configs" + description: | + Configs are application configurations that can be used by services. Swarm mode must be enabled for these endpoints to work. + # System things + - name: "Plugin" + x-displayName: "Plugins" + - name: "System" + x-displayName: "System" + +definitions: + Port: + type: "object" + description: "An open port on a container" + required: [PrivatePort, Type] + properties: + IP: + type: "string" + format: "ip-address" + description: "Host IP address that the container's port is mapped to" + PrivatePort: + type: "integer" + format: "uint16" + x-nullable: false + description: "Port on the container" + PublicPort: + type: "integer" + format: "uint16" + description: "Port exposed on the host" + Type: + type: "string" + x-nullable: false + enum: ["tcp", "udp", "sctp"] + example: + PrivatePort: 8080 + PublicPort: 80 + Type: "tcp" + + MountPoint: + type: "object" + description: "A mount point inside a container" + properties: + Type: + type: "string" + Name: + type: "string" + Source: + type: "string" + Destination: + type: "string" + Driver: + type: "string" + Mode: + type: "string" + RW: + type: "boolean" + Propagation: + type: "string" + + DeviceMapping: + type: "object" + description: "A device mapping between the host and container" + properties: + PathOnHost: + type: "string" + PathInContainer: + type: "string" + CgroupPermissions: + type: "string" + example: + PathOnHost: "/dev/deviceName" + PathInContainer: "/dev/deviceName" + CgroupPermissions: "mrw" + + DeviceRequest: + type: "object" + description: "A request for devices to be sent to device drivers" + properties: + Driver: + type: "string" + example: "nvidia" + Count: + type: "integer" + example: -1 + DeviceIDs: + type: "array" + items: + type: "string" + example: + - "0" + - "1" + - "GPU-fef8089b-4820-abfc-e83e-94318197576e" + Capabilities: + description: | + A list of capabilities; an OR list of AND lists of capabilities. + type: "array" + items: + type: "array" + items: + type: "string" + example: + # gpu AND nvidia AND compute + - ["gpu", "nvidia", "compute"] + Options: + description: | + Driver-specific options, specified as a key/value pairs. These options + are passed directly to the driver. + type: "object" + additionalProperties: + type: "string" + + ThrottleDevice: + type: "object" + properties: + Path: + description: "Device path" + type: "string" + Rate: + description: "Rate" + type: "integer" + format: "int64" + minimum: 0 + + Mount: + type: "object" + properties: + Target: + description: "Container path." 
+ type: "string" + Source: + description: "Mount source (e.g. a volume name, a host path)." + type: "string" + Type: + description: | + The mount type. Available types: + + - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. + - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. + type: "string" + enum: + - "bind" + - "volume" + - "tmpfs" + - "npipe" + ReadOnly: + description: "Whether the mount should be read-only." + type: "boolean" + Consistency: + description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." + type: "string" + BindOptions: + description: "Optional configuration for the `bind` type." + type: "object" + properties: + Propagation: + description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." + type: "string" + enum: + - "private" + - "rprivate" + - "shared" + - "rshared" + - "slave" + - "rslave" + NonRecursive: + description: "Disable recursive bind mount." + type: "boolean" + default: false + VolumeOptions: + description: "Optional configuration for the `volume` type." + type: "object" + properties: + NoCopy: + description: "Populate volume with data from the target." + type: "boolean" + default: false + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + DriverConfig: + description: "Map of driver specific options" + type: "object" + properties: + Name: + description: "Name of the driver to use to create the volume." + type: "string" + Options: + description: "key/value map of driver specific options." + type: "object" + additionalProperties: + type: "string" + TmpfsOptions: + description: "Optional configuration for the `tmpfs` type." + type: "object" + properties: + SizeBytes: + description: "The size for the tmpfs mount in bytes." + type: "integer" + format: "int64" + Mode: + description: "The permission mode for the tmpfs mount in an integer." + type: "integer" + + RestartPolicy: + description: | + The behavior to apply when the container exits. The default is not to restart. + + An ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server. + type: "object" + properties: + Name: + type: "string" + description: | + - Empty string means not to restart + - `always` Always restart + - `unless-stopped` Restart always except when the user has manually stopped the container + - `on-failure` Restart only when the container exit code is non-zero + enum: + - "" + - "always" + - "unless-stopped" + - "on-failure" + MaximumRetryCount: + type: "integer" + description: "If `on-failure` is used, the number of times to retry before giving up" + + Resources: + description: "A container's resources (cgroups config, ulimits, etc)" + type: "object" + properties: + # Applicable to all platforms + CpuShares: + description: "An integer value representing this container's relative CPU weight versus other containers." + type: "integer" + Memory: + description: "Memory limit in bytes." 
+ type: "integer" + format: "int64" + default: 0 + # Applicable to UNIX platforms + CgroupParent: + description: "Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist." + type: "string" + BlkioWeight: + description: "Block IO weight (relative weight)." + type: "integer" + minimum: 0 + maximum: 1000 + BlkioWeightDevice: + description: | + Block IO weight (relative device weight) in the form `[{"Path": "device_path", "Weight": weight}]`. + type: "array" + items: + type: "object" + properties: + Path: + type: "string" + Weight: + type: "integer" + minimum: 0 + BlkioDeviceReadBps: + description: | + Limit read rate (bytes per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteBps: + description: | + Limit write rate (bytes per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceReadIOps: + description: | + Limit read rate (IO per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteIOps: + description: | + Limit write rate (IO per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + CpuPeriod: + description: "The length of a CPU period in microseconds." + type: "integer" + format: "int64" + CpuQuota: + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + format: "int64" + CpuRealtimePeriod: + description: "The length of a CPU real-time period in microseconds. Set to 0 to allocate no time allocated to real-time tasks." + type: "integer" + format: "int64" + CpuRealtimeRuntime: + description: "The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time allocated to real-time tasks." + type: "integer" + format: "int64" + CpusetCpus: + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)" + type: "string" + example: "0-3" + CpusetMems: + description: "Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems." + type: "string" + Devices: + description: "A list of devices to add to the container." + type: "array" + items: + $ref: "#/definitions/DeviceMapping" + DeviceCgroupRules: + description: "a list of cgroup rules to apply to the container" + type: "array" + items: + type: "string" + example: "c 13:* rwm" + DeviceRequests: + description: "a list of requests for devices to be sent to device drivers" + type: "array" + items: + $ref: "#/definitions/DeviceRequest" + DiskQuota: + description: "Disk limit (in bytes)." + type: "integer" + format: "int64" + KernelMemory: + description: "Kernel memory limit in bytes." + type: "integer" + format: "int64" + example: 209715200 + KernelMemoryTCP: + description: "Hard limit for kernel TCP buffer memory (in bytes)." + type: "integer" + format: "int64" + MemoryReservation: + description: "Memory soft limit in bytes." + type: "integer" + format: "int64" + MemorySwap: + description: "Total memory limit (memory + swap). Set as `-1` to enable unlimited swap." + type: "integer" + format: "int64" + MemorySwappiness: + description: "Tune a container's memory swappiness behavior. 
Accepts an integer between 0 and 100." + type: "integer" + format: "int64" + minimum: 0 + maximum: 100 + NanoCPUs: + description: "CPU quota in units of 10<sup>-9</sup> CPUs." + type: "integer" + format: "int64" + OomKillDisable: + description: "Disable OOM Killer for the container." + type: "boolean" + Init: + description: "Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used." + type: "boolean" + x-nullable: true + PidsLimit: + description: | + Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` to not change. + type: "integer" + format: "int64" + x-nullable: true + Ulimits: + description: | + A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + # Applicable to Windows + CpuCount: + description: | + The number of usable CPUs (Windows only). + + On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + CpuPercent: + description: | + The usable percentage of the available CPUs (Windows only). + + On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + IOMaximumIOps: + description: "Maximum IOps for the container system drive (Windows only)" + type: "integer" + format: "int64" + IOMaximumBandwidth: + description: "Maximum IO in bytes per second for the container system drive (Windows only)" + type: "integer" + format: "int64" + + ResourceObject: + description: "An object describing the resources which can be advertised by a node and requested by a task" + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + GenericResources: + $ref: "#/definitions/GenericResources" + + GenericResources: + description: "User-defined resources can be either Integer resources (e.g, `SSD=3`) or String resources (e.g, `GPU=UUID1`)" + type: "array" + items: + type: "object" + properties: + NamedResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "string" + DiscreteResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "integer" + format: "int64" + example: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + HealthConfig: + description: "A test to perform to check that the container is healthy." + type: "object" + properties: + Test: + description: | + The test to perform. Possible values are: + + - `[]` inherit healthcheck from image or parent image + - `["NONE"]` disable healthcheck + - `["CMD", args...]` exec arguments directly + - `["CMD-SHELL", command]` run command with system's default shell + type: "array" + items: + type: "string" + Interval: + description: "The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit."
+ type: "integer" + Timeout: + description: "The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit." + type: "integer" + Retries: + description: "The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit." + type: "integer" + StartPeriod: + description: "Start period for the container to initialize before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit." + type: "integer" + + HostConfig: + description: "Container configuration that depends on the host we are running on" + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + # Applicable to all platforms + Binds: + type: "array" + description: | + A list of volume bindings for this container. Each volume binding is a string in one of these forms: + + - `host-src:container-dest` to bind-mount a host path into the container. Both `host-src`, and `container-dest` must be an _absolute_ path. + - `host-src:container-dest:ro` to make the bind mount read-only inside the container. Both `host-src`, and `container-dest` must be an _absolute_ path. + - `volume-name:container-dest` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path. + - `volume-name:container-dest:ro` to mount the volume read-only inside the container. `container-dest` must be an _absolute_ path. + items: + type: "string" + ContainerIDFile: + type: "string" + description: "Path to a file where the container ID is written" + LogConfig: + type: "object" + description: "The logging configuration for this container" + properties: + Type: + type: "string" + enum: + - "json-file" + - "syslog" + - "journald" + - "gelf" + - "fluentd" + - "awslogs" + - "splunk" + - "etwlogs" + - "none" + Config: + type: "object" + additionalProperties: + type: "string" + NetworkMode: + type: "string" + description: "Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken + as a custom network's name to which this container should connect." + PortBindings: + $ref: "#/definitions/PortMap" + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + AutoRemove: + type: "boolean" + description: "Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set." + VolumeDriver: + type: "string" + description: "Driver that this container uses to mount volumes." + VolumesFrom: + type: "array" + description: "A list of volumes to inherit from another container, specified in the form `<container name>[:<ro|rw>]`." + items: + type: "string" + Mounts: + description: "Specification for mounts to be added to the container." + type: "array" + items: + $ref: "#/definitions/Mount" + + # Applicable to UNIX platforms + Capabilities: + type: "array" + description: | + A list of kernel capabilities to be available for container (this overrides the default set). + + Conflicts with options 'CapAdd' and 'CapDrop'" + items: + type: "string" + CapAdd: + type: "array" + description: "A list of kernel capabilities to add to the container. Conflicts with option 'Capabilities'" + items: + type: "string" + CapDrop: + type: "array" + description: "A list of kernel capabilities to drop from the container. Conflicts with option 'Capabilities'" + items: + type: "string" + Dns: + type: "array" + description: "A list of DNS servers for the container to use."
+ items: + type: "string" + DnsOptions: + type: "array" + description: "A list of DNS options." + items: + type: "string" + DnsSearch: + type: "array" + description: "A list of DNS search domains." + items: + type: "string" + ExtraHosts: + type: "array" + description: | + A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + items: + type: "string" + GroupAdd: + type: "array" + description: "A list of additional groups that the container process will run as." + items: + type: "string" + IpcMode: + type: "string" + description: | + IPC sharing mode for the container. Possible values are: + + - `"none"`: own private IPC namespace, with /dev/shm not mounted + - `"private"`: own private IPC namespace + - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers + - `"container:<name|id>"`: join another (shareable) container's IPC namespace + - `"host"`: use the host system's IPC namespace + + If not specified, daemon default is used, which can either be `"private"` + or `"shareable"`, depending on daemon version and configuration. + Cgroup: + type: "string" + description: "Cgroup to use for the container." + Links: + type: "array" + description: "A list of links for the container in the form `container_name:alias`." + items: + type: "string" + OomScoreAdj: + type: "integer" + description: "An integer value containing the score given to the container in order to tune OOM killer preferences." + example: 500 + PidMode: + type: "string" + description: | + Set the PID (Process) Namespace mode for the container. It can be either: + + - `"container:<name|id>"`: joins another container's PID namespace + - `"host"`: use the host's PID namespace inside the container + Privileged: + type: "boolean" + description: "Gives the container full access to the host." + PublishAllPorts: + type: "boolean" + description: | + Allocates an ephemeral host port for all of a container's + exposed ports. + + Ports are de-allocated when the container stops and allocated when the container starts. + The allocated port might be changed when restarting the container. + + The port is selected from the ephemeral port range that depends on the kernel. + For example, on Linux the range is defined by `/proc/sys/net/ipv4/ip_local_port_range`. + ReadonlyRootfs: + type: "boolean" + description: "Mount the container's root filesystem as read only." + SecurityOpt: + type: "array" + description: "A list of string values to customize labels for MLS + systems, such as SELinux." + items: + type: "string" + StorageOpt: + type: "object" + description: | + Storage driver options for this container, in the form `{"size": "120G"}`. + additionalProperties: + type: "string" + Tmpfs: + type: "object" + description: | + A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. For example: `{ "/run": "rw,noexec,nosuid,size=65536k" }`. + additionalProperties: + type: "string" + UTSMode: + type: "string" + description: "UTS namespace to use for the container." + UsernsMode: + type: "string" + description: "Sets the usernamespace mode for the container when usernamespace remapping option is enabled." + ShmSize: + type: "integer" + description: "Size of `/dev/shm` in bytes. If omitted, the system uses 64MB." + minimum: 0 + Sysctls: + type: "object" + description: | + A list of kernel parameters (sysctls) to set in the container.
For example: `{"net.ipv4.ip_forward": "1"}` + additionalProperties: + type: "string" + Runtime: + type: "string" + description: "Runtime to use with this container." + # Applicable to Windows + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array. (Windows only)" + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + Isolation: + type: "string" + description: "Isolation technology of the container. (Windows only)" + enum: + - "default" + - "process" + - "hyperv" + MaskedPaths: + type: "array" + description: "The list of paths to be masked inside the container (this overrides the default set of paths)" + items: + type: "string" + ReadonlyPaths: + type: "array" + description: "The list of paths to be set as read-only inside the container (this overrides the default set of paths)" + items: + type: "string" + + ContainerConfig: + description: "Configuration for a container that is portable between hosts" + type: "object" + properties: + Hostname: + description: "The hostname to use for the container, as a valid RFC 1123 hostname." + type: "string" + Domainname: + description: "The domain name to use for the container." + type: "string" + User: + description: "The user that commands are run as inside the container." + type: "string" + AttachStdin: + description: "Whether to attach to `stdin`." + type: "boolean" + default: false + AttachStdout: + description: "Whether to attach to `stdout`." + type: "boolean" + default: true + AttachStderr: + description: "Whether to attach to `stderr`." + type: "boolean" + default: true + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"<port>/<tcp|udp|sctp>": {}}` + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + Tty: + description: "Attach standard streams to a TTY, including `stdin` if it is not closed." + type: "boolean" + default: false + OpenStdin: + description: "Open `stdin`" + type: "boolean" + default: false + StdinOnce: + description: "Close `stdin` after one attached client disconnects" + type: "boolean" + default: false + Env: + description: | + A list of environment variables to set inside the container in the form `["VAR=value", ...]`. A variable without `=` is removed from the environment, rather than to have an empty value. + type: "array" + items: + type: "string" + Cmd: + description: "Command to run specified as a string or an array of strings." + type: "array" + items: + type: "string" + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + Image: + description: "The name of the image to use when creating the container" + type: "string" + Volumes: + description: "An object mapping mount point paths inside the container to empty objects." + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + NetworkDisabled: + description: "Disable networking for the container." + type: "boolean" + MacAddress: + description: "MAC address of the container."
+ type: "string" + OnBuild: + description: "`ONBUILD` metadata that were defined in the image's `Dockerfile`." + type: "array" + items: + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + StopSignal: + description: "Signal to stop a container as a string or unsigned integer." + type: "string" + default: "SIGTERM" + StopTimeout: + description: "Timeout to stop a container in seconds." + type: "integer" + default: 10 + Shell: + description: "Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell." + type: "array" + items: + type: "string" + + NetworkSettings: + description: "NetworkSettings exposes the network settings in the API" + type: "object" + properties: + Bridge: + description: Name of the network's bridge (for example, `docker0`). + type: "string" + example: "docker0" + SandboxID: + description: SandboxID uniquely represents a container's network stack. + type: "string" + example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" + HairpinMode: + description: | + Indicates if hairpin NAT should be enabled on the virtual interface. + type: "boolean" + example: false + LinkLocalIPv6Address: + description: IPv6 unicast address using the link-local prefix. + type: "string" + example: "fe80::42:acff:fe11:1" + LinkLocalIPv6PrefixLen: + description: Prefix length of the IPv6 unicast address. + type: "integer" + example: "64" + Ports: + $ref: "#/definitions/PortMap" + SandboxKey: + description: SandboxKey identifies the sandbox + type: "string" + example: "/var/run/docker/netns/8ab54b426c38" + + # TODO is SecondaryIPAddresses actually used? + SecondaryIPAddresses: + description: "" + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + # TODO is SecondaryIPv6Addresses actually used? + SecondaryIPv6Addresses: + description: "" + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + # TODO properties below are part of DefaultNetworkSettings, which is + # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12 + EndpointID: + description: | + EndpointID uniquely represents a service endpoint in a Sandbox. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.1" + GlobalIPv6Address: + description: | + Global IPv6 address for the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8::5689" + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 64 + IPAddress: + description: | + IPv4 address for the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address for this network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8:2::100" + MacAddress: + description: | + MAC address for the container on the default "bridge" network. + +
+ > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "02:42:ac:11:00:04" + Networks: + description: | + Information about all networks that the container is connected to. + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + + Address: + description: Address represents an IPv4 or IPv6 IP address. + type: "object" + properties: + Addr: + description: IP address. + type: "string" + PrefixLen: + description: Mask length of the IP address. + type: "integer" + + PortMap: + description: | + PortMap describes the mapping of container ports to host ports, using the + container's port-number and protocol as key in the format `<port>/<protocol>`, + for example, `80/udp`. + + If a container's port is mapped for multiple protocols, separate entries + are added to the mapping table. + type: "object" + additionalProperties: + type: "array" + items: + $ref: "#/definitions/PortBinding" + example: + "443/tcp": + - HostIp: "127.0.0.1" + HostPort: "4443" + "80/tcp": + - HostIp: "0.0.0.0" + HostPort: "80" + - HostIp: "0.0.0.0" + HostPort: "8080" + "80/udp": + - HostIp: "0.0.0.0" + HostPort: "80" + "53/udp": + - HostIp: "0.0.0.0" + HostPort: "53" + "2377/tcp": null + + PortBinding: + description: | + PortBinding represents a binding between a host IP address and a host + port. + type: "object" + x-nullable: true + properties: + HostIp: + description: "Host IP address that the container's port is mapped to." + type: "string" + example: "127.0.0.1" + HostPort: + description: "Host port number that the container's port is mapped to." + type: "string" + example: "4443" + + GraphDriverData: + description: "Information about a container's graph driver."
+ type: "object" + required: [Name, Data] + properties: + Name: + type: "string" + x-nullable: false + Data: + type: "object" + x-nullable: false + additionalProperties: + type: "string" + + Image: + type: "object" + required: + - Id + - Parent + - Comment + - Created + - Container + - DockerVersion + - Author + - Architecture + - Os + - Size + - VirtualSize + - GraphDriver + - RootFS + properties: + Id: + type: "string" + x-nullable: false + RepoTags: + type: "array" + items: + type: "string" + RepoDigests: + type: "array" + items: + type: "string" + Parent: + type: "string" + x-nullable: false + Comment: + type: "string" + x-nullable: false + Created: + type: "string" + x-nullable: false + Container: + type: "string" + x-nullable: false + ContainerConfig: + $ref: "#/definitions/ContainerConfig" + DockerVersion: + type: "string" + x-nullable: false + Author: + type: "string" + x-nullable: false + Config: + $ref: "#/definitions/ContainerConfig" + Architecture: + type: "string" + x-nullable: false + Os: + type: "string" + x-nullable: false + OsVersion: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + VirtualSize: + type: "integer" + format: "int64" + x-nullable: false + GraphDriver: + $ref: "#/definitions/GraphDriverData" + RootFS: + type: "object" + required: [Type] + properties: + Type: + type: "string" + x-nullable: false + Layers: + type: "array" + items: + type: "string" + BaseLayer: + type: "string" + Metadata: + type: "object" + properties: + LastTagTime: + type: "string" + format: "dateTime" + + ImageSummary: + type: "object" + required: + - Id + - ParentId + - RepoTags + - RepoDigests + - Created + - Size + - SharedSize + - VirtualSize + - Labels + - Containers + properties: + Id: + type: "string" + x-nullable: false + ParentId: + type: "string" + x-nullable: false + RepoTags: + type: "array" + x-nullable: false + items: + type: "string" + RepoDigests: + type: "array" + x-nullable: false + items: + type: "string" + Created: + type: "integer" + x-nullable: false + Size: + type: "integer" + x-nullable: false + SharedSize: + type: "integer" + x-nullable: false + VirtualSize: + type: "integer" + x-nullable: false + Labels: + type: "object" + x-nullable: false + additionalProperties: + type: "string" + Containers: + x-nullable: false + type: "integer" + + AuthConfig: + type: "object" + properties: + username: + type: "string" + password: + type: "string" + email: + type: "string" + serveraddress: + type: "string" + example: + username: "hannibal" + password: "xxxx" + serveraddress: "https://index.docker.io/v1/" + + ProcessConfig: + type: "object" + properties: + privileged: + type: "boolean" + user: + type: "string" + tty: + type: "boolean" + entrypoint: + type: "string" + arguments: + type: "array" + items: + type: "string" + + Volume: + type: "object" + required: [Name, Driver, Mountpoint, Labels, Scope, Options] + properties: + Name: + type: "string" + description: "Name of the volume." + x-nullable: false + Driver: + type: "string" + description: "Name of the volume driver used by the volume." + x-nullable: false + Mountpoint: + type: "string" + description: "Mount path of the volume on the host." + x-nullable: false + CreatedAt: + type: "string" + format: "dateTime" + description: "Date/Time the volume was created." + Status: + type: "object" + description: | + Low-level details about the volume, provided by the volume driver. + Details are returned as a map with key/value pairs: + `{"key":"value","key2":"value2"}`. 
+ + The `Status` field is optional, and is omitted if the volume driver + does not support this feature. + additionalProperties: + type: "object" + Labels: + type: "object" + description: "User-defined key/value metadata." + x-nullable: false + additionalProperties: + type: "string" + Scope: + type: "string" + description: "The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level." + default: "local" + x-nullable: false + enum: ["local", "global"] + Options: + type: "object" + description: "The driver specific options used when creating the volume." + additionalProperties: + type: "string" + UsageData: + type: "object" + x-nullable: true + required: [Size, RefCount] + description: | + Usage details about the volume. This information is used by the + `GET /system/df` endpoint, and omitted in other endpoints. + properties: + Size: + type: "integer" + default: -1 + description: | + Amount of disk space used by the volume (in bytes). This information + is only available for volumes created with the `"local"` volume + driver. For volumes created with other volume drivers, this field + is set to `-1` ("not available") + x-nullable: false + RefCount: + type: "integer" + default: -1 + description: | + The number of containers referencing this volume. This field + is set to `-1` if the reference-count is not available. + x-nullable: false + + example: + Name: "tardis" + Driver: "custom" + Mountpoint: "/var/lib/docker/volumes/tardis" + Status: + hello: "world" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: "local" + CreatedAt: "2016-06-07T20:31:11.853781916Z" + + Network: + type: "object" + properties: + Name: + type: "string" + Id: + type: "string" + Created: + type: "string" + format: "dateTime" + Scope: + type: "string" + Driver: + type: "string" + EnableIPv6: + type: "boolean" + IPAM: + $ref: "#/definitions/IPAM" + Internal: + type: "boolean" + Attachable: + type: "boolean" + Ingress: + type: "boolean" + Containers: + type: "object" + additionalProperties: + $ref: "#/definitions/NetworkContainer" + Options: + type: "object" + additionalProperties: + type: "string" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + Name: "net01" + Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" + Created: "2016-10-19T04:33:30.360899459Z" + Scope: "local" + Driver: "bridge" + EnableIPv6: false + IPAM: + Driver: "default" + Config: + - Subnet: "172.19.0.0/16" + Gateway: "172.19.0.1" + Options: + foo: "bar" + Internal: false + Attachable: false + Ingress: false + Containers: + 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: + Name: "test" + EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: "02:42:ac:13:00:02" + IPv4Address: "172.19.0.2/16" + IPv6Address: "" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + IPAM: + type: "object" + properties: + Driver: + description: "Name of the IPAM driver to use." 
+ type: "string" + default: "default" + Config: + description: "List of IPAM configuration options, specified as a map: `{\"Subnet\": <CIDR>, \"IPRange\": <CIDR>, \"Gateway\": <IP address>, \"AuxAddress\": <device_name:IP address>}`" + type: "array" + items: + type: "object" + additionalProperties: + type: "string" + Options: + description: "Driver-specific options, specified as a map." + type: "object" + additionalProperties: + type: "string" + + NetworkContainer: + type: "object" + properties: + Name: + type: "string" + EndpointID: + type: "string" + MacAddress: + type: "string" + IPv4Address: + type: "string" + IPv6Address: + type: "string" + + BuildInfo: + type: "object" + properties: + id: + type: "string" + stream: + type: "string" + error: + type: "string" + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + aux: + $ref: "#/definitions/ImageID" + + BuildCache: + type: "object" + properties: + ID: + type: "string" + Parent: + type: "string" + Type: + type: "string" + Description: + type: "string" + InUse: + type: "boolean" + Shared: + type: "boolean" + Size: + type: "integer" + CreatedAt: + type: "integer" + LastUsedAt: + type: "integer" + x-nullable: true + UsageCount: + type: "integer" + + ImageID: + type: "object" + description: "Image ID or Digest" + properties: + ID: + type: "string" + example: + ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + + CreateImageInfo: + type: "object" + properties: + id: + type: "string" + error: + type: "string" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + PushImageInfo: + type: "object" + properties: + error: + type: "string" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + ErrorDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "string" + + ProgressDetail: + type: "object" + properties: + current: + type: "integer" + total: + type: "integer" + + ErrorResponse: + description: "Represents an error." + type: "object" + required: ["message"] + properties: + message: + description: "The error message." + type: "string" + x-nullable: false + example: + message: "Something went wrong." + + IdResponse: + description: "Response to an API call that returns just an Id" + type: "object" + required: ["Id"] + properties: + Id: + description: "The id of the newly created object." + type: "string" + x-nullable: false + + EndpointSettings: + description: "Configuration for a network endpoint." + type: "object" + properties: + # Configurations + IPAMConfig: + $ref: "#/definitions/EndpointIPAMConfig" + Links: + type: "array" + items: + type: "string" + example: + - "container_1" + - "container_2" + Aliases: + type: "array" + items: + type: "string" + example: + - "server_x" + - "server_y" + + # Operational data + NetworkID: + description: | + Unique ID of the network. + type: "string" + example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" + EndpointID: + description: | + Unique ID for the service endpoint in a Sandbox. + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for this network. + type: "string" + example: "172.17.0.1" + IPAddress: + description: | + IPv4 address. + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address.
+ type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address. + type: "string" + example: "2001:db8:2::100" + GlobalIPv6Address: + description: | + Global IPv6 address. + type: "string" + example: "2001:db8::5689" + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + type: "integer" + format: "int64" + example: 64 + MacAddress: + description: | + MAC address for the endpoint on this network. + type: "string" + example: "02:42:ac:11:00:04" + DriverOpts: + description: | + DriverOpts is a mapping of driver options and values. These options + are passed directly to the driver and are driver specific. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + + EndpointIPAMConfig: + description: | + EndpointIPAMConfig represents an endpoint's IPAM configuration. + type: "object" + x-nullable: true + properties: + IPv4Address: + type: "string" + example: "172.20.30.33" + IPv6Address: + type: "string" + example: "2001:db8:abcd::3033" + LinkLocalIPs: + type: "array" + items: + type: "string" + example: + - "169.254.34.68" + - "fe80::3468" + + PluginMount: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Source, Destination, Type, Options] + properties: + Name: + type: "string" + x-nullable: false + example: "some-mount" + Description: + type: "string" + x-nullable: false + example: "This is a mount that's used by the plugin." + Settable: + type: "array" + items: + type: "string" + Source: + type: "string" + example: "/var/lib/docker/plugins/" + Destination: + type: "string" + x-nullable: false + example: "/mnt/state" + Type: + type: "string" + x-nullable: false + example: "bind" + Options: + type: "array" + items: + type: "string" + example: + - "rbind" + - "rw" + + PluginDevice: + type: "object" + required: [Name, Description, Settable, Path] + x-nullable: false + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Path: + type: "string" + example: "/dev/fuse" + + PluginEnv: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "string" + + PluginInterfaceType: + type: "object" + x-nullable: false + required: [Prefix, Capability, Version] + properties: + Prefix: + type: "string" + x-nullable: false + Capability: + type: "string" + x-nullable: false + Version: + type: "string" + x-nullable: false + + Plugin: + description: "A plugin for the Engine API" + type: "object" + required: [Settings, Enabled, Config, Name] + properties: + Id: + type: "string" + example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: + type: "string" + x-nullable: false + example: "tiborvass/sample-volume-plugin" + Enabled: + description: "True if the plugin is running. False if the plugin is not running, only installed." + type: "boolean" + x-nullable: false + example: true + Settings: + description: "Settings that can be modified by users." 
+ type: "object" + x-nullable: false + required: [Args, Devices, Env, Mounts] + properties: + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + type: "string" + example: + - "DEBUG=0" + Args: + type: "array" + items: + type: "string" + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PluginReference: + description: "plugin remote reference used to push/pull the plugin" + type: "string" + x-nullable: false + example: "localhost:5000/tiborvass/sample-volume-plugin:latest" + Config: + description: "The config of a plugin." + type: "object" + x-nullable: false + required: + - Description + - Documentation + - Interface + - Entrypoint + - WorkDir + - Network + - Linux + - PidHost + - PropagatedMount + - IpcHost + - Mounts + - Env + - Args + properties: + DockerVersion: + description: "Docker Version used to create the plugin" + type: "string" + x-nullable: false + example: "17.06.0-ce" + Description: + type: "string" + x-nullable: false + example: "A sample volume plugin for Docker" + Documentation: + type: "string" + x-nullable: false + example: "https://docs.docker.com/engine/extend/plugins/" + Interface: + description: "The interface between Docker and the plugin" + x-nullable: false + type: "object" + required: [Types, Socket] + properties: + Types: + type: "array" + items: + $ref: "#/definitions/PluginInterfaceType" + example: + - "docker.volumedriver/1.0" + Socket: + type: "string" + x-nullable: false + example: "plugins.sock" + ProtocolScheme: + type: "string" + example: "some.protocol/v1.0" + description: "Protocol to use for clients connecting to the plugin." + enum: + - "" + - "moby.plugins.http/v1" + Entrypoint: + type: "array" + items: + type: "string" + example: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: + type: "string" + x-nullable: false + example: "/bin/" + User: + type: "object" + x-nullable: false + properties: + UID: + type: "integer" + format: "uint32" + example: 1000 + GID: + type: "integer" + format: "uint32" + example: 1000 + Network: + type: "object" + x-nullable: false + required: [Type] + properties: + Type: + x-nullable: false + type: "string" + example: "host" + Linux: + type: "object" + x-nullable: false + required: [Capabilities, AllowAllDevices, Devices] + properties: + Capabilities: + type: "array" + items: + type: "string" + example: + - "CAP_SYS_ADMIN" + - "CAP_SYSLOG" + AllowAllDevices: + type: "boolean" + x-nullable: false + example: false + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PropagatedMount: + type: "string" + x-nullable: false + example: "/mnt/volumes" + IpcHost: + type: "boolean" + x-nullable: false + example: false + PidHost: + type: "boolean" + x-nullable: false + example: false + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + $ref: "#/definitions/PluginEnv" + example: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + example: "args" + Description: + x-nullable: false + type: "string" + example: "command line arguments" + Settable: + type: "array" + items: + type: "string" + Value: + type: "array" + items: + type: "string" + rootfs: + type: "object" + properties: + type: + type: "string" + example: "layers" + diff_ids: + type: "array" + items: + type: "string" + example: + 
- "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" + - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + + ObjectVersion: + description: | + The version number of the object such as node, service, etc. This is needed to avoid conflicting writes. + The client must send the version number along with the modified specification when updating these objects. + This approach ensures safe concurrency and determinism in that the change on the object + may not be applied if the version number has changed from the last read. In other words, + if two update requests specify the same base version, only one of the requests can succeed. + As a result, two separate update requests that happen at the same time will not + unintentionally overwrite each other. + type: "object" + properties: + Index: + type: "integer" + format: "uint64" + example: 373531 + + NodeSpec: + type: "object" + properties: + Name: + description: "Name for the node." + type: "string" + example: "my-node" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Role: + description: "Role of the node." + type: "string" + enum: + - "worker" + - "manager" + example: "manager" + Availability: + description: "Availability of the node." + type: "string" + enum: + - "active" + - "pause" + - "drain" + example: "active" + example: + Availability: "active" + Name: "node-name" + Role: "manager" + Labels: + foo: "bar" + + Node: + type: "object" + properties: + ID: + type: "string" + example: "24ifsmvkjbyhk" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the node was added to the swarm in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the node was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/NodeSpec" + Description: + $ref: "#/definitions/NodeDescription" + Status: + $ref: "#/definitions/NodeStatus" + ManagerStatus: + $ref: "#/definitions/ManagerStatus" + + NodeDescription: + description: | + NodeDescription encapsulates the properties of the Node as reported by the + agent. + type: "object" + properties: + Hostname: + type: "string" + example: "bf3067039e47" + Platform: + $ref: "#/definitions/Platform" + Resources: + $ref: "#/definitions/ResourceObject" + Engine: + $ref: "#/definitions/EngineDescription" + TLSInfo: + $ref: "#/definitions/TLSInfo" + + Platform: + description: | + Platform represents the platform (Arch/OS). + type: "object" + properties: + Architecture: + description: | + Architecture represents the hardware architecture (for example, + `x86_64`). + type: "string" + example: "x86_64" + OS: + description: | + OS represents the Operating System (for example, `linux` or `windows`). + type: "string" + example: "linux" + + EngineDescription: + description: "EngineDescription provides information about an engine." 
+ type: "object" + properties: + EngineVersion: + type: "string" + example: "17.06.0" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + foo: "bar" + Plugins: + type: "array" + items: + type: "object" + properties: + Type: + type: "string" + Name: + type: "string" + example: + - Type: "Log" + Name: "awslogs" + - Type: "Log" + Name: "fluentd" + - Type: "Log" + Name: "gcplogs" + - Type: "Log" + Name: "gelf" + - Type: "Log" + Name: "journald" + - Type: "Log" + Name: "json-file" + - Type: "Log" + Name: "logentries" + - Type: "Log" + Name: "splunk" + - Type: "Log" + Name: "syslog" + - Type: "Network" + Name: "bridge" + - Type: "Network" + Name: "host" + - Type: "Network" + Name: "ipvlan" + - Type: "Network" + Name: "macvlan" + - Type: "Network" + Name: "null" + - Type: "Network" + Name: "overlay" + - Type: "Volume" + Name: "local" + - Type: "Volume" + Name: "localhost:5000/vieux/sshfs:latest" + - Type: "Volume" + Name: "vieux/sshfs:latest" + + TLSInfo: + description: "Information about the issuer of leaf TLS certificates and the trusted root CA certificate" + type: "object" + properties: + TrustRoot: + description: "The root CA certificate(s) that are used to validate leaf TLS certificates" + type: "string" + CertIssuerSubject: + description: "The base64-url-safe-encoded raw subject bytes of the issuer" + type: "string" + CertIssuerPublicKey: + description: "The base64-url-safe-encoded raw public key bytes of the issuer" + type: "string" + example: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + + NodeStatus: + description: | + NodeStatus represents the status of a node. + + It provides the current status of the node, as seen by the manager. + type: "object" + properties: + State: + $ref: "#/definitions/NodeState" + Message: + type: "string" + example: "" + Addr: + description: "IP address of the node." + type: "string" + example: "172.17.0.2" + + NodeState: + description: "NodeState represents the state of a node." + type: "string" + enum: + - "unknown" + - "down" + - "ready" + - "disconnected" + example: "ready" + + ManagerStatus: + description: | + ManagerStatus represents the status of a manager. + + It provides the current status of a node's manager component, if the node + is a manager. + x-nullable: true + type: "object" + properties: + Leader: + type: "boolean" + default: false + example: true + Reachability: + $ref: "#/definitions/Reachability" + Addr: + description: | + The IP address and port at which the manager is reachable. + type: "string" + example: "10.0.0.46:2377" + + Reachability: + description: "Reachability represents the reachability of a node." + type: "string" + enum: + - "unknown" + - "unreachable" + - "reachable" + example: "reachable" + + SwarmSpec: + description: "User modifiable swarm configuration." 
+ type: "object" + properties: + Name: + description: "Name of the swarm." + type: "string" + example: "default" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.corp.type: "production" + com.example.corp.department: "engineering" + Orchestration: + description: "Orchestration configuration." + type: "object" + x-nullable: true + properties: + TaskHistoryRetentionLimit: + description: "The number of historic tasks to keep per instance or node. If negative, never remove completed or failed tasks." + type: "integer" + format: "int64" + example: 10 + Raft: + description: "Raft configuration." + type: "object" + properties: + SnapshotInterval: + description: "The number of log entries between snapshots." + type: "integer" + format: "uint64" + example: 10000 + KeepOldSnapshots: + description: "The number of snapshots to keep beyond the current snapshot." + type: "integer" + format: "uint64" + LogEntriesForSlowFollowers: + description: "The number of log entries to keep around to sync up slow followers after a snapshot is created." + type: "integer" + format: "uint64" + example: 500 + ElectionTick: + description: | + The number of ticks that a follower will wait for a message from the leader before becoming a candidate and starting an election. `ElectionTick` must be greater than `HeartbeatTick`. + + A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 3 + HeartbeatTick: + description: | + The number of ticks between heartbeats. Every HeartbeatTick ticks, the leader will send a heartbeat to the followers. + + A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 1 + Dispatcher: + description: "Dispatcher configuration." + type: "object" + x-nullable: true + properties: + HeartbeatPeriod: + description: "The delay for an agent to send a heartbeat to the dispatcher." + type: "integer" + format: "int64" + example: 5000000000 + CAConfig: + description: "CA configuration." + type: "object" + x-nullable: true + properties: + NodeCertExpiry: + description: "The duration node certificates are issued for." + type: "integer" + format: "int64" + example: 7776000000000000 + ExternalCAs: + description: "Configuration for forwarding signing requests to an external certificate authority." + type: "array" + items: + type: "object" + properties: + Protocol: + description: "Protocol for communication with the external CA (currently only `cfssl` is supported)." + type: "string" + enum: + - "cfssl" + default: "cfssl" + URL: + description: "URL where certificate signing requests should be sent." + type: "string" + Options: + description: "An object with key/value pairs that are interpreted as protocol-specific options for the external CA driver." + type: "object" + additionalProperties: + type: "string" + CACert: + description: "The root CA certificate (in PEM format) this external CA uses to issue TLS certificates (assumed to be to the current swarm root CA certificate if not provided)." + type: "string" + SigningCACert: + description: "The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format." + type: "string" + SigningCAKey: + description: "The desired signing CA key for all swarm node TLS leaf certificates, in PEM format." 
+ type: "string" + ForceRotate: + description: "An integer whose purpose is to force swarm to generate a new signing CA certificate and key, if none have been specified in `SigningCACert` and `SigningCAKey`" + format: "uint64" + type: "integer" + EncryptionConfig: + description: "Parameters related to encryption-at-rest." + type: "object" + properties: + AutoLockManagers: + description: "If set, generate a key and use it to lock data stored on the managers." + type: "boolean" + example: false + TaskDefaults: + description: "Defaults for creating tasks in this cluster." + type: "object" + properties: + LogDriver: + description: | + The log driver to use for tasks created in the orchestrator if + unspecified by a service. + + Updating this value only affects new tasks. Existing tasks continue + to use their previously configured log driver until recreated. + type: "object" + properties: + Name: + description: | + The log driver to use as a default for new tasks. + type: "string" + example: "json-file" + Options: + description: | + Driver-specific options for the selectd log driver, specified + as key/value pairs. + type: "object" + additionalProperties: + type: "string" + example: + "max-file": "10" + "max-size": "100m" + + # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but + # without `JoinTokens`. + ClusterInfo: + description: | + ClusterInfo represents information about the swarm as is returned by the + "/info" endpoint. Join-tokens are not included. + x-nullable: true + type: "object" + properties: + ID: + description: "The ID of the swarm." + type: "string" + example: "abajmipo7b4xz5ip2nrla6b11" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the swarm was initialised in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the swarm was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/SwarmSpec" + TLSInfo: + $ref: "#/definitions/TLSInfo" + RootRotationInProgress: + description: "Whether there is currently a root CA rotation in progress for the swarm" + type: "boolean" + example: false + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + If no port is set or is set to 0, the default port (4789) is used. + type: "integer" + format: "uint32" + default: 4789 + example: 4789 + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global scope networks. + type: "array" + items: + type: "string" + format: "CIDR" + example: ["10.10.0.0/16", "20.20.0.0/16"] + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created from the default subnet pool + type: "integer" + format: "uint32" + maximum: 29 + default: 24 + example: 24 + + JoinTokens: + description: | + JoinTokens contains the tokens workers and managers need to join the swarm. + type: "object" + properties: + Worker: + description: | + The token workers can use to join the swarm. + type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" + Manager: + description: | + The token managers can use to join the swarm. 
+ type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + + Swarm: + type: "object" + allOf: + - $ref: "#/definitions/ClusterInfo" + - type: "object" + properties: + JoinTokens: + $ref: "#/definitions/JoinTokens" + + TaskSpec: + description: "User modifiable task configuration." + type: "object" + properties: + PluginSpec: + type: "object" + description: | + Plugin spec for the service. *(Experimental release only.)* + +
+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Name: + description: "The name or 'alias' to use for the plugin." + type: "string" + Remote: + description: "The plugin image reference to use." + type: "string" + Disabled: + description: "Disable the plugin once scheduled." + type: "boolean" + PluginPrivilege: + type: "array" + items: + description: "Describes a permission accepted by the user upon installing the plugin." + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + ContainerSpec: + type: "object" + description: | + Container spec for the service. + +
+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Image: + description: "The image name to use for the container" + type: "string" + Labels: + description: "User-defined key/value data." + type: "object" + additionalProperties: + type: "string" + Command: + description: "The command to be run in the image." + type: "array" + items: + type: "string" + Args: + description: "Arguments to the command." + type: "array" + items: + type: "string" + Hostname: + description: "The hostname to use for the container, as a valid RFC 1123 hostname." + type: "string" + Env: + description: "A list of environment variables in the form `VAR=value`." + type: "array" + items: + type: "string" + Dir: + description: "The working directory for commands to run in." + type: "string" + User: + description: "The user inside the container." + type: "string" + Groups: + type: "array" + description: "A list of additional groups that the container process will run as." + items: + type: "string" + Privileges: + type: "object" + description: "Security options for the container" + properties: + CredentialSpec: + type: "object" + description: "CredentialSpec for managed service account (Windows only)" + properties: + Config: + type: "string" + example: "0bt9dmxjvjiqermk6xrop3ekq" + description: | + Load credential spec from a Swarm Config with the given ID. + The specified config must also be present in the Configs field with the Runtime property set. + +
+ + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, and `CredentialSpec.Config` are mutually exclusive. + File: + type: "string" + example: "spec.json" + description: | + Load credential spec from this file. The file is read by the daemon, and must be present in the + `CredentialSpecs` subdirectory in the docker data directory, which defaults to + `C:\ProgramData\Docker\` on Windows. + + For example, specifying `spec.json` loads `C:\ProgramData\Docker\CredentialSpecs\spec.json`. + +
+ + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, and `CredentialSpec.Config` are mutually exclusive. + Registry: + type: "string" + description: | + Load credential spec from this value in the Windows registry. The specified registry value must be + located in: + + `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` + +
+ + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, and `CredentialSpec.Config` are mutually exclusive. + SELinuxContext: + type: "object" + description: "SELinux labels of the container" + properties: + Disable: + type: "boolean" + description: "Disable SELinux" + User: + type: "string" + description: "SELinux user label" + Role: + type: "string" + description: "SELinux role label" + Type: + type: "string" + description: "SELinux type label" + Level: + type: "string" + description: "SELinux level label" + TTY: + description: "Whether a pseudo-TTY should be allocated." + type: "boolean" + OpenStdin: + description: "Open `stdin`" + type: "boolean" + ReadOnly: + description: "Mount the container's root filesystem as read only." + type: "boolean" + Mounts: + description: "Specification for mounts to be added to containers created as part of the service." + type: "array" + items: + $ref: "#/definitions/Mount" + StopSignal: + description: "Signal to stop the container." + type: "string" + StopGracePeriod: + description: "Amount of time to wait for the container to terminate before forcefully killing it." + type: "integer" + format: "int64" + HealthCheck: + $ref: "#/definitions/HealthConfig" + Hosts: + type: "array" + description: | + A list of hostname/IP mappings to add to the container's `hosts` + file. The format of extra hosts is specified in the + [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) + man page: + + IP_address canonical_hostname [aliases...] + items: + type: "string" + DNSConfig: + description: "Specification for DNS related configurations in resolver configuration file (`resolv.conf`)." + type: "object" + properties: + Nameservers: + description: "The IP addresses of the name servers." + type: "array" + items: + type: "string" + Search: + description: "A search list for host-name lookup." + type: "array" + items: + type: "string" + Options: + description: "A list of internal resolver variables to be modified (e.g., `debug`, `ndots:3`, etc.)." + type: "array" + items: + type: "string" + Secrets: + description: "Secrets contains references to zero or more secrets that will be exposed to the service." + type: "array" + items: + type: "object" + properties: + File: + description: "File represents a specific target that is backed by a file." + type: "object" + properties: + Name: + description: "Name represents the final filename in the filesystem." + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + SecretID: + description: "SecretID represents the ID of the specific secret that we're referencing." + type: "string" + SecretName: + description: | + SecretName is the name of the secret that this references, but this is just provided for + lookup/display purposes. The secret in the reference will be identified by its ID. + type: "string" + Configs: + description: "Configs contains references to zero or more configs that will be exposed to the service." + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + +
+ + > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive + type: "object" + properties: + Name: + description: "Name represents the final filename in the filesystem." + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + Runtime: + description: | + Runtime represents a target that is not mounted into the container but is used by the task + +
+ + > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive + type: "object" + ConfigID: + description: "ConfigID represents the ID of the specific config that we're referencing." + type: "string" + ConfigName: + description: | + ConfigName is the name of the config that this references, but this is just provided for + lookup/display purposes. The config in the reference will be identified by its ID. + type: "string" + Isolation: + type: "string" + description: "Isolation technology of the containers running the service. (Windows only)" + enum: + - "default" + - "process" + - "hyperv" + Init: + description: "Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used." + type: "boolean" + x-nullable: true + Sysctls: + description: | + Set kernel namespaced parameters (sysctls) in the container. + The Sysctls option on services accepts the same sysctls as + are supported on containers. Note that while the same sysctls are + supported, no guarantees or checks are made about their + suitability for a clustered environment, and it's up to the user + to determine whether a given sysctl will work properly in a + Service. + type: "object" + additionalProperties: + type: "string" + NetworkAttachmentSpec: + description: | + Read-only spec type for non-swarm containers attached to swarm overlay + networks. + +
+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + type: "object" + properties: + ContainerID: + description: "ID of the container represented by this task" + type: "string" + Resources: + description: "Resource requirements which apply to each individual container created as part of the service." + type: "object" + properties: + Limits: + description: "Define resources limits." + $ref: "#/definitions/ResourceObject" + Reservation: + description: "Define resources reservation." + $ref: "#/definitions/ResourceObject" + RestartPolicy: + description: "Specification for the restart policy which applies to containers created as part of this service." + type: "object" + properties: + Condition: + description: "Condition for restart." + type: "string" + enum: + - "none" + - "on-failure" + - "any" + Delay: + description: "Delay between restart attempts." + type: "integer" + format: "int64" + MaxAttempts: + description: "Maximum attempts to restart a given container before giving up (default value is 0, which is ignored)." + type: "integer" + format: "int64" + default: 0 + Window: + description: "Window is the time window used to evaluate the restart policy (default value is 0, which is unbounded)." + type: "integer" + format: "int64" + default: 0 + Placement: + type: "object" + properties: + Constraints: + description: "An array of constraints." + type: "array" + items: + type: "string" + example: + - "node.hostname!=node3.corp.example.com" + - "node.role!=manager" + - "node.labels.type==production" + Preferences: + description: "Preferences provide a way to make the scheduler aware of factors such as topology. They are provided in order from highest to lowest precedence." + type: "array" + items: + type: "object" + properties: + Spread: + type: "object" + properties: + SpreadDescriptor: + description: "label descriptor, such as engine.labels.az" + type: "string" + example: + - Spread: + SpreadDescriptor: "node.labels.datacenter" + - Spread: + SpreadDescriptor: "node.labels.rack" + MaxReplicas: + description: "Maximum number of replicas per node (default value is 0, which is unlimited)" + type: "integer" + format: "int64" + default: 0 + Platforms: + description: | + Platforms stores all the platforms that the service's image can + run on. This field is used in the platform filter for scheduling. + If empty, then the platform filter is off, meaning there are no + scheduling restrictions. + type: "array" + items: + $ref: "#/definitions/Platform" + ForceUpdate: + description: "A counter that triggers an update even if no relevant parameters have been changed." + type: "integer" + Runtime: + description: "Runtime is the type of runtime specified for the task executor." + type: "string" + Networks: + type: "array" + items: + type: "object" + properties: + Target: + type: "string" + Aliases: + type: "array" + items: + type: "string" + LogDriver: + description: "Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified."
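+ # Illustrative sketch (not part of the schema): a TaskTemplate fragment
+ # combining the RestartPolicy and Placement fields defined above, as it might
+ # appear in a service-create request body:
+ #
+ #   { "ContainerSpec": { "Image": "redis" },
+ #     "RestartPolicy": { "Condition": "on-failure", "Delay": 5000000000, "MaxAttempts": 3 },
+ #     "Placement": { "Constraints": ["node.role!=manager"] } }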
+ type: "object" + properties: + Name: + type: "string" + Options: + type: "object" + additionalProperties: + type: "string" + + TaskState: + type: "string" + enum: + - "new" + - "allocated" + - "pending" + - "assigned" + - "accepted" + - "preparing" + - "ready" + - "starting" + - "running" + - "complete" + - "shutdown" + - "failed" + - "rejected" + - "remove" + - "orphaned" + + Task: + type: "object" + properties: + ID: + description: "The ID of the task." + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Name: + description: "Name of the task." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Spec: + $ref: "#/definitions/TaskSpec" + ServiceID: + description: "The ID of the service this task is part of." + type: "string" + Slot: + type: "integer" + NodeID: + description: "The ID of the node that this task is on." + type: "string" + AssignedGenericResources: + $ref: "#/definitions/GenericResources" + Status: + type: "object" + properties: + Timestamp: + type: "string" + format: "dateTime" + State: + $ref: "#/definitions/TaskState" + Message: + type: "string" + Err: + type: "string" + ContainerStatus: + type: "object" + properties: + ContainerID: + type: "string" + PID: + type: "integer" + ExitCode: + type: "integer" + DesiredState: + $ref: "#/definitions/TaskState" + example: + ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + AssignedGenericResources: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + ServiceSpec: + description: "User modifiable configuration for a service." + properties: + Name: + description: "Name of the service." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + TaskTemplate: + $ref: "#/definitions/TaskSpec" + Mode: + description: "Scheduling mode for the service." + type: "object" + properties: + Replicated: + type: "object" + properties: + Replicas: + type: "integer" + format: "int64" + Global: + type: "object" + UpdateConfig: + description: "Specification for the update strategy of the service." 
+ type: "object" + properties: + Parallelism: + description: "Maximum number of tasks to be updated in one iteration (0 means unlimited parallelism)." + type: "integer" + format: "int64" + Delay: + description: "Amount of time between updates, in nanoseconds." + type: "integer" + format: "int64" + FailureAction: + description: "Action to take if an updated task fails to run, or stops running during the update." + type: "string" + enum: + - "continue" + - "pause" + - "rollback" + Monitor: + description: "Amount of time to monitor each updated task for failures, in nanoseconds." + type: "integer" + format: "int64" + MaxFailureRatio: + description: "The fraction of tasks that may fail during an update before the failure action is invoked, specified as a floating point number between 0 and 1." + type: "number" + default: 0 + Order: + description: "The order of operations when rolling out an updated task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down." + type: "string" + enum: + - "stop-first" + - "start-first" + RollbackConfig: + description: "Specification for the rollback strategy of the service." + type: "object" + properties: + Parallelism: + description: "Maximum number of tasks to be rolled back in one iteration (0 means unlimited parallelism)." + type: "integer" + format: "int64" + Delay: + description: "Amount of time between rollback iterations, in nanoseconds." + type: "integer" + format: "int64" + FailureAction: + description: "Action to take if an rolled back task fails to run, or stops running during the rollback." + type: "string" + enum: + - "continue" + - "pause" + Monitor: + description: "Amount of time to monitor each rolled back task for failures, in nanoseconds." + type: "integer" + format: "int64" + MaxFailureRatio: + description: "The fraction of tasks that may fail during a rollback before the failure action is invoked, specified as a floating point number between 0 and 1." + type: "number" + default: 0 + Order: + description: "The order of operations when rolling back a task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down." + type: "string" + enum: + - "stop-first" + - "start-first" + Networks: + description: "Array of network names or IDs to attach the service to." + type: "array" + items: + type: "object" + properties: + Target: + type: "string" + Aliases: + type: "array" + items: + type: "string" + EndpointSpec: + $ref: "#/definitions/EndpointSpec" + + EndpointPortConfig: + type: "object" + properties: + Name: + type: "string" + Protocol: + type: "string" + enum: + - "tcp" + - "udp" + - "sctp" + TargetPort: + description: "The port inside the container." + type: "integer" + PublishedPort: + description: "The port on the swarm hosts." + type: "integer" + PublishMode: + description: | + The mode in which port is published. + +
+ + - "ingress" makes the target port accessible on on every node, + regardless of whether there is a task for the service running on + that node or not. + - "host" bypasses the routing mesh and publish the port directly on + the swarm node where that service is running. + + type: "string" + enum: + - "ingress" + - "host" + default: "ingress" + example: "ingress" + + EndpointSpec: + description: "Properties that can be configured to access and load balance a service." + type: "object" + properties: + Mode: + description: "The mode of resolution to use for internal load balancing + between tasks." + type: "string" + enum: + - "vip" + - "dnsrr" + default: "vip" + Ports: + description: "List of exposed ports that this service is accessible on from the outside. Ports can only be provided if `vip` resolution mode is used." + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + + Service: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ServiceSpec" + Endpoint: + type: "object" + properties: + Spec: + $ref: "#/definitions/EndpointSpec" + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + VirtualIPs: + type: "array" + items: + type: "object" + properties: + NetworkID: + type: "string" + Addr: + type: "string" + UpdateStatus: + description: "The status of a service update." + type: "object" + properties: + State: + type: "string" + enum: + - "updating" + - "paused" + - "completed" + StartedAt: + type: "string" + format: "dateTime" + CompletedAt: + type: "string" + format: "dateTime" + Message: + type: "string" + example: + ID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Version: + Index: 19 + CreatedAt: "2016-06-07T21:05:51.880065305Z" + UpdatedAt: "2016-06-07T21:07:29.962229872Z" + Spec: + Name: "hopeful_cori" + TaskTemplate: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Endpoint: + Spec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + VirtualIPs: + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.2/16" + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.3/16" + + ImageDeleteResponseItem: + type: "object" + properties: + Untagged: + description: "The image ID of an image that was untagged" + type: "string" + Deleted: + description: "The image ID of an image that was deleted" + type: "string" + + ServiceUpdateResponse: + type: "object" + properties: + Warnings: + description: "Optional warning messages" + type: "array" + items: + type: "string" + example: + Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + + ContainerSummary: + type: "array" + items: + type: "object" + properties: + Id: + description: "The ID of this container" + type: "string" + x-go-name: "ID" + Names: + description: "The 
names that this container has been given" + type: "array" + items: + type: "string" + Image: + description: "The name of the image used when creating this container" + type: "string" + ImageID: + description: "The ID of the image that this container was created from" + type: "string" + Command: + description: "Command to run when starting the container" + type: "string" + Created: + description: "When the container was created" + type: "integer" + format: "int64" + Ports: + description: "The ports exposed by this container" + type: "array" + items: + $ref: "#/definitions/Port" + SizeRw: + description: "The size of files that have been created or changed by this container" + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container" + type: "integer" + format: "int64" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + State: + description: "The state of this container (e.g. `Exited`)" + type: "string" + Status: + description: "Additional human-readable status of this container (e.g. `Exit 0`)" + type: "string" + HostConfig: + type: "object" + properties: + NetworkMode: + type: "string" + NetworkSettings: + description: "A summary of the container's network settings" + type: "object" + properties: + Networks: + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + Mounts: + type: "array" + items: + $ref: "#/definitions/Mount" + + Driver: + description: "Driver represents a driver (network, logging, secrets)." + type: "object" + required: [Name] + properties: + Name: + description: "Name of the driver." + type: "string" + x-nullable: false + example: "some-driver" + Options: + description: "Key/value map of driver-specific options." + type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: + OptionA: "value for driver-specific option A" + OptionB: "value for driver-specific option B" + + SecretSpec: + type: "object" + properties: + Name: + description: "User-defined name of the secret." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Data: + description: | + Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2)) + data to store as secret. + + This field is only used to _create_ a secret, and is not returned by + other endpoints. + type: "string" + example: "" + Driver: + description: "Name of the secrets driver used to fetch the secret's value from an external secret store" + $ref: "#/definitions/Driver" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Secret: + type: "object" + properties: + ID: + type: "string" + example: "blt1owaxmitz71s9v5zh81zun" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + Spec: + $ref: "#/definitions/SecretSpec" + + ConfigSpec: + type: "object" + properties: + Name: + description: "User-defined name of the config." + type: "string" + Labels: + description: "User-defined key/value metadata." 
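+ # Illustrative sketch (not part of the schema): the Data field for both
+ # configs and secrets carries the base64-url-safe encoding of the raw bytes;
+ # for example, the one-byte payload "0" encodes to "MA==":
+ #
+ #   POST /configs/create   (endpoint assumed; /secrets/create works the same)
+ #   { "Name": "debug-flag", "Data": "MA==" }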
+ type: "object" + additionalProperties: + type: "string" + Data: + description: | + Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2)) + config data. + type: "string" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Config: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ConfigSpec" + + SystemInfo: + type: "object" + properties: + ID: + description: | + Unique identifier of the daemon. + +
+ + > **Note**: The format of the ID itself is not part of the API, and + > should not be considered stable. + type: "string" + example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" + Containers: + description: "Total number of containers on the host." + type: "integer" + example: 14 + ContainersRunning: + description: | + Number of containers with status `"running"`. + type: "integer" + example: 3 + ContainersPaused: + description: | + Number of containers with status `"paused"`. + type: "integer" + example: 1 + ContainersStopped: + description: | + Number of containers with status `"stopped"`. + type: "integer" + example: 10 + Images: + description: | + Total number of images on the host. + + Both _tagged_ and _untagged_ (dangling) images are counted. + type: "integer" + example: 508 + Driver: + description: "Name of the storage driver in use." + type: "string" + example: "overlay2" + DriverStatus: + description: | + Information specific to the storage driver, provided as + "label" / "value" pairs. + + This information is provided by the storage driver, and formatted + in a way consistent with the output of `docker info` on the command + line. + +
+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["Backing Filesystem", "extfs"] + - ["Supports d_type", "true"] + - ["Native Overlay Diff", "true"] + DockerRootDir: + description: | + Root directory of persistent Docker state. + + Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` + on Windows. + type: "string" + example: "/var/lib/docker" + SystemStatus: + description: | + Status information about this node (standalone Swarm API). + +
+ + > **Note**: The information returned in this field is only propagated + > by the Swarm standalone API, and is empty (`null`) when using + > built-in swarm mode. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["Role", "primary"] + - ["State", "Healthy"] + - ["Strategy", "spread"] + - ["Filters", "health, port, containerslots, dependency, affinity, constraint, whitelist"] + - ["Nodes", "2"] + - [" swarm-agent-00", "192.168.99.102:2376"] + - [" └ ID", "5CT6:FBGO:RVGO:CZL4:PB2K:WCYN:2JSV:KSHH:GGFW:QOPG:6J5Q:IOZ2|192.168.99.102:2376"] + - [" └ Status", "Healthy"] + - [" └ Containers", "1 (1 Running, 0 Paused, 0 Stopped)"] + - [" └ Reserved CPUs", "0 / 1"] + - [" └ Reserved Memory", "0 B / 1.021 GiB"] + - [" └ Labels", "kernelversion=4.4.74-boot2docker, operatingsystem=Boot2Docker 17.06.0-ce (TCL 7.2); HEAD : 0672754 - Thu Jun 29 00:06:31 UTC 2017, ostype=linux, provider=virtualbox, storagedriver=aufs"] + - [" └ UpdatedAt", "2017-08-09T10:03:46Z"] + - [" └ ServerVersion", "17.06.0-ce"] + - [" swarm-manager", "192.168.99.101:2376"] + - [" └ ID", "TAMD:7LL3:SEF7:LW2W:4Q2X:WVFH:RTXX:JSYS:XY2P:JEHL:ZMJK:JGIW|192.168.99.101:2376"] + - [" └ Status", "Healthy"] + - [" └ Containers", "2 (2 Running, 0 Paused, 0 Stopped)"] + - [" └ Reserved CPUs", "0 / 1"] + - [" └ Reserved Memory", "0 B / 1.021 GiB"] + - [" └ Labels", "kernelversion=4.4.74-boot2docker, operatingsystem=Boot2Docker 17.06.0-ce (TCL 7.2); HEAD : 0672754 - Thu Jun 29 00:06:31 UTC 2017, ostype=linux, provider=virtualbox, storagedriver=aufs"] + - [" └ UpdatedAt", "2017-08-09T10:04:11Z"] + - [" └ ServerVersion", "17.06.0-ce"] + Plugins: + $ref: "#/definitions/PluginsInfo" + MemoryLimit: + description: "Indicates if the host has memory limit support enabled." + type: "boolean" + example: true + SwapLimit: + description: "Indicates if the host has memory swap limit support enabled." + type: "boolean" + example: true + KernelMemory: + description: "Indicates if the host has kernel memory limit support enabled." + type: "boolean" + example: true + CpuCfsPeriod: + description: "Indicates if CPU CFS(Completely Fair Scheduler) period is supported by the host." + type: "boolean" + example: true + CpuCfsQuota: + description: "Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by the host." + type: "boolean" + example: true + CPUShares: + description: "Indicates if CPU Shares limiting is supported by the host." + type: "boolean" + example: true + CPUSet: + description: | + Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. + + See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) + type: "boolean" + example: true + PidsLimit: + description: "Indicates if the host kernel has PID limit support enabled." + type: "boolean" + example: true + OomKillDisable: + description: "Indicates if OOM killer disable is supported on the host." + type: "boolean" + IPv4Forwarding: + description: "Indicates IPv4 forwarding is enabled." + type: "boolean" + example: true + BridgeNfIptables: + description: "Indicates if `bridge-nf-call-iptables` is available on the host." + type: "boolean" + example: true + BridgeNfIp6tables: + description: "Indicates if `bridge-nf-call-ip6tables` is available on the host." + type: "boolean" + example: true + Debug: + description: "Indicates if the daemon is running in debug-mode / with debug-level logging enabled." + type: "boolean" + example: true + NFd: + description: | + The total number of file Descriptors in use by the daemon process. 
+ + This information is only returned if debug-mode is enabled. + type: "integer" + example: 64 + NGoroutines: + description: | + The number of goroutines that currently exist. + + This information is only returned if debug-mode is enabled. + type: "integer" + example: 174 + SystemTime: + description: | + Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + format with nano-seconds. + type: "string" + example: "2017-08-08T20:28:29.06202363Z" + LoggingDriver: + description: | + The logging driver to use as a default for new containers. + type: "string" + CgroupDriver: + description: | + The driver to use for managing cgroups. + type: "string" + enum: ["cgroupfs", "systemd"] + default: "cgroupfs" + example: "cgroupfs" + NEventsListener: + description: "Number of event listeners subscribed." + type: "integer" + example: 30 + KernelVersion: + description: | + Kernel version of the host. + + On Linux, this information is obtained from `uname`. On Windows this + information is queried from the HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ + registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. + type: "string" + example: "4.9.38-moby" + OperatingSystem: + description: | + Name of the host's operating system, for example: "Ubuntu 16.04.2 LTS" + or "Windows Server 2016 Datacenter" + type: "string" + example: "Alpine Linux v3.5" + OSType: + description: | + Generic type of the operating system of the host, as returned by the + Go runtime (`GOOS`). + + Currently returned values are "linux" and "windows". A full list of + possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). + type: "string" + example: "linux" + Architecture: + description: | + Hardware architecture of the host, as returned by the Go runtime + (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). + type: "string" + example: "x86_64" + NCPU: + description: | + The number of logical CPUs usable by the daemon. + + The number of available CPUs is checked by querying the operating + system when the daemon starts. Changes to operating system CPU + allocation after the daemon is started are not reflected. + type: "integer" + example: 4 + MemTotal: + description: | + Total amount of physical memory available on the host, in bytes. + type: "integer" + format: "int64" + example: 2095882240 + + IndexServerAddress: + description: | + Address / URL of the index server that is used for image search, + and as a default for user authentication for Docker Hub and Docker Cloud. + default: "https://index.docker.io/v1/" + type: "string" + example: "https://index.docker.io/v1/" + RegistryConfig: + $ref: "#/definitions/RegistryServiceConfig" + GenericResources: + $ref: "#/definitions/GenericResources" + HttpProxy: + description: | + HTTP-proxy configured for the daemon. This value is obtained from the + [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080" + HttpsProxy: + description: | + HTTPS-proxy configured for the daemon.
This value is obtained from the + [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443" + NoProxy: + description: | + Comma-separated list of domain extensions for which no proxy should be + used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) + environment variable. + + Containers do not automatically inherit this configuration. + type: "string" + example: "*.local, 169.254/16" + Name: + description: "Hostname of the host." + type: "string" + example: "node5.corp.example.com" + Labels: + description: | + User-defined labels (key/value metadata) as set on the daemon. + +
+ + > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, + > set through the daemon configuration, and _node_ labels, set from a + > manager node in the Swarm. Node labels are not included in this + > field. Node labels can be retrieved using the `/nodes/(id)` endpoint + > on a manager node in the Swarm. + type: "array" + items: + type: "string" + example: ["storage=ssd", "production"] + ExperimentalBuild: + description: | + Indicates if experimental features are enabled on the daemon. + type: "boolean" + example: true + ServerVersion: + description: | + Version string of the daemon. + + > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/) + > returns the Swarm version instead of the daemon version, for example + > `swarm/1.2.8`. + type: "string" + example: "17.06.0-ce" + ClusterStore: + description: | + URL of the distributed storage backend. + + + The storage backend is used for multihost networking (to store + network and endpoint information) and by the node discovery mechanism. + +
+ + > **Note**: This field is only propagated when using standalone Swarm + > mode, and overlay networking using an external k/v store. Overlay + > networks with Swarm mode enabled use the built-in raft store, and + > this field will be empty. + type: "string" + example: "node5.corp.example.com:8000" + Runtimes: + description: | + List of [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtimes configured on the daemon. Keys hold the "name" used to + reference the runtime. + + The Docker daemon relies on an OCI compliant runtime (invoked via the + `containerd` daemon) as its interface to the Linux kernel namespaces, + cgroups, and SELinux. + + The default runtime is `runc`, and is automatically configured. Additional + runtimes can be configured by the user and will be listed here. + type: "object" + additionalProperties: + $ref: "#/definitions/Runtime" + default: + runc: + path: "runc" + example: + runc: + path: "runc" + runc-master: + path: "/go/bin/runc" + custom: + path: "/usr/local/bin/my-oci-runtime" + runtimeArgs: ["--debug", "--systemd-cgroup=false"] + DefaultRuntime: + description: | + Name of the default OCI runtime that is used when starting containers. + + The default can be overridden per-container at create time. + type: "string" + default: "runc" + example: "runc" + Swarm: + $ref: "#/definitions/SwarmInfo" + LiveRestoreEnabled: + description: | + Indicates if live restore is enabled. + + If enabled, containers are kept running when the daemon is shut down + or upon daemon start if running containers are detected. + type: "boolean" + default: false + example: false + Isolation: + description: | + Represents the isolation technology to use as a default for containers. + The supported values are platform-specific. + + If no isolation value is specified on daemon start, on Windows client, + the default is `hyperv`, and on Windows server, the default is `process`. + + This option is currently not used on other platforms. + default: "default" + type: "string" + enum: + - "default" + - "hyperv" + - "process" + InitBinary: + description: | + Name and, optionally, path of the `docker-init` binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "docker-init" + ContainerdCommit: + $ref: "#/definitions/Commit" + RuncCommit: + $ref: "#/definitions/Commit" + InitCommit: + $ref: "#/definitions/Commit" + SecurityOptions: + description: | + List of security features that are enabled on the daemon, such as + apparmor, seccomp, SELinux, and user-namespaces (userns). + + Additional configuration options for each security feature may + be present, and are included as a comma-separated list of key/value + pairs. + type: "array" + items: + type: "string" + example: + - "name=apparmor" + - "name=seccomp,profile=default" + - "name=selinux" + - "name=userns" + ProductLicense: + description: | + Reports a summary of the product license on the daemon. + + If a commercial license has been applied to the daemon, information + such as number of nodes, and expiration are included. + type: "string" + example: "Community Engine" + Warnings: + description: | + List of warnings / informational messages about missing features, or + issues related to the daemon configuration. + + These messages can be printed by the client as information to the user.
+ + > **Note**: This field is only propagated when using standalone Swarm + > mode, and overlay networking using an external k/v store. Overlay + > networks with Swarm mode enabled use the built-in raft store, and + > this field will be empty. + type: "string" + example: "node5.corp.example.com:8000" + Runtimes: + description: | + List of [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtimes configured on the daemon. Keys hold the "name" used to + reference the runtime. + + The Docker daemon relies on an OCI compliant runtime (invoked via the + `containerd` daemon) as its interface to the Linux kernel namespaces, + cgroups, and SELinux. + + The default runtime is `runc`, and automatically configured. Additional + runtimes can be configured by the user and will be listed here. + type: "object" + additionalProperties: + $ref: "#/definitions/Runtime" + default: + runc: + path: "runc" + example: + runc: + path: "runc" + runc-master: + path: "/go/bin/runc" + custom: + path: "/usr/local/bin/my-oci-runtime" + runtimeArgs: ["--debug", "--systemd-cgroup=false"] + DefaultRuntime: + description: | + Name of the default OCI runtime that is used when starting containers. + + The default can be overridden per-container at create time. + type: "string" + default: "runc" + example: "runc" + Swarm: + $ref: "#/definitions/SwarmInfo" + LiveRestoreEnabled: + description: | + Indicates if live restore is enabled. + + If enabled, containers are kept running when the daemon is shutdown + or upon daemon start if running containers are detected. + type: "boolean" + default: false + example: false + Isolation: + description: | + Represents the isolation technology to use as a default for containers. + The supported values are platform-specific. + + If no isolation value is specified on daemon start, on Windows client, + the default is `hyperv`, and on Windows server, the default is `process`. + + This option is currently not used on other platforms. + default: "default" + type: "string" + enum: + - "default" + - "hyperv" + - "process" + InitBinary: + description: | + Name and, optional, path of the `docker-init` binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "docker-init" + ContainerdCommit: + $ref: "#/definitions/Commit" + RuncCommit: + $ref: "#/definitions/Commit" + InitCommit: + $ref: "#/definitions/Commit" + SecurityOptions: + description: | + List of security features that are enabled on the daemon, such as + apparmor, seccomp, SELinux, and user-namespaces (userns). + + Additional configuration options for each security feature may + be present, and are included as a comma-separated list of key/value + pairs. + type: "array" + items: + type: "string" + example: + - "name=apparmor" + - "name=seccomp,profile=default" + - "name=selinux" + - "name=userns" + ProductLicense: + description: | + Reports a summary of the product license on the daemon. + + If a commercial license has been applied to the daemon, information + such as number of nodes, and expiration are included. + type: "string" + example: "Community Engine" + Warnings: + description: | + List of warnings / informational messages about missing features, or + issues related to the daemon configuration. + + These messages can be printed by the client as information to the user. 
+ type: "array" + items: + type: "string" + example: + - "WARNING: No memory limit support" + - "WARNING: bridge-nf-call-iptables is disabled" + - "WARNING: bridge-nf-call-ip6tables is disabled" + + + # PluginsInfo is a temp struct holding Plugins name + # registered with docker daemon. It is used by Info struct + PluginsInfo: + description: | + Available plugins per type. + +
+ + > **Note**: Only unmanaged (V1) plugins are included in this list. + > V1 plugins are "lazily" loaded, and are not returned in this list + > if there is no resource using the plugin. + type: "object" + properties: + Volume: + description: "Names of available volume-drivers, and volume-driver plugins." + type: "array" + items: + type: "string" + example: ["local"] + Network: + description: "Names of available network-drivers, and network-driver plugins." + type: "array" + items: + type: "string" + example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"] + Authorization: + description: "Names of available authorization plugins." + type: "array" + items: + type: "string" + example: ["img-authz-plugin", "hbm"] + Log: + description: "Names of available logging-drivers, and logging-driver plugins." + type: "array" + items: + type: "string" + example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"] + + + RegistryServiceConfig: + description: | + RegistryServiceConfig stores daemon registry services configuration. + type: "object" + x-nullable: true + properties: + AllowNondistributableArtifactsCIDRs: + description: | + List of IP ranges to which nondistributable artifacts can be pushed, + using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/rfc4632). + + Some images (for example, Windows base images) contain artifacts + whose distribution is restricted by license. When these images are + pushed to a registry, restricted artifacts are not included. + + This configuration overrides this behavior, and enables the daemon to + push nondistributable artifacts to all registries whose resolved IP + address is within the subnet described by the CIDR syntax. + + This option is useful when pushing images containing + nondistributable artifacts to a registry on an air-gapped network so + hosts on that network can pull the images without connecting to + another server. + + > **Warning**: Nondistributable artifacts typically have restrictions + > on how and where they can be distributed and shared. Only use this + > feature to push artifacts to private registries and ensure that you + > are in compliance with any terms that cover redistributing + > nondistributable artifacts. + + type: "array" + items: + type: "string" + example: ["::1/128", "127.0.0.0/8"] + AllowNondistributableArtifactsHostnames: + description: | + List of registry hostnames to which nondistributable artifacts can be + pushed, using the format `<hostname>[:<port>]` or `<IP address>[:<port>]`. + + Some images (for example, Windows base images) contain artifacts + whose distribution is restricted by license. When these images are + pushed to a registry, restricted artifacts are not included. + + This configuration overrides this behavior for the specified + registries. + + This option is useful when pushing images containing + nondistributable artifacts to a registry on an air-gapped network so + hosts on that network can pull the images without connecting to + another server. + + > **Warning**: Nondistributable artifacts typically have restrictions + > on how and where they can be distributed and shared. Only use this + > feature to push artifacts to private registries and ensure that you + > are in compliance with any terms that cover redistributing + > nondistributable artifacts.
+ type: "array" + items: + type: "string" + example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] + InsecureRegistryCIDRs: + description: | + List of IP ranges of insecure registries, using the CIDR syntax + ([RFC 4632](https://tools.ietf.org/html/4632)). Insecure registries + accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates + from unknown CAs) communication. + + By default, local registries (`127.0.0.0/8`) are configured as + insecure. All other registries are secure. Communicating with an + insecure registry is not possible if the daemon assumes that registry + is secure. + + This configuration override this behavior, insecure communication with + registries whose resolved IP address is within the subnet described by + the CIDR syntax. + + Registries can also be marked insecure by hostname. Those registries + are listed under `IndexConfigs` and have their `Secure` field set to + `false`. + + > **Warning**: Using this option can be useful when running a local + > registry, but introduces security vulnerabilities. This option + > should therefore ONLY be used for testing purposes. For increased + > security, users should add their CA to their system's list of trusted + > CAs instead of enabling this option. + type: "array" + items: + type: "string" + example: ["::1/128", "127.0.0.0/8"] + IndexConfigs: + type: "object" + additionalProperties: + $ref: "#/definitions/IndexInfo" + example: + "127.0.0.1:5000": + "Name": "127.0.0.1:5000" + "Mirrors": [] + "Secure": false + "Official": false + "[2001:db8:a0b:12f0::1]:80": + "Name": "[2001:db8:a0b:12f0::1]:80" + "Mirrors": [] + "Secure": false + "Official": false + "docker.io": + Name: "docker.io" + Mirrors: ["https://hub-mirror.corp.example.com:5000/"] + Secure: true + Official: true + "registry.internal.corp.example.com:3000": + Name: "registry.internal.corp.example.com:3000" + Mirrors: [] + Secure: false + Official: false + Mirrors: + description: | + List of registry URLs that act as a mirror for the official + (`docker.io`) registry. + + type: "array" + items: + type: "string" + example: + - "https://hub-mirror.corp.example.com:5000/" + - "https://[2001:db8:a0b:12f0::1]/" + + IndexInfo: + description: + IndexInfo contains information about a registry. + type: "object" + x-nullable: true + properties: + Name: + description: | + Name of the registry, such as "docker.io". + type: "string" + example: "docker.io" + Mirrors: + description: | + List of mirrors, expressed as URIs. + type: "array" + items: + type: "string" + example: + - "https://hub-mirror.corp.example.com:5000/" + - "https://registry-2.docker.io/" + - "https://registry-3.docker.io/" + Secure: + description: | + Indicates if the registry is part of the list of insecure + registries. + + If `false`, the registry is insecure. Insecure registries accept + un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from + unknown CAs) communication. + + > **Warning**: Insecure registries can be useful when running a local + > registry. However, because its use creates security vulnerabilities + > it should ONLY be enabled for testing purposes. For increased + > security, users should add their CA to their system's list of + > trusted CAs instead of enabling this option. 
+ type: "boolean" + example: true + Official: + description: | + Indicates whether this is an official registry (i.e., Docker Hub / docker.io) + type: "boolean" + example: true + + Runtime: + description: | + Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtime. + + The runtime is invoked by the daemon via the `containerd` daemon. OCI + runtimes act as an interface to the Linux kernel namespaces, cgroups, + and SELinux. + type: "object" + properties: + path: + description: | + Name and, optional, path, of the OCI executable binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "/usr/local/bin/my-oci-runtime" + runtimeArgs: + description: | + List of command-line arguments to pass to the runtime when invoked. + type: "array" + x-nullable: true + items: + type: "string" + example: ["--debug", "--systemd-cgroup=false"] + + Commit: + description: | + Commit holds the Git-commit (SHA1) that a binary was built from, as + reported in the version-string of external tools, such as `containerd`, + or `runC`. + type: "object" + properties: + ID: + description: "Actual commit ID of external tool." + type: "string" + example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" + Expected: + description: | + Commit ID of external tool expected by dockerd as set at build time. + type: "string" + example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4" + + SwarmInfo: + description: | + Represents generic information about swarm. + type: "object" + properties: + NodeID: + description: "Unique identifier of for this node in the swarm." + type: "string" + default: "" + example: "k67qz4598weg5unwwffg6z1m1" + NodeAddr: + description: | + IP address at which this node can be reached by other nodes in the + swarm. + type: "string" + default: "" + example: "10.0.0.46" + LocalNodeState: + $ref: "#/definitions/LocalNodeState" + ControlAvailable: + type: "boolean" + default: false + example: true + Error: + type: "string" + default: "" + RemoteManagers: + description: | + List of ID's and addresses of other managers in the swarm. + type: "array" + default: null + x-nullable: true + items: + $ref: "#/definitions/PeerNode" + example: + - NodeID: "71izy0goik036k48jg985xnds" + Addr: "10.0.0.158:2377" + - NodeID: "79y6h1o4gv8n120drcprv5nmc" + Addr: "10.0.0.159:2377" + - NodeID: "k67qz4598weg5unwwffg6z1m1" + Addr: "10.0.0.46:2377" + Nodes: + description: "Total number of nodes in the swarm." + type: "integer" + x-nullable: true + example: 4 + Managers: + description: "Total number of managers in the swarm." + type: "integer" + x-nullable: true + example: 3 + Cluster: + $ref: "#/definitions/ClusterInfo" + + LocalNodeState: + description: "Current local status of this node." + type: "string" + default: "" + enum: + - "" + - "inactive" + - "pending" + - "active" + - "error" + - "locked" + example: "active" + + PeerNode: + description: "Represents a peer-node in the swarm" + properties: + NodeID: + description: "Unique identifier of for this node in the swarm." + type: "string" + Addr: + description: | + IP address and ports at which this node can be reached. + type: "string" + +paths: + /containers/json: + get: + summary: "List containers" + description: | + Returns a list of containers. For details on the format, see [the inspect endpoint](#operation/ContainerInspect). + + Note that it uses a different, smaller representation of a container than inspecting a single container. 
For example, + the list of linked containers is not propagated. + operationId: "ContainerList" + produces: + - "application/json" + parameters: + - name: "all" + in: "query" + description: "Return all containers. By default, only running containers are shown" + type: "boolean" + default: false + - name: "limit" + in: "query" + description: "Return this number of most recently created containers, including non-running ones." + type: "integer" + - name: "size" + in: "query" + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + Filters to process on the container list, encoded as JSON (a `map[string][]string`). For example, `{"status": ["paused"]}` will only return paused containers. Available filters: + + - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`) + - `before`=(`<container id>` or `<container name>`) + - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) + - `exited=<int>` containers with exit code of `<int>` + - `health`=(`starting`|`healthy`|`unhealthy`|`none`) + - `id=<ID>` a container's ID + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `is-task=`(`true`|`false`) + - `label=key` or `label="key=value"` of a container label + - `name=<name>` a container's name + - `network`=(`<network id>` or `<network name>`) + - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) + - `since`=(`<container id>` or `<container name>`) + - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) + - `volume`=(`<volume name>` or `<mount point destination>`) + type: "string" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerSummary" + examples: + application/json: + - Id: "8dfafdbc3a40" + Names: + - "/boring_feynman" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 1" + Created: 1367854155 + State: "Exited" + Status: "Exit 0" + Ports: + - PrivatePort: 2222 + PublicPort: 3333 + Type: "tcp" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:02" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" + Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + - Id: "9cd87474be90" + Names: + - "/coolName" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 222222" + Created: 1367854155 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.8" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:08" + Mounts: [] + - Id: "3176a2479c92" + Names: + - "/sleepy_dog" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 3333333333333333" + Created: 1367854154 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + 
SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.6" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:06" + Mounts: [] + - Id: "4cb07b47f9fb" + Names: + - "/running_cat" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 444444444444444444444444444444444" + Created: 1367854152 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.5" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:05" + Mounts: [] + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/create: + post: + summary: "Create a container" + operationId: "ContainerCreate" + consumes: + - "application/json" + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "name" + in: "query" + description: "Assign the specified name to the container. Must match `/?[a-zA-Z0-9_-]+`." + type: "string" + pattern: "/?[a-zA-Z0-9_-]+" + - name: "body" + in: "body" + description: "Container to create" + schema: + allOf: + - $ref: "#/definitions/ContainerConfig" + - type: "object" + properties: + HostConfig: + $ref: "#/definitions/HostConfig" + NetworkingConfig: + description: "This container's networking configuration." + type: "object" + properties: + EndpointsConfig: + description: "A mapping of network name to endpoint configuration for that network." 
+ type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + example: + Hostname: "" + Domainname: "" + User: "" + AttachStdin: false + AttachStdout: true + AttachStderr: true + Tty: false + OpenStdin: false + StdinOnce: false + Env: + - "FOO=bar" + - "BAZ=quux" + Cmd: + - "date" + Entrypoint: "" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + Volumes: + /volumes/data: {} + WorkingDir: "" + NetworkDisabled: false + MacAddress: "12:34:56:78:9a:bc" + ExposedPorts: + 22/tcp: {} + StopSignal: "SIGTERM" + StopTimeout: 10 + HostConfig: + Binds: + - "/tmp:/tmp" + Links: + - "redis3:redis" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + KernelMemory: 0 + NanoCPUs: 500000 + CpuPercent: 80 + CpuShares: 512 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpuQuota: 50000 + CpusetCpus: "0,1" + CpusetMems: "0,1" + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 300 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceWriteIOps: + - {} + MemorySwappiness: 60 + OomKillDisable: false + OomScoreAdj: 500 + PidMode: "" + PidsLimit: 0 + PortBindings: + 22/tcp: + - HostPort: "11022" + PublishAllPorts: false + Privileged: false + ReadonlyRootfs: false + Dns: + - "8.8.8.8" + DnsOptions: + - "" + DnsSearch: + - "" + VolumesFrom: + - "parent" + - "other:ro" + CapAdd: + - "NET_ADMIN" + CapDrop: + - "MKNOD" + GroupAdd: + - "newgroup" + RestartPolicy: + Name: "" + MaximumRetryCount: 0 + AutoRemove: true + NetworkMode: "bridge" + Devices: [] + Ulimits: + - {} + LogConfig: + Type: "json-file" + Config: {} + SecurityOpt: [] + StorageOpt: {} + CgroupParent: "" + VolumeDriver: "" + ShmSize: 67108864 + NetworkingConfig: + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + + required: true + responses: + 201: + description: "Container created successfully" + schema: + type: "object" + title: "ContainerCreateResponse" + description: "OK response to ContainerCreate operation" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + examples: + application/json: + Id: "e90e34656806" + Warnings: [] + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/{id}/json: + get: + summary: "Inspect a container" + description: "Return low-level information about a container." 
+ operationId: "ContainerInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "ContainerInspectResponse" + properties: + Id: + description: "The ID of the container" + type: "string" + Created: + description: "The time the container was created" + type: "string" + Path: + description: "The path to the command being run" + type: "string" + Args: + description: "The arguments to the command being run" + type: "array" + items: + type: "string" + State: + description: "The state of the container." + type: "object" + properties: + Status: + description: | + The status of the container. For example, `"running"` or `"exited"`. + type: "string" + enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] + Running: + description: | + Whether this container is running. + + Note that a running container can be _paused_. The `Running` and `Paused` + booleans are not mutually exclusive: + + When pausing a container (on Linux), the cgroups freezer is used to suspend + all processes in the container. Freezing the process requires the process to + be running. As a result, paused containers are both `Running` _and_ `Paused`. + + Use the `Status` field instead to determine if a container's state is "running". + type: "boolean" + Paused: + description: "Whether this container is paused." + type: "boolean" + Restarting: + description: "Whether this container is restarting." + type: "boolean" + OOMKilled: + description: "Whether this container has been killed because it ran out of memory." + type: "boolean" + Dead: + type: "boolean" + Pid: + description: "The process ID of this container" + type: "integer" + ExitCode: + description: "The last exit code of this container" + type: "integer" + Error: + type: "string" + StartedAt: + description: "The time when this container was last started." + type: "string" + FinishedAt: + description: "The time when this container last exited." + type: "string" + Image: + description: "The container's image" + type: "string" + ResolvConfPath: + type: "string" + HostnamePath: + type: "string" + HostsPath: + type: "string" + LogPath: + type: "string" + Node: + description: "TODO" + type: "object" + Name: + type: "string" + RestartCount: + type: "integer" + Driver: + type: "string" + MountLabel: + type: "string" + ProcessLabel: + type: "string" + AppArmorProfile: + type: "string" + ExecIDs: + description: "IDs of exec instances that are running in the container." + type: "array" + items: + type: "string" + x-nullable: true + HostConfig: + $ref: "#/definitions/HostConfig" + GraphDriver: + $ref: "#/definitions/GraphDriverData" + SizeRw: + description: "The size of files that have been created or changed by this container." + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container." 
+ type: "integer" + format: "int64" + Mounts: + type: "array" + items: + $ref: "#/definitions/MountPoint" + Config: + $ref: "#/definitions/ContainerConfig" + NetworkSettings: + $ref: "#/definitions/NetworkSettings" + examples: + application/json: + AppArmorProfile: "" + Args: + - "-c" + - "exit 9" + Config: + AttachStderr: true + AttachStdin: false + AttachStdout: true + Cmd: + - "/bin/sh" + - "-c" + - "exit 9" + Domainname: "" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Hostname: "ba033ac44011" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + MacAddress: "" + NetworkDisabled: false + OpenStdin: false + StdinOnce: false + Tty: false + User: "" + Volumes: + /volumes/data: {} + WorkingDir: "" + StopSignal: "SIGTERM" + StopTimeout: 10 + Created: "2015-01-06T15:47:31.485331387Z" + Driver: "devicemapper" + ExecIDs: + - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" + - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" + HostConfig: + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 0 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteIOps: + - {} + ContainerIDFile: "" + CpusetCpus: "" + CpusetMems: "" + CpuPercent: 80 + CpuShares: 0 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + Devices: [] + IpcMode: "" + LxcConf: [] + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + KernelMemory: 0 + OomKillDisable: false + OomScoreAdj: 500 + NetworkMode: "bridge" + PidMode: "" + PortBindings: {} + Privileged: false + ReadonlyRootfs: false + PublishAllPorts: false + RestartPolicy: + MaximumRetryCount: 2 + Name: "on-failure" + LogConfig: + Type: "json-file" + Sysctls: + net.ipv4.ip_forward: "1" + Ulimits: + - {} + VolumeDriver: "" + ShmSize: 67108864 + HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" + HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" + LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" + Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" + Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" + MountLabel: "" + Name: "/boring_euclid" + NetworkSettings: + Bridge: "" + SandboxID: "" + HairpinMode: false + LinkLocalIPv6Address: "" + LinkLocalIPv6PrefixLen: 0 + SandboxKey: "" + EndpointID: "" + Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + IPAddress: "" + IPPrefixLen: 0 + IPv6Gateway: "" + MacAddress: "" + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Path: "/bin/sh" + ProcessLabel: "" + ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" + RestartCount: 1 + State: + Error: "" + ExitCode: 9 + FinishedAt: "2015-01-06T15:47:32.080254511Z" + OOMKilled: false + Dead: false + Paused: false + Pid: 0 + Restarting: false + Running: true + StartedAt: 
"2015-01-06T15:47:32.072697474Z" + Status: "running" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" + Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "size" + in: "query" + type: "boolean" + default: false + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" + tags: ["Container"] + /containers/{id}/top: + get: + summary: "List processes running inside a container" + description: "On Unix systems, this is done by running the `ps` command. This endpoint is not supported on Windows." + operationId: "ContainerTop" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "ContainerTopResponse" + description: "OK response to ContainerTop operation" + properties: + Titles: + description: "The ps column titles" + type: "array" + items: + type: "string" + Processes: + description: "Each process running in the container, where each is process is an array of values corresponding to the titles" + type: "array" + items: + type: "array" + items: + type: "string" + examples: + application/json: + Titles: + - "UID" + - "PID" + - "PPID" + - "C" + - "STIME" + - "TTY" + - "TIME" + - "CMD" + Processes: + - + - "root" + - "13642" + - "882" + - "0" + - "17:03" + - "pts/0" + - "00:00:00" + - "/bin/bash" + - + - "root" + - "13735" + - "13642" + - "0" + - "17:06" + - "pts/0" + - "00:00:00" + - "sleep 10" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "ps_args" + in: "query" + description: "The arguments to pass to `ps`. For example, `aux`" + type: "string" + default: "-ef" + tags: ["Container"] + /containers/{id}/logs: + get: + summary: "Get container logs" + description: | + Get `stdout` and `stderr` logs from a container. + + Note: This endpoint works only for containers with the `json-file` or `journald` logging driver. + operationId: "ContainerLogs" + responses: + 101: + description: "logs returned as a stream" + schema: + type: "string" + format: "binary" + 200: + description: "logs returned as a string in response body" + schema: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "follow" + in: "query" + description: | + Return the logs as a stream. + + This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). 
+ type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "until" + in: "query" + description: "Only return logs before this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." + type: "string" + default: "all" + tags: ["Container"] + /containers/{id}/changes: + get: + summary: "Get changes on a container’s filesystem" + description: | + Returns which files in a container's filesystem have been added, deleted, + or modified. The `Kind` of modification can be one of: + + - `0`: Modified + - `1`: Added + - `2`: Deleted + operationId: "ContainerChanges" + produces: ["application/json"] + responses: + 200: + description: "The list of changes" + schema: + type: "array" + items: + type: "object" + x-go-name: "ContainerChangeResponseItem" + title: "ContainerChangeResponseItem" + description: "change item in response to ContainerChanges operation" + required: [Path, Kind] + properties: + Path: + description: "Path to file that has changed" + type: "string" + x-nullable: false + Kind: + description: "Kind of change" + type: "integer" + format: "uint8" + enum: [0, 1, 2] + x-nullable: false + examples: + application/json: + - Path: "/dev" + Kind: 0 + - Path: "/dev/kmsg" + Kind: 1 + - Path: "/test" + Kind: 1 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/export: + get: + summary: "Export a container" + description: "Export the contents of a container as a tarball." + operationId: "ContainerExport" + produces: + - "application/octet-stream" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/stats: + get: + summary: "Get container stats based on resource usage" + description: | + This endpoint returns a live stream of a container’s resource usage + statistics. + + The `precpu_stats` is the CPU statistic of the *previous* read, and is + used to calculate the CPU usage percentage. It is not an exact copy + of the `cpu_stats` field. + + If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is + nil then for compatibility with older daemons the length of the + corresponding `cpu_usage.percpu_usage` array should be used. 
+ operationId: "ContainerStats" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + type: "object" + examples: + application/json: + read: "2015-01-08T22:57:31.547920715Z" + pids_stats: + current: 3 + networks: + eth0: + rx_bytes: 5338 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 36 + tx_bytes: 648 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 8 + eth5: + rx_bytes: 4641 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 26 + tx_bytes: 690 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 9 + memory_stats: + stats: + total_pgmajfault: 0 + cache: 0 + mapped_file: 0 + total_inactive_file: 0 + pgpgout: 414 + rss: 6537216 + total_mapped_file: 0 + writeback: 0 + unevictable: 0 + pgpgin: 477 + total_unevictable: 0 + pgmajfault: 0 + total_rss: 6537216 + total_rss_huge: 6291456 + total_writeback: 0 + total_inactive_anon: 0 + rss_huge: 6291456 + hierarchical_memory_limit: 67108864 + total_pgfault: 964 + total_active_file: 0 + active_anon: 6537216 + total_active_anon: 6537216 + total_pgpgout: 414 + total_cache: 0 + inactive_anon: 0 + active_file: 0 + pgfault: 964 + inactive_file: 0 + total_pgpgin: 477 + max_usage: 6651904 + usage: 6537216 + failcnt: 0 + limit: 67108864 + blkio_stats: {} + cpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24472255 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100215355 + usage_in_kernelmode: 30000000 + system_cpu_usage: 739306590000000 + online_cpus: 4 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + precpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24350896 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100093996 + usage_in_kernelmode: 30000000 + system_cpu_usage: 9492140000000 + online_cpus: 4 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "stream" + in: "query" + description: "Stream the output. If false, the stats will be output once and then it will disconnect." + type: "boolean" + default: true + tags: ["Container"] + /containers/{id}/resize: + post: + summary: "Resize a container TTY" + description: "Resize the TTY for a container. You must restart the container for the resize to take effect." 
+ operationId: "ContainerResize" + consumes: + - "application/octet-stream" + produces: + - "text/plain" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "cannot resize container" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "h" + in: "query" + description: "Height of the tty session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the tty session in characters" + type: "integer" + tags: ["Container"] + /containers/{id}/start: + post: + summary: "Start a container" + operationId: "ContainerStart" + responses: + 204: + description: "no error" + 304: + description: "container already started" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + type: "string" + tags: ["Container"] + /containers/{id}/stop: + post: + summary: "Stop a container" + operationId: "ContainerStop" + responses: + 204: + description: "no error" + 304: + description: "container already stopped" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/restart: + post: + summary: "Restart a container" + operationId: "ContainerRestart" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/kill: + post: + summary: "Kill a container" + description: "Send a POSIX signal to a container, defaulting to killing to the container." 
+ operationId: "ContainerKill" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is not running" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)" + type: "string" + default: "SIGKILL" + tags: ["Container"] + /containers/{id}/update: + post: + summary: "Update a container" + description: "Change various configuration options of a container without having to recreate it." + operationId: "ContainerUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "The container has been updated." + schema: + type: "object" + title: "ContainerUpdateResponse" + description: "OK response to ContainerUpdate operation" + properties: + Warnings: + type: "array" + items: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "update" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + example: + BlkioWeight: 300 + CpuShares: 512 + CpuPeriod: 100000 + CpuQuota: 50000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpusetCpus: "0,1" + CpusetMems: "0" + Memory: 314572800 + MemorySwap: 514288000 + MemoryReservation: 209715200 + KernelMemory: 52428800 + RestartPolicy: + MaximumRetryCount: 4 + Name: "on-failure" + tags: ["Container"] + /containers/{id}/rename: + post: + summary: "Rename a container" + operationId: "ContainerRename" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "name already in use" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "name" + in: "query" + required: true + description: "New name for the container" + type: "string" + tags: ["Container"] + /containers/{id}/pause: + post: + summary: "Pause a container" + description: | + Use the cgroups freezer to suspend all processes in a container. + + Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the cgroups freezer the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. 
+ operationId: "ContainerPause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/unpause: + post: + summary: "Unpause a container" + description: "Resume a container which has been paused." + operationId: "ContainerUnpause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/attach: + post: + summary: "Attach to a container" + description: | + Attach to a container to read its output or send it input. You can attach to the same container multiple times and you can reattach to containers that have been detached. + + Either the `stream` or `logs` parameter must be `true` for this endpoint to do anything. + + See [the documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) for more details. + + ### Hijacking + + This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, and `stderr` on the same socket. + + This is the response from the daemon for an attach request: + + ``` + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + [STREAM] + ``` + + After the headers and two new lines, the TCP connection can now be used for raw, bidirectional communication between the client and server. + + To hint potential proxies about connection hijacking, the Docker client can also optionally send connection upgrade headers. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 + Upgrade: tcp + Connection: Upgrade + ``` + + The Docker daemon will respond with a `101 UPGRADED` response, and will similarly follow with the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + [STREAM] + ``` + + ### Stream format + + When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), the stream over the hijacked connected is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload. + + The header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`). + + It is encoded on the first eight bytes like this: + + ```go + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + ``` + + `STREAM_TYPE` can be: + + - 0: `stdin` (is written on `stdout`) + - 1: `stdout` + - 2: `stderr` + + `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian. + + Following the header is the payload, which is the specified number of bytes of `STREAM_TYPE`. 
+ + The simplest way to implement this protocol is the following: + + 1. Read 8 bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + + ### Stream format when using a TTY + + When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), the stream is not multiplexed. The data exchanged over the hijacked connection is simply the raw data from the process PTY and client's `stdin`. + + operationId: "ContainerAttach" + produces: + - "application/vnd.docker.raw-stream" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + type: "string" + - name: "logs" + in: "query" + description: | + Replay previous logs from the container. + + This is useful for attaching to a container that has started and you want to output everything since the container started. + + If `stream` is also enabled, once all the previous output has been returned, it will seamlessly transition into streaming current output. + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Stream attached streams from the time the request was made onwards" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/attach/ws: + get: + summary: "Attach to a container via a websocket" + operationId: "ContainerAttachWebsocket" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,`, or `_`." 
+ type: "string" + - name: "logs" + in: "query" + description: "Return logs" + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Return stream" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/wait: + post: + summary: "Wait for a container" + description: "Block until a container stops, then returns the exit code." + operationId: "ContainerWait" + produces: ["application/json"] + responses: + 200: + description: "The container has exit." + schema: + type: "object" + title: "ContainerWaitResponse" + description: "OK response to ContainerWait operation" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + x-nullable: false + Error: + description: "container waiting error, if any" + type: "object" + properties: + Message: + description: "Details of an error" + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "condition" + in: "query" + description: "Wait until a container state reaches the given condition, either 'not-running' (default), 'next-exit', or 'removed'." + type: "string" + default: "not-running" + tags: ["Container"] + /containers/{id}: + delete: + summary: "Remove a container" + operationId: "ContainerDelete" + responses: + 204: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "You cannot remove a running container: c2ada9df5af8. Stop the container before attempting removal or force remove" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "v" + in: "query" + description: "Remove the volumes associated with the container." + type: "boolean" + default: false + - name: "force" + in: "query" + description: "If the container is running, kill it before removing it." + type: "boolean" + default: false + - name: "link" + in: "query" + description: "Remove the specified link associated with the container." + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/archive: + head: + summary: "Get information about files in a container" + description: "A response header `X-Docker-Container-Path-Stat` is return containing a base64 - encoded JSON object with some filesystem header information about the path." 
+ operationId: "ContainerArchiveInfo" + responses: + 200: + description: "no error" + headers: + X-Docker-Container-Path-Stat: + type: "string" + description: "A base64 - encoded JSON object with some filesystem header information about the path" + 400: + description: "Bad parameter" + schema: + allOf: + - $ref: "#/definitions/ErrorResponse" + - type: "object" + properties: + message: + description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." + type: "string" + x-nullable: false + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + get: + summary: "Get an archive of a filesystem resource in a container" + description: "Get a tar archive of a resource in the filesystem of container id." + operationId: "ContainerArchive" + produces: ["application/x-tar"] + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + allOf: + - $ref: "#/definitions/ErrorResponse" + - type: "object" + properties: + message: + description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." + type: "string" + x-nullable: false + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + put: + summary: "Extract an archive of files or folders to a directory in a container" + description: "Upload a tar archive to be extracted to a path in the filesystem of container id." + operationId: "PutContainerArchive" + consumes: ["application/x-tar", "application/octet-stream"] + responses: + 200: + description: "The content was extracted successfully" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "Permission denied, the volume or container rootfs is marked as read-only." + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such container or path does not exist inside the container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Path to a directory in the container to extract the archive’s contents into. 
" + type: "string" + - name: "noOverwriteDirNonDir" + in: "query" + description: "If “1”, “true”, or “True” then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa." + type: "string" + - name: "inputStream" + in: "body" + required: true + description: "The input stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + tags: ["Container"] + /containers/prune: + post: + summary: "Delete stopped containers" + produces: + - "application/json" + operationId: "ContainerPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=` Prune containers created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune containers with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ContainerPruneResponse" + properties: + ContainersDeleted: + description: "Container IDs that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /images/json: + get: + summary: "List Images" + description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." + operationId: "ImageList" + produces: + - "application/json" + responses: + 200: + description: "Summary image data for the images matching the query" + schema: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + examples: + application/json: + - Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + ParentId: "" + RepoTags: + - "ubuntu:12.04" + - "ubuntu:precise" + RepoDigests: + - "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787" + Created: 1474925151 + Size: 103579269 + VirtualSize: 103579269 + SharedSize: 0 + Labels: {} + Containers: 2 + - Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175" + ParentId: "" + RepoTags: + - "ubuntu:12.10" + - "ubuntu:quantal" + RepoDigests: + - "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7" + - "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3" + Created: 1403128455 + Size: 172064416 + VirtualSize: 172064416 + SharedSize: 0 + Labels: {} + Containers: 5 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "all" + in: "query" + description: "Show all images. Only images from a final layer (no children) are shown by default." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. 
Available filters: + + - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) + - `dangling=true` + - `label=key` or `label="key=value"` of an image label + - `reference`=(`<image-name>[:<tag>]`) + - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) + type: "string" + - name: "digests" + in: "query" + description: "Show digest information as a `RepoDigests` field on each image." + type: "boolean" + default: false + tags: ["Image"] + /build: + post: + summary: "Build an image" + description: | + Build an image from a tar archive with a `Dockerfile` in it. + + The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). + + The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. + + The build is canceled if the client drops the connection by quitting or being killed. + operationId: "ImageBuild" + consumes: + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "inputStream" + in: "body" + description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + format: "binary" + - name: "dockerfile" + in: "query" + description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." + type: "string" + default: "Dockerfile" + - name: "t" + in: "query" + description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." + type: "string" + - name: "extrahosts" + in: "query" + description: "Extra hosts to add to /etc/hosts" + type: "string" + - name: "remote" + in: "query" + description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." + type: "string" + - name: "q" + in: "query" + description: "Suppress verbose build output." + type: "boolean" + default: false + - name: "nocache" + in: "query" + description: "Do not use the cache when building the image." + type: "boolean" + default: false + - name: "cachefrom" + in: "query" + description: "JSON array of images used for build cache resolution." + type: "string" + - name: "pull" + in: "query" + description: "Attempt to pull the image even if an older image exists locally." + type: "string" + - name: "rm" + in: "query" + description: "Remove intermediate containers after a successful build." + type: "boolean" + default: true + - name: "forcerm" + in: "query" + description: "Always remove intermediate containers, even upon failure." + type: "boolean" + default: false + - name: "memory" + in: "query" + description: "Set memory limit for build." + type: "integer" + - name: "memswap" + in: "query" + description: "Total memory (memory + swap). Set as `-1` to disable swap." 
+ type: "integer" + - name: "cpushares" + in: "query" + description: "CPU shares (relative weight)." + type: "integer" + - name: "cpusetcpus" + in: "query" + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." + type: "string" + - name: "cpuperiod" + in: "query" + description: "The length of a CPU period in microseconds." + type: "integer" + - name: "cpuquota" + in: "query" + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + - name: "buildargs" + in: "query" + description: > + JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker + uses the buildargs as the environment context for commands run via the `Dockerfile` RUN + instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for + passing secret values. + + + For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the + the query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. + + + [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) + type: "string" + - name: "shmsize" + in: "query" + description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." + type: "integer" + - name: "squash" + in: "query" + description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" + type: "boolean" + - name: "labels" + in: "query" + description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." + type: "string" + - name: "networkmode" + in: "query" + description: "Sets the networking mode for the run commands during + build. Supported standard values are: `bridge`, `host`, `none`, and + `container:`. Any other value is taken as a custom network's + name to which this container should connect to." + type: "string" + - name: "Content-type" + in: "header" + type: "string" + enum: + - "application/x-tar" + default: "application/x-tar" + - name: "X-Registry-Config" + in: "header" + description: | + This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. + + The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: + + ``` + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + ``` + + Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. 
+ type: "string" + - name: "platform" + in: "query" + description: "Platform in the format os[/arch[/variant]]" + type: "string" + default: "" + - name: "target" + in: "query" + description: "Target build stage" + type: "string" + default: "" + - name: "outputs" + in: "query" + description: "BuildKit output configuration" + type: "string" + default: "" + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /build/prune: + post: + summary: "Delete builder cache" + produces: + - "application/json" + operationId: "BuildPrune" + parameters: + - name: "keep-storage" + in: "query" + description: "Amount of disk space in bytes to keep for cache" + type: "integer" + format: "int64" + - name: "all" + in: "query" + type: "boolean" + description: "Remove all types of build cache" + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the list of build cache objects. Available filters: + - `until=`: duration relative to daemon's time, during which build cache was not used, in Go's duration format (e.g., '24h') + - `id=` + - `parent=` + - `type=` + - `description=` + - `inuse` + - `shared` + - `private` + responses: + 200: + description: "No error" + schema: + type: "object" + title: "BuildPruneResponse" + properties: + CachesDeleted: + type: "array" + items: + description: "ID of build cache object" + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /images/create: + post: + summary: "Create an image" + description: "Create an image by either pulling it from a registry or importing it." + operationId: "ImageCreate" + consumes: + - "text/plain" + - "application/octet-stream" + produces: + - "application/json" + responses: + 200: + description: "no error" + 404: + description: "repository does not exist or no read access" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "fromImage" + in: "query" + description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed." + type: "string" + - name: "fromSrc" + in: "query" + description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." + type: "string" + - name: "repo" + in: "query" + description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." + type: "string" + - name: "tag" + in: "query" + description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." + type: "string" + - name: "inputImage" + in: "body" + description: "Image content if the value `-` has been specified in fromSrc query parameter" + schema: + type: "string" + required: false + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration. 
[See the authentication section for details.](#section/Authentication)" + type: "string" + - name: "platform" + in: "query" + description: "Platform in the format os[/arch[/variant]]" + type: "string" + default: "" + tags: ["Image"] + /images/{name}/json: + get: + summary: "Inspect an image" + description: "Return low-level information about an image." + operationId: "ImageInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Image" + examples: + application/json: + Id: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + Container: "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a" + Comment: "" + Os: "linux" + Architecture: "amd64" + Parent: "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + ContainerConfig: + Tty: false + Hostname: "e611e15f9c9d" + Domainname: "" + AttachStdout: false + PublishService: "" + AttachStdin: false + OpenStdin: false + StdinOnce: false + NetworkDisabled: false + OnBuild: [] + Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + User: "" + WorkingDir: "" + MacAddress: "" + AttachStderr: false + Labels: + com.example.license: "GPL" + com.example.version: "1.0" + com.example.vendor: "Acme" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + - "/bin/sh" + - "-c" + - "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + DockerVersion: "1.9.0-dev" + VirtualSize: 188359297 + Size: 0 + Author: "" + Created: "2015-09-10T08:30:53.26995814Z" + GraphDriver: + Name: "aufs" + Data: {} + RepoDigests: + - "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + RepoTags: + - "example:1.0" + - "example:latest" + - "example:stable" + Config: + Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + NetworkDisabled: false + OnBuild: [] + StdinOnce: false + PublishService: "" + AttachStdin: false + OpenStdin: false + Domainname: "" + AttachStdout: false + Tty: false + Hostname: "e611e15f9c9d" + Cmd: + - "/bin/bash" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Labels: + com.example.vendor: "Acme" + com.example.version: "1.0" + com.example.license: "GPL" + MacAddress: "" + AttachStderr: false + WorkingDir: "" + User: "" + RootFS: + Type: "layers" + Layers: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Image"] + /images/{name}/history: + get: + summary: "Get the history of an image" + description: "Return parent layers of an image." 
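The ImageInspect endpoint above can be exercised directly with Go's standard library. A minimal sketch, assuming the daemon listens on the default unix socket at `/var/run/docker.sock`, that the `v1.40` path prefix matches your daemon's API version, and that a `busybox:latest` image exists locally:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"net"
	"net/http"
)

func main() {
	// Every request is dialed to the daemon's unix socket; the URL host
	// below is a placeholder that the transport ignores.
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
			},
		},
	}
	resp, err := client.Get("http://localhost/v1.40/images/busybox:latest/json")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// 200 returns the Image document shown in the example; 404 returns
	// an ErrorResponse.
	fmt.Printf("%s\n%s\n", resp.Status, body)
}
```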
+ operationId: "ImageHistory" + produces: ["application/json"] + responses: + 200: + description: "List of image layers" + schema: + type: "array" + items: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false + examples: + application/json: + - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" + Created: 1398108230 + CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" + Tags: + - "ubuntu:lucid" + - "ubuntu:10.04" + Size: 182964289 + Comment: "" + - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" + Created: 1398108222 + CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" + Tags: [] + Size: 0 + Comment: "" + - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + Created: 1371157430 + CreatedBy: "" + Tags: + - "scratch12:latest" + - "scratch:latest" + Size: 0 + Comment: "Imported from -" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/{name}/push: + post: + summary: "Push an image" + description: | + Push an image to a registry. + + If you wish to push an image on to a private registry, that image must already have a tag which references the registry. For example, `registry.example.com/myimage:latest`. + + The push is cancelled if the HTTP connection is closed. + operationId: "ImagePush" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID." + type: "string" + required: true + - name: "tag" + in: "query" + description: "The tag to associate with the image on the registry." + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)" + type: "string" + required: true + tags: ["Image"] + /images/{name}/tag: + post: + summary: "Tag an image" + description: "Tag an image so that it becomes part of a repository." 
+ operationId: "ImageTag" + responses: + 201: + description: "No error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID to tag." + type: "string" + required: true + - name: "repo" + in: "query" + description: "The repository to tag in. For example, `someuser/someimage`." + type: "string" + - name: "tag" + in: "query" + description: "The name of the new tag." + type: "string" + tags: ["Image"] + /images/{name}: + delete: + summary: "Remove an image" + description: | + Remove an image, along with any untagged parent images that were + referenced by that image. + + Images can't be removed if they have descendant images, are being + used by a running container or are being used by a build. + operationId: "ImageDelete" + produces: ["application/json"] + responses: + 200: + description: "The image was deleted successfully" + schema: + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + examples: + application/json: + - Untagged: "3e2f21a89f" + - Deleted: "3e2f21a89f" + - Deleted: "53b4f83ac9" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "force" + in: "query" + description: "Remove the image even if it is being used by stopped containers or has other tags" + type: "boolean" + default: false + - name: "noprune" + in: "query" + description: "Do not delete untagged parent images" + type: "boolean" + default: false + tags: ["Image"] + /images/search: + get: + summary: "Search images" + description: "Search for an image on Docker Hub." + operationId: "ImageSearch" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + title: "ImageSearchResponseItem" + properties: + description: + type: "string" + is_official: + type: "boolean" + is_automated: + type: "boolean" + name: + type: "string" + star_count: + type: "integer" + examples: + application/json: + - description: "" + is_official: false + is_automated: false + name: "wma55/u1210sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "jdswinbank/sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "vgauthier/sshd" + star_count: 0 + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "term" + in: "query" + description: "Term to search" + type: "string" + required: true + - name: "limit" + in: "query" + description: "Maximum number of results to return" + type: "integer" + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `is-automated=(true|false)` + - `is-official=(true|false)` + - `stars=` Matches images that has at least 'number' stars. 
+ type: "string" + tags: ["Image"] + /images/prune: + post: + summary: "Delete unused images" + produces: + - "application/json" + operationId: "ImagePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: + + - `dangling=` When set to `true` (or `1`), prune only + unused *and* untagged images. When set to `false` + (or `0`), all unused images are pruned. + - `until=` Prune images created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune images with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ImagePruneResponse" + properties: + ImagesDeleted: + description: "Images that were deleted" + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /auth: + post: + summary: "Check auth configuration" + description: "Validate credentials for a registry and, if available, get an identity token for accessing the registry without password." + operationId: "SystemAuth" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "An identity token was generated successfully." + schema: + type: "object" + title: "SystemAuthResponse" + required: [Status] + properties: + Status: + description: "The status of the authentication" + type: "string" + x-nullable: false + IdentityToken: + description: "An opaque token used to authenticate a user after a successful login" + type: "string" + x-nullable: false + examples: + application/json: + Status: "Login Succeeded" + IdentityToken: "9cbaf023786cd7..." + 204: + description: "No error" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "authConfig" + in: "body" + description: "Authentication to check" + schema: + $ref: "#/definitions/AuthConfig" + tags: ["System"] + /info: + get: + summary: "Get system information" + operationId: "SystemInfo" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/SystemInfo" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /version: + get: + summary: "Get version" + description: "Returns the version of Docker that is running and various information about the system that Docker is running on." 
+ operationId: "SystemVersion" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + type: "object" + title: "SystemVersionResponse" + properties: + Platform: + type: "object" + required: [Name] + properties: + Name: + type: "string" + Components: + type: "array" + items: + type: "object" + x-go-name: ComponentVersion + required: [Name, Version] + properties: + Name: + type: "string" + Version: + type: "string" + x-nullable: false + Details: + type: "object" + x-nullable: true + + Version: + type: "string" + ApiVersion: + type: "string" + MinAPIVersion: + type: "string" + GitCommit: + type: "string" + GoVersion: + type: "string" + Os: + type: "string" + Arch: + type: "string" + KernelVersion: + type: "string" + Experimental: + type: "boolean" + BuildTime: + type: "string" + examples: + application/json: + Version: "17.04.0" + Os: "linux" + KernelVersion: "3.19.0-23-generic" + GoVersion: "go1.7.5" + GitCommit: "deadbee" + Arch: "amd64" + ApiVersion: "1.27" + MinAPIVersion: "1.12" + BuildTime: "2016-06-14T07:09:13.444803460+00:00" + Experimental: true + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /_ping: + get: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." + operationId: "SystemPing" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "OK" + headers: + API-Version: + type: "string" + description: "Max API Version the server supports" + BuildKit-Version: + type: "string" + description: "Default version of docker image builder" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + headers: + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + tags: ["System"] + head: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." 
+ operationId: "SystemPingHead" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "(empty)" + headers: + API-Version: + type: "string" + description: "Max API Version the server supports" + BuildKit-Version: + type: "string" + description: "Default version of docker image builder" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /commit: + post: + summary: "Create a new image from a container" + operationId: "ImageCommit" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "containerConfig" + in: "body" + description: "The container configuration" + schema: + $ref: "#/definitions/ContainerConfig" + - name: "container" + in: "query" + description: "The ID or name of the container to commit" + type: "string" + - name: "repo" + in: "query" + description: "Repository name for the created image" + type: "string" + - name: "tag" + in: "query" + description: "Tag name for the create image" + type: "string" + - name: "comment" + in: "query" + description: "Commit message" + type: "string" + - name: "author" + in: "query" + description: "Author of the image (e.g., `John Hannibal Smith `)" + type: "string" + - name: "pause" + in: "query" + description: "Whether to pause the container before committing" + type: "boolean" + default: true + - name: "changes" + in: "query" + description: "`Dockerfile` instructions to apply while committing" + type: "string" + tags: ["Image"] + /events: + get: + summary: "Monitor events" + description: | + Stream real-time events from the server. + + Various objects within Docker report events when something happens to them. 
+ + Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, and `update` + + Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, and `untag` + + Volumes report these events: `create`, `mount`, `unmount`, and `destroy` + + Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, and `remove` + + The Docker daemon reports these events: `reload` + + Services report these events: `create`, `update`, and `remove` + + Nodes report these events: `create`, `update`, and `remove` + + Secrets report these events: `create`, `update`, and `remove` + + Configs report these events: `create`, `update`, and `remove` + + operationId: "SystemEvents" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "SystemEventsResponse" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + Action: + description: "The type of event" + type: "string" + Actor: + type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + Attributes: + description: "Various key/value attributes of the object, depending on its type" + type: "object" + additionalProperties: + type: "string" + time: + description: "Timestamp of event" + type: "integer" + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + examples: + application/json: + Type: "container" + Action: "create" + Actor: + ID: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + com.example.some-label: "some-label-value" + image: "alpine" + name: "my-container" + time: 1461943101 + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "since" + in: "query" + description: "Show events created since this timestamp then stream new events." + type: "string" + - name: "until" + in: "query" + description: "Show events created until this timestamp then stop streaming." + type: "string" + - name: "filters" + in: "query" + description: | + A JSON encoded value of filters (a `map[string][]string`) to process on the event list. 
Available filters: + + - `config=<string>` config name or ID + - `container=<string>` container name or ID + - `daemon=<string>` daemon name or ID + - `event=<string>` event type + - `image=<string>` image name or ID + - `label=<string>` image or container label + - `network=<string>` network name or ID + - `node=<string>` node ID + - `plugin=<string>` plugin name or ID + - `scope=<string>` local or swarm + - `secret=<string>` secret name or ID + - `service=<string>` service name or ID + - `type=<string>` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` + - `volume=<string>` volume name + type: "string" + tags: ["System"] + /system/df: + get: + summary: "Get data usage information" + operationId: "SystemDataUsage" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "SystemDataUsageResponse" + properties: + LayersSize: + type: "integer" + format: "int64" + Images: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + Containers: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + Volumes: + type: "array" + items: + $ref: "#/definitions/Volume" + BuildCache: + type: "array" + items: + $ref: "#/definitions/BuildCache" + example: + LayersSize: 1092588 + Images: + - + Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + ParentId: "" + RepoTags: + - "busybox:latest" + RepoDigests: + - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" + Created: 1466724217 + Size: 1092588 + SharedSize: 0 + VirtualSize: 1092588 + Labels: {} + Containers: 1 + Containers: + - + Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" + Names: + - "/top" + Image: "busybox" + ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + Command: "top" + Created: 1472592424 + Ports: [] + SizeRootFs: 1092588 + Labels: {} + State: "exited" + Status: "Exited (0) 56 minutes ago" + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + IPAMConfig: null + Links: null + Aliases: null + NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" + EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" + Gateway: "172.18.0.1" + IPAddress: "172.18.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Mounts: [] + Volumes: + - + Name: "my-volume" + Driver: "local" + Mountpoint: "/var/lib/docker/volumes/my-volume/_data" + Labels: null + Scope: "local" + Options: null + UsageData: + Size: 10920104 + RefCount: 2 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /images/{name}/get: + get: + summary: "Export an image" + description: | + Get a tarball containing all images and metadata for a repository. + + If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. 
+ + ### Image tarball format + + An image tarball contains one directory per image layer (named using its long ID), each containing these files: + + - `VERSION`: currently `1.0` - the file format version + - `json`: detailed layer information, similar to `docker inspect layer_id` + - `layer.tar`: A tarfile containing the filesystem changes in this layer + + The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. + + If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. + + ```json + { + "hello-world": { + "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" + } + } + ``` + operationId: "ImageGet" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/get: + get: + summary: "Export several images" + description: | + Get a tarball containing all images and metadata for several image repositories. + + For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. + + For details on the format, see [the export image endpoint](#operation/ImageGet). + operationId: "ImageGetAll" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "names" + in: "query" + description: "Image names to filter by" + type: "array" + items: + type: "string" + tags: ["Image"] + /images/load: + post: + summary: "Import images" + description: | + Load a set of images and tags into a repository. + + For details on the format, see [the export image endpoint](#operation/ImageGet). + operationId: "ImageLoad" + consumes: + - "application/x-tar" + produces: + - "application/json" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "imagesTarball" + in: "body" + description: "Tar archive containing images" + schema: + type: "string" + format: "binary" + - name: "quiet" + in: "query" + description: "Suppress progress details during load." + type: "boolean" + default: false + tags: ["Image"] + /containers/{id}/exec: + post: + summary: "Create an exec instance" + description: "Run a command inside a running container." 
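The export endpoint above simply streams the tarball in the layout just described, so saving an image to disk reduces to copying the response body. A sketch, with the socket path, `v1.40` prefix, image name, and output filename all being assumptions:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"net"
	"net/http"
	"os"
)

func main() {
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
			},
		},
	}
	resp, err := client.Get("http://localhost/v1.40/images/busybox:latest/get")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		panic(fmt.Errorf("unexpected status: %s", resp.Status))
	}
	out, err := os.Create("busybox.tar")
	if err != nil {
		panic(err)
	}
	defer out.Close()
	// The body is the application/x-tar image tarball; the same bytes can
	// be fed back to POST /images/load unchanged.
	if _, err := io.Copy(out, resp.Body); err != nil {
		panic(err)
	}
}
```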
+ operationId: "ContainerExec" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is paused" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execConfig" + in: "body" + description: "Exec configuration" + schema: + type: "object" + properties: + AttachStdin: + type: "boolean" + description: "Attach to `stdin` of the exec command." + AttachStdout: + type: "boolean" + description: "Attach to `stdout` of the exec command." + AttachStderr: + type: "boolean" + description: "Attach to `stderr` of the exec command." + DetachKeys: + type: "string" + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + Env: + description: "A list of environment variables in the form `[\"VAR=value\", ...]`." + type: "array" + items: + type: "string" + Cmd: + type: "array" + description: "Command to run, as a string or array of strings." + items: + type: "string" + Privileged: + type: "boolean" + description: "Runs the exec process with extended privileges." + default: false + User: + type: "string" + description: "The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`." + WorkingDir: + type: "string" + description: "The working directory for the exec process inside the container." + example: + AttachStdin: false + AttachStdout: true + AttachStderr: true + DetachKeys: "ctrl-p,ctrl-q" + Tty: false + Cmd: + - "date" + Env: + - "FOO=bar" + - "BAZ=quux" + required: true + - name: "id" + in: "path" + description: "ID or name of container" + type: "string" + required: true + tags: ["Exec"] + /exec/{id}/start: + post: + summary: "Start an exec instance" + description: "Starts a previously set up exec instance. If detach is true, this endpoint returns immediately after starting the command. Otherwise, it sets up an interactive session with the command." + operationId: "ExecStart" + consumes: + - "application/json" + produces: + - "application/vnd.docker.raw-stream" + responses: + 200: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Container is stopped or paused" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execStartConfig" + in: "body" + schema: + type: "object" + properties: + Detach: + type: "boolean" + description: "Detach from the command." + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + example: + Detach: false + Tty: false + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + /exec/{id}/resize: + post: + summary: "Resize an exec instance" + description: "Resize the TTY session used by an exec instance. This endpoint only works if `tty` was specified as part of creating and starting the exec instance." 
+ operationId: "ExecResize" + responses: + 201: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + - name: "h" + in: "query" + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Exec"] + /exec/{id}/json: + get: + summary: "Inspect an exec instance" + description: "Return low-level information about an exec instance." + operationId: "ExecInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ExecInspectResponse" + properties: + CanRemove: + type: "boolean" + DetachKeys: + type: "string" + ID: + type: "string" + Running: + type: "boolean" + ExitCode: + type: "integer" + ProcessConfig: + $ref: "#/definitions/ProcessConfig" + OpenStdin: + type: "boolean" + OpenStderr: + type: "boolean" + OpenStdout: + type: "boolean" + ContainerID: + type: "string" + Pid: + type: "integer" + description: "The system process ID for the exec process." + examples: + application/json: + CanRemove: false + ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" + DetachKeys: "" + ExitCode: 2 + ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" + OpenStderr: true + OpenStdin: true + OpenStdout: true + ProcessConfig: + arguments: + - "-c" + - "exit 2" + entrypoint: "sh" + privileged: false + tty: true + user: "1000" + Running: false + Pid: 42000 + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + + /volumes: + get: + summary: "List volumes" + operationId: "VolumeList" + produces: ["application/json"] + responses: + 200: + description: "Summary volume data that matches the query" + schema: + type: "object" + title: "VolumeListResponse" + description: "Volume list response" + required: [Volumes, Warnings] + properties: + Volumes: + type: "array" + x-nullable: false + description: "List of volumes" + items: + $ref: "#/definitions/Volume" + Warnings: + type: "array" + x-nullable: false + description: "Warnings that occurred when fetching the list of volumes" + items: + type: "string" + + examples: + application/json: + Volumes: + - CreatedAt: "2017-07-19T12:00:26Z" + Name: "tardis" + Driver: "local" + Mountpoint: "/var/lib/docker/volumes/tardis" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: "local" + Options: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + Warnings: [] + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to + process on the volumes list. Available filters: + + - `dangling=` When set to `true` (or `1`), returns all + volumes that are not in use by a container. When set to `false` + (or `0`), only volumes that are in use by one or more + containers are returned. + - `driver=` Matches volumes based on their driver. 
+ - `label=<key>` or `label=<key>:<value>` Matches volumes based on + the presence of a `label` alone or a `label` and a value. + - `name=<volume-name>` Matches all or part of a volume name. + type: "string" + format: "json" + tags: ["Volume"] + + /volumes/create: + post: + summary: "Create a volume" + operationId: "VolumeCreate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 201: + description: "The volume was created successfully" + schema: + $ref: "#/definitions/Volume" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "volumeConfig" + in: "body" + required: true + description: "Volume configuration" + schema: + type: "object" + description: "Volume configuration" + title: "VolumeConfig" + properties: + Name: + description: "The new volume's name. If not specified, Docker generates a name." + type: "string" + x-nullable: false + Driver: + description: "Name of the volume driver to use." + type: "string" + default: "local" + x-nullable: false + DriverOpts: + description: "A mapping of driver options and values. These options are passed directly to the driver and are driver specific." + type: "object" + additionalProperties: + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + Name: "tardis" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Driver: "custom" + tags: ["Volume"] + + /volumes/{name}: + get: + summary: "Inspect a volume" + operationId: "VolumeInspect" + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Volume" + 404: + description: "No such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + tags: ["Volume"] + + delete: + summary: "Remove a volume" + description: "Instruct the driver to remove the volume." + operationId: "VolumeDelete" + responses: + 204: + description: "The volume was removed" + 404: + description: "No such volume or volume driver" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Volume is in use and cannot be removed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + - name: "force" + in: "query" + description: "Force the removal of the volume" + type: "boolean" + default: false + tags: ["Volume"] + /volumes/prune: + post: + summary: "Delete unused volumes" + produces: + - "application/json" + operationId: "VolumePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. 
+ type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "VolumePruneResponse" + properties: + VolumesDeleted: + description: "Volumes that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Volume"] + /networks: + get: + summary: "List networks" + description: | + Returns a list of networks. For details on the format, see [the network inspect endpoint](#operation/NetworkInspect). + + Note that it uses a different, smaller representation of a network than inspecting a single network. For example, + the list of containers attached to the network is not propagated in API versions 1.28 and up. + operationId: "NetworkList" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Network" + examples: + application/json: + - Name: "bridge" + Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" + Created: "2016-10-19T06:21:00.416543526Z" + Scope: "local" + Driver: "bridge" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: + - + Subnet: "172.17.0.0/16" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + - Name: "none" + Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "null" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + - Name: "host" + Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "host" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters: + + - `dangling=` When set to `true` (or `1`), returns all + networks that are not in use by a container. When set to `false` + (or `0`), only networks that are in use by one or more + containers are returned. + - `driver=` Matches a network's driver. + - `id=` Matches all or part of a network ID. + - `label=` or `label==` of a network label. + - `name=` Matches all or part of a network name. + - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
+ type: "string" + tags: ["Network"] + + /networks/{id}: + get: + summary: "Inspect a network" + operationId: "NetworkInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Network" + 404: + description: "Network not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "verbose" + in: "query" + description: "Detailed inspect output for troubleshooting" + type: "boolean" + default: false + - name: "scope" + in: "query" + description: "Filter the network by scope (swarm, global, or local)" + type: "string" + tags: ["Network"] + + delete: + summary: "Remove a network" + operationId: "NetworkDelete" + responses: + 204: + description: "No error" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such network" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + /networks/create: + post: + summary: "Create a network" + operationId: "NetworkCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "No error" + schema: + type: "object" + title: "NetworkCreateResponse" + properties: + Id: + description: "The ID of the created network." + type: "string" + Warning: + type: "string" + example: + Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" + Warning: "" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "plugin not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "networkConfig" + in: "body" + description: "Network configuration" + required: true + schema: + type: "object" + required: ["Name"] + properties: + Name: + description: "The network's name." + type: "string" + CheckDuplicate: + description: "Check for networks with duplicate names. Since Network is primarily keyed based on a random ID and not on the name, and network name is strictly a user-friendly alias to the network which is uniquely identified using ID, there is no guaranteed way to check for duplicates. CheckDuplicate is there to provide a best effort checking of any networks which has the same name but it is not guaranteed to catch all name collisions." + type: "boolean" + Driver: + description: "Name of the network driver plugin to use." + type: "string" + default: "bridge" + Internal: + description: "Restrict external access to the network." + type: "boolean" + Attachable: + description: "Globally scoped network is manually attachable by regular containers from workers in swarm mode." + type: "boolean" + Ingress: + description: "Ingress network is the network which provides the routing-mesh in swarm mode." + type: "boolean" + IPAM: + description: "Optional custom IP scheme for the network." + $ref: "#/definitions/IPAM" + EnableIPv6: + description: "Enable IPv6 on the network." + type: "boolean" + Options: + description: "Network specific options to be used by the drivers." 
+ type: "object" + additionalProperties: + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + Name: "isolated_nw" + CheckDuplicate: false + Driver: "bridge" + EnableIPv6: true + IPAM: + Driver: "default" + Config: + - Subnet: "172.20.0.0/16" + IPRange: "172.20.10.0/24" + Gateway: "172.20.10.11" + - Subnet: "2001:db8:abcd::/64" + Gateway: "2001:db8:abcd::1011" + Options: + foo: "bar" + Internal: true + Attachable: false + Ingress: false + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + tags: ["Network"] + + /networks/{id}/connect: + post: + summary: "Connect a container to a network" + operationId: "NetworkConnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + properties: + Container: + type: "string" + description: "The ID or name of the container to connect to the network." + EndpointConfig: + $ref: "#/definitions/EndpointSettings" + example: + Container: "3613f73ba0e4" + EndpointConfig: + IPAMConfig: + IPv4Address: "172.24.56.89" + IPv6Address: "2001:db8::5689" + tags: ["Network"] + + /networks/{id}/disconnect: + post: + summary: "Disconnect a container from a network" + operationId: "NetworkDisconnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + properties: + Container: + type: "string" + description: "The ID or name of the container to disconnect from the network." + Force: + type: "boolean" + description: "Force the container to disconnect from the network." + tags: ["Network"] + /networks/prune: + post: + summary: "Delete unused networks" + produces: + - "application/json" + operationId: "NetworkPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=` Prune networks created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. 
+ - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune networks with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "NetworkPruneResponse" + properties: + NetworksDeleted: + description: "Networks that were deleted" + type: "array" + items: + type: "string" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Network"] + /plugins: + get: + summary: "List plugins" + operationId: "PluginList" + description: "Returns information about installed plugins." + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Plugin" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the plugin list. Available filters: + + - `capability=<capability name>` + - `enable=<true>|<false>` + tags: ["Plugin"] + + /plugins/privileges: + get: + summary: "Get plugin privileges" + operationId: "GetPluginPrivileges" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + description: "Describes a permission the user has to accept upon installing the plugin." + type: "object" + title: "PluginPrivilegeItem" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + tags: + - "Plugin" + + /plugins/pull: + post: + summary: "Install a plugin" + operationId: "PluginPull" + description: | + Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). + produces: + - "application/json" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + Remote reference for plugin to install. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "name" + in: "query" + description: | + Local name for the pulled plugin. + + The `:latest` tag is optional, and is used as the default if omitted. + required: false + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + description: "Describes a permission accepted by the user upon installing the plugin." 
+ type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/{name}/json: + get: + summary: "Inspect a plugin" + operationId: "PluginInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}: + delete: + summary: "Remove a plugin" + operationId: "PluginDelete" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "force" + in: "query" + description: "Disable the plugin before removing. This may result in issues if the plugin is in use by a container." + type: "boolean" + default: false + tags: ["Plugin"] + /plugins/{name}/enable: + post: + summary: "Enable a plugin" + operationId: "PluginEnable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "timeout" + in: "query" + description: "Set the HTTP client timeout (in seconds)" + type: "integer" + default: 0 + tags: ["Plugin"] + /plugins/{name}/disable: + post: + summary: "Disable a plugin" + operationId: "PluginDisable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}/upgrade: + post: + summary: "Upgrade a plugin" + operationId: "PluginUpgrade" + responses: + 204: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "remote" + in: "query" + description: | + Remote reference to upgrade to. + + The `:latest` tag is optional, and is used as the default if omitted. 
+ required: true + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + description: "Describes a permission accepted by the user upon installing the plugin." + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/create: + post: + summary: "Create a plugin" + operationId: "PluginCreate" + consumes: + - "application/x-tar" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "tarContext" + in: "body" + description: "Path to tar containing plugin rootfs and manifest" + schema: + type: "string" + format: "binary" + tags: ["Plugin"] + /plugins/{name}/push: + post: + summary: "Push a plugin" + operationId: "PluginPush" + description: | + Push a plugin to the registry. + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + responses: + 200: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /plugins/{name}/set: + post: + summary: "Configure a plugin" + operationId: "PluginSet" + consumes: + - "application/json" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + type: "string" + example: ["DEBUG=1"] + responses: + 204: + description: "No error" + 404: + description: "Plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /nodes: + get: + summary: "List nodes" + operationId: "NodeList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Node" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). 
+ + Available filters: + - `id=<node id>` + - `label=<engine label>` + - `membership=`(`accepted`|`pending`)` + - `name=<node name>` + - `node.label=<node label>` + - `role=`(`manager`|`worker`)` + type: "string" + tags: ["Node"] + /nodes/{id}: + get: + summary: "Inspect a node" + operationId: "NodeInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Node" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + tags: ["Node"] + delete: + summary: "Delete a node" + operationId: "NodeDelete" + responses: + 200: + description: "no error" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + - name: "force" + in: "query" + description: "Force remove a node from the swarm" + default: false + type: "boolean" + tags: ["Node"] + /nodes/{id}/update: + post: + summary: "Update a node" + operationId: "NodeUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID of the node" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/NodeSpec" + - name: "version" + in: "query" + description: "The version number of the node object being updated. This is required to avoid conflicting writes."
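Node filters travel as a JSON-encoded `map[string][]string`; the `github.com/docker/docker/api/types/filters` helper builds that encoding, and `NodeUpdate` takes the version read back from inspect to satisfy the optimistic-locking rule above. A sketch reusing `cli` and `ctx` (and assuming at least one manager exists):

```go
f := filters.NewArgs()
f.Add("role", "manager") // encodes to {"role":["manager"]}

nodes, err := cli.NodeList(ctx, types.NodeListOptions{Filters: f})
if err != nil {
	log.Fatal(err)
}

// Read-modify-write: the version from the inspect response guards the update.
node, _, err := cli.NodeInspectWithRaw(ctx, nodes[0].ID)
if err != nil {
	log.Fatal(err)
}
node.Spec.Availability = swarm.NodeAvailabilityDrain
if err := cli.NodeUpdate(ctx, node.ID, node.Version, node.Spec); err != nil {
	log.Fatal(err) // an "update out of sequence" error means the version was stale
}
```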
+ type: "integer" + format: "int64" + required: true + tags: ["Node"] + /swarm: + get: + summary: "Inspect swarm" + operationId: "SwarmInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Swarm" + 404: + description: "no such swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/init: + post: + summary: "Initialize a new swarm" + operationId: "SwarmInit" + produces: + - "application/json" + - "text/plain" + responses: + 200: + description: "no error" + schema: + description: "The node ID" + type: "string" + example: "7v2t30z9blmxuhnyo6s4cpenp" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + ListenAddr: + description: "Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is used." + type: "string" + AdvertiseAddr: + description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: `<ip|interface>`), for example, `192.168.1.1`, + or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` + is used. + + The `DataPathAddr` specifies the address that global scope network drivers will publish towards other + nodes in order to reach the containers running on this node. Using this parameter it is possible to + separate the container data traffic from the management traffic of the cluster. + type: "string" + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + If no port is set or is set to 0, the default port 4789 will be used. + type: "integer" + format: "uint32" + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global scope networks. + type: "array" + items: + type: "string" + example: ["10.10.0.0/16", "20.20.0.0/16"] + ForceNewCluster: + description: "Force creation of a new swarm."
+ type: "boolean" + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created from the default subnet pool + type: "integer" + format: "uint32" + Spec: + $ref: "#/definitions/SwarmSpec" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + DataPathPort: 4789 + DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] + SubnetSize: 24 + ForceNewCluster: false + Spec: + Orchestration: {} + Raft: {} + Dispatcher: {} + CAConfig: {} + EncryptionConfig: + AutoLockManagers: false + tags: ["Swarm"] + /swarm/join: + post: + summary: "Join an existing swarm" + operationId: "SwarmJoin" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + ListenAddr: + description: "Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP)." + type: "string" + AdvertiseAddr: + description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: `<ip|interface>`), for example, `192.168.1.1`, + or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` + is used. + + The `DataPathAddr` specifies the address that global scope network drivers will publish towards other + nodes in order to reach the containers running on this node. Using this parameter it is possible to + separate the container data traffic from the management traffic of the cluster. + + type: "string" + RemoteAddrs: + description: "Addresses of manager nodes already participating in the swarm." + type: "array" + items: + type: "string" + JoinToken: + description: "Secret token for joining this swarm." + type: "string" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + RemoteAddrs: + - "node1:2377" + JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + tags: ["Swarm"] + /swarm/leave: + post: + summary: "Leave a swarm" + operationId: "SwarmLeave" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "force" + description: "Force leave swarm, even if this is the last manager or if leaving will break the cluster."
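In the vendored Go types these two endpoints are `SwarmInit` and `SwarmJoin`, and `RemoteAddrs` is a `[]string`, matching the array in the example body above. A sketch reusing `cli` and `ctx`; `workerCli` is a hypothetical second client pointed at another host's daemon, and the addresses are placeholders:

```go
// On the first manager: POST /swarm/init.
nodeID, err := cli.SwarmInit(ctx, swarm.InitRequest{
	ListenAddr:    "0.0.0.0:2377",
	AdvertiseAddr: "192.168.1.1:2377",
})
if err != nil {
	log.Fatal(err)
}
fmt.Println("manager node:", nodeID)

// The join tokens come back in the swarm inspect response.
sw, err := cli.SwarmInspect(ctx)
if err != nil {
	log.Fatal(err)
}

// On another host: POST /swarm/join with the worker token.
err = workerCli.SwarmJoin(ctx, swarm.JoinRequest{
	ListenAddr:  "0.0.0.0:2377",
	RemoteAddrs: []string{"192.168.1.1:2377"},
	JoinToken:   sw.JoinTokens.Worker,
})
if err != nil {
	log.Fatal(err)
}
```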
+ in: "query" + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/update: + post: + summary: "Update a swarm" + operationId: "SwarmUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + $ref: "#/definitions/SwarmSpec" + - name: "version" + in: "query" + description: "The version number of the swarm object being updated. This is required to avoid conflicting writes." + type: "integer" + format: "int64" + required: true + - name: "rotateWorkerToken" + in: "query" + description: "Rotate the worker join token." + type: "boolean" + default: false + - name: "rotateManagerToken" + in: "query" + description: "Rotate the manager join token." + type: "boolean" + default: false + - name: "rotateManagerUnlockKey" + in: "query" + description: "Rotate the manager unlock key." + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/unlockkey: + get: + summary: "Get the unlock key" + operationId: "SwarmUnlockkey" + consumes: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "UnlockKeyResponse" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/unlock: + post: + summary: "Unlock a locked manager" + operationId: "SwarmUnlock" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /services: + get: + summary: "List services" + operationId: "ServiceList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Service" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. Available filters: + + - `id=<service id>` + - `label=<service label>` + - `mode=["replicated"|"global"]` + - `name=<service name>` + tags: ["Service"] + /services/create: + post: + summary: "Create a service" + operationId: "ServiceCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + type: "object" + title: "ServiceCreateResponse" + properties: + ID: + description: "The ID of the created service."
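Token rotation is just an update against the current spec and version, and the unlock key endpoints pair together. A sketch reusing `cli` and `ctx`:

```go
// Rotate the worker join token: POST /swarm/update?rotateWorkerToken=true
sw, err := cli.SwarmInspect(ctx)
if err != nil {
	log.Fatal(err)
}
err = cli.SwarmUpdate(ctx, sw.Version, sw.Spec, swarm.UpdateFlags{
	RotateWorkerToken: true,
})
if err != nil {
	log.Fatal(err)
}

// GET /swarm/unlockkey, then POST /swarm/unlock on a restarted locked manager.
key, err := cli.SwarmGetUnlockKey(ctx)
if err != nil {
	log.Fatal(err)
}
if err := cli.SwarmUnlock(ctx, swarm.UnlockRequest{UnlockKey: key.UnlockKey}); err != nil {
	log.Fatal(err)
}
```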
+ type: "string" + Warning: + description: "Optional warning message" + type: "string" + example: + ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" + Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "network is not eligible for services" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "web" + TaskTemplate: + ContainerSpec: + Image: "nginx:alpine" + Mounts: + - + ReadOnly: true + Source: "web-data" + Target: "/usr/share/nginx/html" + Type: "volume" + VolumeOptions: + DriverConfig: {} + Labels: + com.example.something: "something-value" + Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] + User: "33" + DNSConfig: + Nameservers: ["8.8.8.8"] + Search: ["example.org"] + Options: ["timeout:3"] + Secrets: + - + File: + Name: "www.example.org.key" + UID: "33" + GID: "33" + Mode: 384 + SecretID: "fpjqlhnwb19zds35k8wn80lq9" + SecretName: "example_org_domain_key" + LogDriver: + Name: "json-file" + Options: + max-file: "3" + max-size: "10M" + Placement: {} + Resources: + Limits: + MemoryBytes: 104857600 + Reservations: {} + RestartPolicy: + Condition: "on-failure" + Delay: 10000000000 + MaxAttempts: 10 + Mode: + Replicated: + Replicas: 4 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Ports: + - + Protocol: "tcp" + PublishedPort: 8080 + TargetPort: 80 + Labels: + foo: "bar" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)" + type: "string" + tags: ["Service"] + /services/{id}: + get: + summary: "Inspect a service" + operationId: "ServiceInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Service" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "insertDefaults" + in: "query" + description: "Fill empty fields with default values." 
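The large example body above corresponds to a `swarm.ServiceSpec` in the vendored Go types. A reduced sketch of `ServiceCreate`, reusing `cli` and `ctx` (service name and image are placeholders):

```go
replicas := uint64(4)
spec := swarm.ServiceSpec{
	Annotations: swarm.Annotations{Name: "web"},
	TaskTemplate: swarm.TaskSpec{
		ContainerSpec: &swarm.ContainerSpec{Image: "nginx:alpine"},
	},
	Mode: swarm.ServiceMode{Replicated: &swarm.ReplicatedService{Replicas: &replicas}},
	EndpointSpec: &swarm.EndpointSpec{
		Ports: []swarm.PortConfig{{
			Protocol:      swarm.PortConfigProtocolTCP,
			PublishedPort: 8080,
			TargetPort:    80,
		}},
	},
}

resp, err := cli.ServiceCreate(ctx, spec, types.ServiceCreateOptions{})
if err != nil {
	log.Fatal(err)
}
fmt.Println("service:", resp.ID)
for _, w := range resp.Warnings {
	fmt.Println("warning:", w) // e.g. the image could not be pinned to a digest
}
```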
+ type: "boolean" + default: false + tags: ["Service"] + delete: + summary: "Delete a service" + operationId: "ServiceDelete" + responses: + 200: + description: "no error" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + tags: ["Service"] + /services/{id}/update: + post: + summary: "Update a service" + operationId: "ServiceUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ServiceUpdateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "top" + TaskTemplate: + ContainerSpec: + Image: "busybox" + Args: + - "top" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + + - name: "version" + in: "query" + description: "The version number of the service object being updated. This is required to avoid conflicting writes. This version number should be the value as currently set on the service *before* the update. You can find the current version by calling `GET /services/{id}`" + required: true + type: "integer" + - name: "registryAuthFrom" + in: "query" + type: "string" + description: "If the X-Registry-Auth header is not specified, this parameter indicates where to find registry authorization credentials. The valid values are `spec` and `previous-spec`." + default: "spec" + - name: "rollback" + in: "query" + type: "string" + description: "Set this parameter to `previous` to cause a server-side rollback to the previous service spec. The supplied spec will be ignored in this case." + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)" + type: "string" + + tags: ["Service"] + /services/{id}/logs: + get: + summary: "Get service logs" + description: | + Get `stdout` and `stderr` logs from a service. + + **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers.
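The version-before-update requirement makes service updates a read-modify-write cycle. A sketch reusing `cli` and `ctx` (service name is a placeholder):

```go
svc, _, err := cli.ServiceInspectWithRaw(ctx, "web", types.ServiceInspectOptions{})
if err != nil {
	log.Fatal(err)
}

svc.Spec.TaskTemplate.ForceUpdate++ // trigger a rolling restart with no other spec change

resp, err := cli.ServiceUpdate(ctx, svc.ID, svc.Version, svc.Spec, types.ServiceUpdateOptions{})
if err != nil {
	log.Fatal(err) // on a version conflict, re-inspect and retry
}
for _, w := range resp.Warnings {
	fmt.Println("warning:", w)
}
```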
+ operationId: "ServiceLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/json" + responses: + 101: + description: "logs returned as a stream" + schema: + type: "string" + format: "binary" + 200: + description: "logs returned as a string in response body" + schema: + type: "string" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such service: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the service" + type: "string" + - name: "details" + in: "query" + description: "Show service context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: | + Return the logs as a stream. + + This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." 
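With `follow` the response becomes a raw stream; when the service's tasks run without a TTY, stdout and stderr are multiplexed and must be demultiplexed, which the vendored `pkg/stdcopy` package handles. A sketch reusing `cli` and `ctx`:

```go
rc, err := cli.ServiceLogs(ctx, "web", types.ContainerLogsOptions{
	ShowStdout: true,
	ShowStderr: true,
	Follow:     true,
	Tail:       "100", // an integer as a string, or "all"
})
if err != nil {
	log.Fatal(err)
}
defer rc.Close()

// Demultiplex the stdout/stderr frames (non-TTY services only).
if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, rc); err != nil {
	log.Fatal(err)
}
```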
+ type: "string" + default: "all" + tags: ["Service"] + /tasks: + get: + summary: "List tasks" + operationId: "TaskList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Task" + example: + - ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + - ID: "1yljwbmlr8er2waf8orvqpwms" + Version: + Index: 30 + CreatedAt: "2016-06-07T21:07:30.019104782Z" + UpdatedAt: "2016-06-07T21:07:30.231958098Z" + Name: "hopeful_cori" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:30.202183143Z" + State: "shutdown" + Message: "shutdown" + ContainerStatus: + ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + DesiredState: "shutdown" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.5/16" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the tasks list. 
Available filters: + + - `desired-state=(running | shutdown | accepted)` + - `id=<task id>` + - `label=key` or `label="key=value"` + - `name=<task name>` + - `node=<node id or name>` + - `service=<service name>` + tags: ["Task"] + /tasks/{id}: + get: + summary: "Inspect a task" + operationId: "TaskInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Task" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID of the task" + required: true + type: "string" + tags: ["Task"] + /tasks/{id}/logs: + get: + summary: "Get task logs" + description: | + Get `stdout` and `stderr` logs from a task. + + **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers. + operationId: "TaskLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/json" + responses: + 101: + description: "logs returned as a stream" + schema: + type: "string" + format: "binary" + 200: + description: "logs returned as a string in response body" + schema: + type: "string" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such task: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID of the task" + type: "string" + - name: "details" + in: "query" + description: "Show task context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: | + Return the logs as a stream. + + This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines."
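Task filters use the same JSON-encoded `map[string][]string` convention as the other list endpoints. A sketch reusing `cli` and `ctx`:

```go
f := filters.NewArgs()
f.Add("service", "web")
f.Add("desired-state", "running")

tasks, err := cli.TaskList(ctx, types.TaskListOptions{Filters: f})
if err != nil {
	log.Fatal(err)
}
for _, t := range tasks {
	fmt.Println(t.ID, "on", t.NodeID, "state:", t.Status.State)
}
```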
+ type: "string" + default: "all" + tags: ["Task"] + /secrets: + get: + summary: "List secrets" + operationId: "SecretList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Secret" + example: + - ID: "blt1owaxmitz71s9v5zh81zun" + Version: + Index: 85 + CreatedAt: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: "2017-07-20T13:55:28.678958722Z" + Spec: + Name: "mysql-passwd" + Labels: + some.label: "some.value" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the secrets list. Available filters: + + - `id=<secret id>` + - `label=<key> or label=<key>=value` + - `name=<secret name>` + - `names=<secret name>` + tags: ["Secret"] + /secrets/create: + post: + summary: "Create a secret" + operationId: "SecretCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/SecretSpec" + - type: "object" + example: + Name: "app-key.crt" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + tags: ["Secret"] + /secrets/{id}: + get: + summary: "Inspect a secret" + operationId: "SecretInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Secret" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + delete: + summary: "Delete a secret" + operationId: "SecretDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm"
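On the wire, `Data` is base64; in the vendored Go types `SecretSpec.Data` is a raw `[]byte`, which `encoding/json` base64-encodes automatically. A sketch reusing `cli` and `ctx` (name and payload are placeholders):

```go
resp, err := cli.SecretCreate(ctx, swarm.SecretSpec{
	Annotations: swarm.Annotations{
		Name:   "app-key.crt",
		Labels: map[string]string{"foo": "bar"},
	},
	// Raw bytes here; the JSON encoder emits the base64 the API expects.
	Data: []byte("not a real certificate"),
})
if err != nil {
	log.Fatal(err)
}
fmt.Println("secret:", resp.ID)
```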
+ schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + /secrets/{id}/update: + post: + summary: "Update a Secret" + operationId: "SecretUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such secret" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the secret" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/SecretSpec" + description: "The spec of the secret to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [SecretInspect endpoint](#operation/SecretInspect) response values." + - name: "version" + in: "query" + description: "The version number of the secret object being updated. This is required to avoid conflicting writes." + type: "integer" + format: "int64" + required: true + tags: ["Secret"] + /configs: + get: + summary: "List configs" + operationId: "ConfigList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Config" + example: + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "server.conf" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the configs list. 
Available filters: + + - `id=<config id>` + - `label=<key> or label=<key>=value` + - `name=<config name>` + - `names=<config name>` + tags: ["Config"] + /configs/create: + post: + summary: "Create a config" + operationId: "ConfigCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/ConfigSpec" + - type: "object" + example: + Name: "server.conf" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + tags: ["Config"] + /configs/{id}: + get: + summary: "Inspect a config" + operationId: "ConfigInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Config" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + delete: + summary: "Delete a config" + operationId: "ConfigDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + /configs/{id}/update: + post: + summary: "Update a Config" + operationId: "ConfigUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such config" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the config" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/ConfigSpec" + description: "The spec of the config to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [ConfigInspect endpoint](#operation/ConfigInspect) response values." + - name: "version" + in: "query" + description: "The version number of the config object being updated. This is required to avoid conflicting writes."
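Since only `Labels` may change, a config update is an inspect, a label tweak, and a versioned write. A sketch reusing `cli` and `ctx` (config name is a placeholder):

```go
cfg, _, err := cli.ConfigInspectWithRaw(ctx, "server.conf")
if err != nil {
	log.Fatal(err)
}

spec := cfg.Spec
spec.Labels = map[string]string{"env": "dev"} // only Labels may change

if err := cli.ConfigUpdate(ctx, cfg.ID, cfg.Version, spec); err != nil {
	log.Fatal(err)
}
```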
+ type: "integer" + format: "int64" + required: true + tags: ["Config"] + /distribution/{name}/json: + get: + summary: "Get image information from the registry" + description: "Return image digest and platform information by contacting the registry." + operationId: "DistributionInspect" + produces: + - "application/json" + responses: + 200: + description: "descriptor and platform information" + schema: + type: "object" + x-go-name: DistributionInspect + title: "DistributionInspectResponse" + required: [Descriptor, Platforms] + properties: + Descriptor: + type: "object" + description: "A descriptor struct containing digest, media type, and size" + properties: + MediaType: + type: "string" + Size: + type: "integer" + format: "int64" + Digest: + type: "string" + URLs: + type: "array" + items: + type: "string" + Platforms: + type: "array" + description: "An array containing all platforms supported by the image" + items: + type: "object" + properties: + Architecture: + type: "string" + OS: + type: "string" + OSVersion: + type: "string" + OSFeatures: + type: "array" + items: + type: "string" + Variant: + type: "string" + Features: + type: "array" + items: + type: "string" + examples: + application/json: + Descriptor: + MediaType: "application/vnd.docker.distribution.manifest.v2+json" + Digest: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" + Size: 3987495 + URLs: + - "" + Platforms: + - Architecture: "amd64" + OS: "linux" + OSVersion: "" + OSFeatures: + - "" + Variant: "" + Features: + - "" + 401: + description: "Failed authentication or no image found" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Distribution"] + /session: + post: + summary: "Initialize interactive session" + description: | + Start a new interactive session with a server. Session allows server to call back to the client for advanced capabilities. + + > **Note**: This endpoint is *experimental* and only available if the daemon is started with experimental + > features enabled. The specifications for this endpoint may still change in a future version of the API. + + ### Hijacking + + This endpoint hijacks the HTTP connection to HTTP2 transport that allows the client to expose gRPC services on that connection.
+ + For example, the client sends this request to upgrade the connection: + + ``` + POST /session HTTP/1.1 + Upgrade: h2c + Connection: Upgrade + ``` + + The Docker daemon will respond with a `101 UPGRADED` response, followed by the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Connection: Upgrade + Upgrade: h2c + ``` + operationId: "Session" + produces: + - "application/vnd.docker.raw-stream" + responses: + 101: + description: "no error, hijacking successful" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Session (experimental)"] diff --git a/extender/code/vendor/github.com/docker/docker/api/types/auth.go b/extender/code/vendor/github.com/docker/docker/api/types/auth.go new file mode 100644 index 000000000..ddf15bb18 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/auth.go @@ -0,0 +1,22 @@ +package types // import "github.com/docker/docker/api/types" + +// AuthConfig contains authorization information for connecting to a Registry +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + // Email is an optional value associated with the username. + // This field is deprecated and will be removed in a later + // version of docker. + Email string `json:"email,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. + IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/backend/backend.go b/extender/code/vendor/github.com/docker/docker/api/types/backend/backend.go new file mode 100644 index 000000000..ef1e669c3 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/backend/backend.go @@ -0,0 +1,128 @@ +// Package backend includes types to send information to server backends. +package backend // import "github.com/docker/docker/api/types/backend" + +import ( + "io" + "time" + + "github.com/docker/docker/api/types/container" +) + +// ContainerAttachConfig holds the streams to use when connecting to a container to view logs. +type ContainerAttachConfig struct { + GetStreams func() (io.ReadCloser, io.Writer, io.Writer, error) + UseStdin bool + UseStdout bool + UseStderr bool + Logs bool + Stream bool + DetachKeys string + + // Used to signify that streams are multiplexed and therefore need a StdWriter to encode stdout/stderr messages accordingly. + // TODO @cpuguy83: This shouldn't be needed. It was only added so that http and websocket endpoints can use the same function, and the websocket function was not using a stdwriter prior to this change... + // HOWEVER, the websocket endpoint is using a single stream and SHOULD be encoded with stdout/stderr as is done for HTTP since it is still just a single stream. + // Since such a change is an API change unrelated to the current changeset we'll keep it as is here and change separately. + MuxStreams bool +} + +// PartialLogMetaData provides meta data for a partial log message. Messages +// exceeding a predefined size are split into chunks with this metadata. The +// expectation is for the logger endpoints to assemble the chunks using this +// metadata.
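The `AuthConfig` struct above is what the spec's `X-Registry-Auth` headers carry: the struct JSON-encoded, then base64url-encoded. A sketch that builds the header value and uses it with the distribution endpoint, reusing `cli` and `ctx` (credentials and image reference are placeholders):

```go
// Build the X-Registry-Auth header value: JSON-encoded AuthConfig,
// encoded with the URL-safe base64 alphabet.
auth, err := json.Marshal(types.AuthConfig{
	Username:      "user",
	Password:      "secret",
	ServerAddress: "registry.example.com",
})
if err != nil {
	log.Fatal(err)
}
encoded := base64.URLEncoding.EncodeToString(auth)

// GET /distribution/{name}/json: resolve the digest without pulling.
info, err := cli.DistributionInspect(ctx, "registry.example.com/app:1.0", encoded)
if err != nil {
	log.Fatal(err)
}
fmt.Println("digest:", info.Descriptor.Digest)
for _, p := range info.Platforms {
	fmt.Println("platform:", p.OS+"/"+p.Architecture)
}
```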
+type PartialLogMetaData struct { + Last bool // true if this message is the last of a partial + ID string // identifies group of messages comprising a single record + Ordinal int // ordering of message in partial group +} + +// LogMessage is a data structure that represents a piece of output produced by some +// container. The Line member is a slice of an array whose contents can be +// changed after a log driver's Log() method returns. +// changes to this struct need to be reflected in the reset method in +// daemon/logger/logger.go +type LogMessage struct { + Line []byte + Source string + Timestamp time.Time + Attrs []LogAttr + PLogMetaData *PartialLogMetaData + + // Err is an error associated with a message. Completeness of a message + // with Err is not expected, though it may be partially complete (fields may + // be missing, gibberish, or nil) + Err error +} + +// LogAttr is used to hold the extra attributes available in the log message. +type LogAttr struct { + Key string + Value string +} + +// LogSelector is a list of services and tasks that should be returned as part +// of a log stream. It is similar to swarmapi.LogSelector, with the difference +// that the names don't have to be resolved to IDs; this is mostly to avoid +// accidents later where a swarmapi LogSelector might have been incorrectly +// used verbatim (and to avoid the handler having to import swarmapi types) +type LogSelector struct { + Services []string + Tasks []string +} + +// ContainerStatsConfig holds information for configuring the runtime +// behavior of a backend.ContainerStats() call. +type ContainerStatsConfig struct { + Stream bool + OutStream io.Writer + Version string +} + +// ExecInspect holds information about a running process started +// with docker exec. +type ExecInspect struct { + ID string + Running bool + ExitCode *int + ProcessConfig *ExecProcessConfig + OpenStdin bool + OpenStderr bool + OpenStdout bool + CanRemove bool + ContainerID string + DetachKeys []byte + Pid int +} + +// ExecProcessConfig holds information about the exec process +// running on the host. +type ExecProcessConfig struct { + Tty bool `json:"tty"` + Entrypoint string `json:"entrypoint"` + Arguments []string `json:"arguments"` + Privileged *bool `json:"privileged,omitempty"` + User string `json:"user,omitempty"` +} + +// CreateImageConfig is the configuration for creating an image from a +// container. +type CreateImageConfig struct { + Repo string + Tag string + Pause bool + Author string + Comment string + Config *container.Config + Changes []string +} + +// CommitConfig is the configuration for creating an image as part of a build.
+type CommitConfig struct { + Author string + Comment string + Config *container.Config + ContainerConfig *container.Config + ContainerID string + ContainerMountLabel string + ContainerOS string + ParentImageID string +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/backend/build.go b/extender/code/vendor/github.com/docker/docker/api/types/backend/build.go new file mode 100644 index 000000000..1a2e59f2f --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/backend/build.go @@ -0,0 +1,45 @@ +package backend // import "github.com/docker/docker/api/types/backend" + +import ( + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/streamformatter" + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// PullOption defines different modes for accessing images +type PullOption int + +const ( + // PullOptionNoPull only returns local images + PullOptionNoPull PullOption = iota + // PullOptionForcePull always tries to pull a ref from the registry first + PullOptionForcePull + // PullOptionPreferLocal uses local image if it exists, otherwise pulls + PullOptionPreferLocal +) + +// ProgressWriter is a data object to transport progress streams to the client +type ProgressWriter struct { + Output io.Writer + StdoutFormatter io.Writer + StderrFormatter io.Writer + AuxFormatter *streamformatter.AuxFormatter + ProgressReaderFunc func(io.ReadCloser) io.ReadCloser +} + +// BuildConfig is the configuration used by a BuildManager to start a build +type BuildConfig struct { + Source io.ReadCloser + ProgressWriter ProgressWriter + Options *types.ImageBuildOptions +} + +// GetImageAndLayerOptions are the options supported by GetImageAndReleasableLayer +type GetImageAndLayerOptions struct { + PullOption PullOption + AuthConfig map[string]types.AuthConfig + Output io.Writer + Platform *specs.Platform +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/extender/code/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go new file mode 100644 index 000000000..bf3463b90 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go @@ -0,0 +1,23 @@ +package blkiodev // import "github.com/docker/docker/api/types/blkiodev" + +import "fmt" + +// WeightDevice is a structure that holds device:weight pair +type WeightDevice struct { + Path string + Weight uint16 +} + +func (w *WeightDevice) String() string { + return fmt.Sprintf("%s:%d", w.Path, w.Weight) +} + +// ThrottleDevice is a structure that holds device:rate_per_second pair +type ThrottleDevice struct { + Path string + Rate uint64 +} + +func (t *ThrottleDevice) String() string { + return fmt.Sprintf("%s:%d", t.Path, t.Rate) +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/client.go b/extender/code/vendor/github.com/docker/docker/api/types/client.go new file mode 100644 index 000000000..4b9f50282 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/client.go @@ -0,0 +1,415 @@ +package types // import "github.com/docker/docker/api/types" + +import ( + "bufio" + "io" + "net" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + units "github.com/docker/go-units" +) + +// CheckpointCreateOptions holds parameters to create a checkpoint from a container +type CheckpointCreateOptions struct { + CheckpointID string + CheckpointDir string + Exit bool +} + +// CheckpointListOptions holds parameters to list checkpoints for a container 
+type CheckpointListOptions struct { + CheckpointDir string +} + +// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container +type CheckpointDeleteOptions struct { + CheckpointID string + CheckpointDir string +} + +// ContainerAttachOptions holds parameters to attach to a container. +type ContainerAttachOptions struct { + Stream bool + Stdin bool + Stdout bool + Stderr bool + DetachKeys string + Logs bool +} + +// ContainerCommitOptions holds parameters to commit changes into a container. +type ContainerCommitOptions struct { + Reference string + Comment string + Author string + Changes []string + Pause bool + Config *container.Config +} + +// ContainerExecInspect holds information returned by exec inspect. +type ContainerExecInspect struct { + ExecID string + ContainerID string + Running bool + ExitCode int + Pid int +} + +// ContainerListOptions holds parameters to list containers with. +type ContainerListOptions struct { + Quiet bool + Size bool + All bool + Latest bool + Since string + Before string + Limit int + Filters filters.Args +} + +// ContainerLogsOptions holds parameters to filter logs with. +type ContainerLogsOptions struct { + ShowStdout bool + ShowStderr bool + Since string + Until string + Timestamps bool + Follow bool + Tail string + Details bool +} + +// ContainerRemoveOptions holds parameters to remove containers. +type ContainerRemoveOptions struct { + RemoveVolumes bool + RemoveLinks bool + Force bool +} + +// ContainerStartOptions holds parameters to start containers. +type ContainerStartOptions struct { + CheckpointID string + CheckpointDir string +} + +// CopyToContainerOptions holds information +// about files to copy into a container +type CopyToContainerOptions struct { + AllowOverwriteDirWithFile bool + CopyUIDGID bool +} + +// EventsOptions holds parameters to filter events with. +type EventsOptions struct { + Since string + Until string + Filters filters.Args +} + +// NetworkListOptions holds parameters to filter the list of networks with. +type NetworkListOptions struct { + Filters filters.Args +} + +// HijackedResponse holds connection information for a hijacked request. +type HijackedResponse struct { + Conn net.Conn + Reader *bufio.Reader +} + +// Close closes the hijacked connection and reader. +func (h *HijackedResponse) Close() { + h.Conn.Close() +} + +// CloseWriter is an interface that implements structs +// that close input streams to prevent from writing. +type CloseWriter interface { + CloseWrite() error +} + +// CloseWrite closes a readWriter for writing. +func (h *HijackedResponse) CloseWrite() error { + if conn, ok := h.Conn.(CloseWriter); ok { + return conn.CloseWrite() + } + return nil +} + +// ImageBuildOptions holds the information +// necessary to build images. +type ImageBuildOptions struct { + Tags []string + SuppressOutput bool + RemoteContext string + NoCache bool + Remove bool + ForceRemove bool + PullParent bool + Isolation container.Isolation + CPUSetCPUs string + CPUSetMems string + CPUShares int64 + CPUQuota int64 + CPUPeriod int64 + Memory int64 + MemorySwap int64 + CgroupParent string + NetworkMode string + ShmSize int64 + Dockerfile string + Ulimits []*units.Ulimit + // BuildArgs needs to be a *string instead of just a string so that + // we can tell the difference between "" (empty string) and no value + // at all (nil). See the parsing of buildArgs in + // api/server/router/build/build_routes.go for even more info. 
+ BuildArgs map[string]*string + AuthConfigs map[string]AuthConfig + Context io.Reader + Labels map[string]string + // squash the resulting image's layers to the parent + // preserves the original image and creates a new one from the parent with all + // the changes applied to a single layer + Squash bool + // CacheFrom specifies images that are used for matching cache. Images + // specified here do not need to have a valid parent chain to match cache. + CacheFrom []string + SecurityOpt []string + ExtraHosts []string // List of extra hosts + Target string + SessionID string + Platform string + // Version specifies the version of the underlying builder to use + Version BuilderVersion + // BuildID is an optional identifier that can be passed together with the + // build request. The same identifier can be used to gracefully cancel the + // build with the cancel request. + BuildID string + // Outputs defines configurations for exporting build results. Only supported + // in BuildKit mode + Outputs []ImageBuildOutput +} + +// ImageBuildOutput defines configuration for exporting a build result +type ImageBuildOutput struct { + Type string + Attrs map[string]string +} + +// BuilderVersion sets the version of underlying builder to use +type BuilderVersion string + +const ( + // BuilderV1 is the first generation builder in docker daemon + BuilderV1 BuilderVersion = "1" + // BuilderBuildKit is builder based on moby/buildkit project + BuilderBuildKit = "2" +) + +// ImageBuildResponse holds information +// returned by a server after building +// an image. +type ImageBuildResponse struct { + Body io.ReadCloser + OSType string +} + +// ImageCreateOptions holds information to create images. +type ImageCreateOptions struct { + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry. + Platform string // Platform is the target platform of the image if it needs to be pulled from the registry. +} + +// ImageImportSource holds source information for ImageImport +type ImageImportSource struct { + Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this. + SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute. +} + +// ImageImportOptions holds information to import images from the client host. +type ImageImportOptions struct { + Tag string // Tag is the name to tag this image with. This attribute is deprecated. + Message string // Message is the message to tag the image with + Changes []string // Changes are the raw changes to apply to this image + Platform string // Platform is the target platform of the image +} + +// ImageListOptions holds parameters to filter the list of images with. +type ImageListOptions struct { + All bool + Filters filters.Args +} + +// ImageLoadResponse returns information to the client about a load process. +type ImageLoadResponse struct { + // Body must be closed to avoid a resource leak + Body io.ReadCloser + JSON bool +} + +// ImagePullOptions holds information to pull images. +type ImagePullOptions struct { + All bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + PrivilegeFunc RequestPrivilegeFunc + Platform string +} + +// RequestPrivilegeFunc is a function interface that +// clients can supply to retry operations after +// getting an authorization error.
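The `*string` values in `BuildArgs` exist so a declared-but-unset `ARG` (nil) can be told apart from one set to the empty string. A sketch of a build call reusing `cli` and `ctx`; the tag, Dockerfile, and arg names are placeholders, and the vendored `pkg/archive` helper is assumed for producing the tar context:

```go
goVersion := "1.19"
opts := types.ImageBuildOptions{
	Tags:       []string{"example/app:dev"},
	Dockerfile: "Dockerfile",
	BuildArgs: map[string]*string{
		"GO_VERSION": &goVersion, // ARG set to "1.19"
		"HTTP_PROXY": nil,        // ARG declared but deliberately unset (nil != "")
	},
}

// The build context must be a tar stream of the build directory.
buildContext, err := archive.TarWithOptions(".", &archive.TarOptions{})
if err != nil {
	log.Fatal(err)
}

resp, err := cli.ImageBuild(ctx, buildContext, opts)
if err != nil {
	log.Fatal(err)
}
defer resp.Body.Close()
io.Copy(os.Stdout, resp.Body) // stream of JSON progress messages
```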
+// This function returns the registry authentication +// header value in base 64 format, or an error +// if the privilege request fails. +type RequestPrivilegeFunc func() (string, error) + +//ImagePushOptions holds information to push images. +type ImagePushOptions ImagePullOptions + +// ImageRemoveOptions holds parameters to remove images. +type ImageRemoveOptions struct { + Force bool + PruneChildren bool +} + +// ImageSearchOptions holds parameters to search images with. +type ImageSearchOptions struct { + RegistryAuth string + PrivilegeFunc RequestPrivilegeFunc + Filters filters.Args + Limit int +} + +// ResizeOptions holds parameters to resize a tty. +// It can be used to resize container ttys and +// exec process ttys too. +type ResizeOptions struct { + Height uint + Width uint +} + +// NodeListOptions holds parameters to list nodes with. +type NodeListOptions struct { + Filters filters.Args +} + +// NodeRemoveOptions holds parameters to remove nodes with. +type NodeRemoveOptions struct { + Force bool +} + +// ServiceCreateOptions contains the options to use when creating a service. +type ServiceCreateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool +} + +// ServiceCreateResponse contains the information returned to a client +// on the creation of a new service. +type ServiceCreateResponse struct { + // ID is the ID of the created service. + ID string + // Warnings is a set of non-fatal warning messages to pass on to the user. + Warnings []string `json:",omitempty"` +} + +// Values for RegistryAuthFrom in ServiceUpdateOptions +const ( + RegistryAuthFromSpec = "spec" + RegistryAuthFromPreviousSpec = "previous-spec" +) + +// ServiceUpdateOptions contains the options to be used for updating services. +type ServiceUpdateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate + // into this field. While it does open API users up to racy writes, most + // users may not need that level of consistency in practice. + + // RegistryAuthFrom specifies where to find the registry authorization + // credentials if they are not given in EncodedRegistryAuth. Valid + // values are "spec" and "previous-spec". + RegistryAuthFrom string + + // Rollback indicates whether a server-side rollback should be + // performed. When this is set, the provided spec will be ignored. + // The valid values are "previous" and "none". An empty value is the + // same as "none". + Rollback string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool +} + +// ServiceListOptions holds parameters to list services with. 
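A sketch of how `RequestPrivilegeFunc` plugs into a pull, reusing `cli` and `ctx`; the credentials and image reference are placeholders, and the callback is assumed to be invoked only after the registry rejects the initial request:

```go
privilege := func() (string, error) {
	// Return a fresh base64url-encoded AuthConfig, the same format
	// as the X-Registry-Auth header.
	auth, err := json.Marshal(types.AuthConfig{Username: "user", Password: "secret"})
	if err != nil {
		return "", err
	}
	return base64.URLEncoding.EncodeToString(auth), nil
}

rc, err := cli.ImagePull(ctx, "registry.example.com/app:1.0", types.ImagePullOptions{
	PrivilegeFunc: privilege,
})
if err != nil {
	log.Fatal(err)
}
defer rc.Close()
io.Copy(io.Discard, rc) // the pull only completes once the stream is drained
```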
+type ServiceListOptions struct { + Filters filters.Args +} + +// ServiceInspectOptions holds parameters related to the "service inspect" +// operation. +type ServiceInspectOptions struct { + InsertDefaults bool +} + +// TaskListOptions holds parameters to list tasks with. +type TaskListOptions struct { + Filters filters.Args +} + +// PluginRemoveOptions holds parameters to remove plugins. +type PluginRemoveOptions struct { + Force bool +} + +// PluginEnableOptions holds parameters to enable plugins. +type PluginEnableOptions struct { + Timeout int +} + +// PluginDisableOptions holds parameters to disable plugins. +type PluginDisableOptions struct { + Force bool +} + +// PluginInstallOptions holds parameters to install a plugin. +type PluginInstallOptions struct { + Disabled bool + AcceptAllPermissions bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + RemoteRef string // RemoteRef is the plugin name on the registry + PrivilegeFunc RequestPrivilegeFunc + AcceptPermissionsFunc func(PluginPrivileges) (bool, error) + Args []string +} + +// SwarmUnlockKeyResponse contains the response for Engine API: +// GET /swarm/unlockkey +type SwarmUnlockKeyResponse struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} + +// PluginCreateOptions hold all options to plugin create. +type PluginCreateOptions struct { + RepoName string +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/configs.go b/extender/code/vendor/github.com/docker/docker/api/types/configs.go new file mode 100644 index 000000000..178e911a7 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/configs.go @@ -0,0 +1,64 @@ +package types // import "github.com/docker/docker/api/types" + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" +) + +// configs holds structs used for internal communication between the +// frontend (such as an http server) and the backend (such as the +// docker daemon). + +// ContainerCreateConfig is the parameter set to ContainerCreate() +type ContainerCreateConfig struct { + Name string + Config *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig + AdjustCPUShares bool +} + +// ContainerRmConfig holds arguments for the container remove +// operation. This struct is used to tell the backend what operations +// to perform. +type ContainerRmConfig struct { + ForceRemove, RemoveVolume, RemoveLink bool +} + +// ExecConfig is a small subset of the Config struct that holds the configuration +// for the exec feature of docker. +type ExecConfig struct { + User string // User that will run the command + Privileged bool // Is the container in privileged mode + Tty bool // Attach standard streams to a tty. + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStderr bool // Attach the standard error + AttachStdout bool // Attach the standard output + Detach bool // Execute in detach mode + DetachKeys string // Escape keys for detach + Env []string // Environment variables + WorkingDir string // Working directory + Cmd []string // Execution commands and args +} + +// PluginRmConfig holds arguments for plugin remove. +type PluginRmConfig struct { + ForceRemove bool +} + +// PluginEnableConfig holds arguments for plugin enable +type PluginEnableConfig struct { + Timeout int +} + +// PluginDisableConfig holds arguments for plugin disable. 
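`ExecConfig` (above, in configs.go) pairs with the `IDResponse` model vendored later in this diff: `ContainerExecCreate` returns the exec instance's ID, which is then started. A minimal sketch with a placeholder command:

```go
package main

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// runExec creates and starts an exec process in a running container.
func runExec(ctx context.Context, cli *client.Client, containerID string) error {
	created, err := cli.ContainerExecCreate(ctx, containerID, types.ExecConfig{
		Cmd:          []string{"sh", "-c", "echo hello"}, // placeholder command
		AttachStdout: true,
		AttachStderr: true,
	})
	if err != nil {
		return err
	}
	// created is the IDResponse carrying the exec instance's "Id".
	return cli.ContainerExecStart(ctx, created.ID, types.ExecStartCheck{})
}
```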
+type PluginDisableConfig struct { + ForceDisable bool +} + +// NetworkListConfig stores the options available for listing networks +type NetworkListConfig struct { + // TODO(@cpuguy83): naming is hard, this is pulled from what was being used in the router before moving here + Detailed bool + Verbose bool +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/container/config.go b/extender/code/vendor/github.com/docker/docker/api/types/container/config.go new file mode 100644 index 000000000..89ad08c23 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/container/config.go @@ -0,0 +1,69 @@ +package container // import "github.com/docker/docker/api/types/container" + +import ( + "time" + + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" +) + +// MinimumDuration puts a minimum on user configured duration. +// This is to prevent API error on time unit. For example, API may +// set 3 as healthcheck interval with intention of 3 seconds, but +// Docker interprets it as 3 nanoseconds. +const MinimumDuration = 1 * time.Millisecond + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} + +// Config contains the configuration data about a container. +// It should hold only portable information about the container. +// Here, "portable" means "independent from the host we are running on". +// Non-portable information *should* appear in HostConfig. +// All fields added to this struct must be marked `omitempty` to keep getting +// predictable hashes from the old `v1Compatibility` configuration. +type Config struct { + Hostname string // Hostname + Domainname string // Domainname + User string // User that will run the command(s) inside the container, also support user:group + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStdout bool // Attach the standard output + AttachStderr bool // Attach the standard error + ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after the 1 attached client disconnects. 
+ Env []string // List of environment variables to set in the container + Cmd strslice.StrSlice // Command to run when starting the container + Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) + Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) + Volumes map[string]struct{} // List of volumes (mounts) used for the container + WorkingDir string // Current directory (PWD) in which the command will be launched + Entrypoint strslice.StrSlice // Entrypoint to run when starting the container + NetworkDisabled bool `json:",omitempty"` // Is network disabled + MacAddress string `json:",omitempty"` // Mac Address of the container + OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile + Labels map[string]string // List of labels set to this container + StopSignal string `json:",omitempty"` // Signal to stop a container + StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container + Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/container/container_changes.go b/extender/code/vendor/github.com/docker/docker/api/types/container/container_changes.go new file mode 100644 index 000000000..c909d6ca3 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/container/container_changes.go @@ -0,0 +1,21 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerChangeResponseItem change item in response to ContainerChanges operation +// swagger:model ContainerChangeResponseItem +type ContainerChangeResponseItem struct { + + // Kind of change + // Required: true + Kind uint8 `json:"Kind"` + + // Path to file that has changed + // Required: true + Path string `json:"Path"` +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/container/container_create.go b/extender/code/vendor/github.com/docker/docker/api/types/container/container_create.go new file mode 100644 index 000000000..49efa0f2c --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/container/container_create.go @@ -0,0 +1,21 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerCreateCreatedBody OK response to ContainerCreate operation +// swagger:model ContainerCreateCreatedBody +type ContainerCreateCreatedBody struct { + + // The ID of the created container + // Required: true + ID string `json:"Id"` + + // Warnings encountered when creating the container + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/container/container_top.go b/extender/code/vendor/github.com/docker/docker/api/types/container/container_top.go new file mode 100644 index 000000000..ba41edcf3 --- /dev/null +++
b/extender/code/vendor/github.com/docker/docker/api/types/container/container_top.go @@ -0,0 +1,21 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerTopOKBody OK response to ContainerTop operation +// swagger:model ContainerTopOKBody +type ContainerTopOKBody struct { + + // Each process running in the container, where each process is an array of values corresponding to the titles + // Required: true + Processes [][]string `json:"Processes"` + + // The ps column titles + // Required: true + Titles []string `json:"Titles"` +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/container/container_update.go b/extender/code/vendor/github.com/docker/docker/api/types/container/container_update.go new file mode 100644 index 000000000..7630ae54c --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/container/container_update.go @@ -0,0 +1,17 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerUpdateOKBody OK response to ContainerUpdate operation +// swagger:model ContainerUpdateOKBody +type ContainerUpdateOKBody struct { + + // warnings + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/container/container_wait.go b/extender/code/vendor/github.com/docker/docker/api/types/container/container_wait.go new file mode 100644 index 000000000..9e3910a6b --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/container/container_wait.go @@ -0,0 +1,29 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerWaitOKBodyError container waiting error, if any +// swagger:model ContainerWaitOKBodyError +type ContainerWaitOKBodyError struct { + + // Details of an error + Message string `json:"Message,omitempty"` +} + +// ContainerWaitOKBody OK response to ContainerWait operation +// swagger:model ContainerWaitOKBody +type ContainerWaitOKBody struct { + + // error + // Required: true + Error *ContainerWaitOKBodyError `json:"Error"` + + // Exit code of the container + // Required: true + StatusCode int64 `json:"StatusCode"` +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/container/host_config.go b/extender/code/vendor/github.com/docker/docker/api/types/container/host_config.go new file mode 100644 index 000000000..c71010770 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/container/host_config.go @@ -0,0 +1,425 @@ +package container // import "github.com/docker/docker/api/types/container" + +import ( + "strings" + + "github.com/docker/docker/api/types/blkiodev" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" +
"github.com/docker/go-units" +) + +// Isolation represents the isolation technology of a container. The supported +// values are platform specific +type Isolation string + +// IsDefault indicates the default isolation technology of a container. On Linux this +// is the native driver. On Windows, this is a Windows Server Container. +func (i Isolation) IsDefault() bool { + return strings.ToLower(string(i)) == "default" || string(i) == "" +} + +// IsHyperV indicates the use of a Hyper-V partition for isolation +func (i Isolation) IsHyperV() bool { + return strings.ToLower(string(i)) == "hyperv" +} + +// IsProcess indicates the use of process isolation +func (i Isolation) IsProcess() bool { + return strings.ToLower(string(i)) == "process" +} + +const ( + // IsolationEmpty is unspecified (same behavior as default) + IsolationEmpty = Isolation("") + // IsolationDefault is the default isolation mode on current daemon + IsolationDefault = Isolation("default") + // IsolationProcess is process isolation mode + IsolationProcess = Isolation("process") + // IsolationHyperV is HyperV isolation mode + IsolationHyperV = Isolation("hyperv") +) + +// IpcMode represents the container ipc stack. +type IpcMode string + +// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared. +func (n IpcMode) IsPrivate() bool { + return n == "private" +} + +// IsHost indicates whether the container shares the host's ipc namespace. +func (n IpcMode) IsHost() bool { + return n == "host" +} + +// IsShareable indicates whether the container's ipc namespace can be shared with another container. +func (n IpcMode) IsShareable() bool { + return n == "shareable" +} + +// IsContainer indicates whether the container uses another container's ipc namespace. +func (n IpcMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// IsNone indicates whether container IpcMode is set to "none". +func (n IpcMode) IsNone() bool { + return n == "none" +} + +// IsEmpty indicates whether container IpcMode is empty +func (n IpcMode) IsEmpty() bool { + return n == "" +} + +// Valid indicates whether the ipc mode is valid. +func (n IpcMode) Valid() bool { + return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer() +} + +// Container returns the name of the container ipc stack is going to be used. +func (n IpcMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 && parts[0] == "container" { + return parts[1] + } + return "" +} + +// NetworkMode represents the container network stack. +type NetworkMode string + +// IsNone indicates whether container isn't using a network stack. +func (n NetworkMode) IsNone() bool { + return n == "none" +} + +// IsDefault indicates whether container uses the default network stack. +func (n NetworkMode) IsDefault() bool { + return n == "default" +} + +// IsPrivate indicates whether container uses its private network stack. +func (n NetworkMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsContainer indicates whether container uses a container network stack. +func (n NetworkMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// ConnectedContainer is the id of the container which network this container is connected to. 
+func (n NetworkMode) ConnectedContainer() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// UserDefined indicates user-created network +func (n NetworkMode) UserDefined() string { + if n.IsUserDefined() { + return string(n) + } + return "" +} + +// UsernsMode represents userns mode in the container. +type UsernsMode string + +// IsHost indicates whether the container uses the host's userns. +func (n UsernsMode) IsHost() bool { + return n == "host" +} + +// IsPrivate indicates whether the container uses a private userns. +func (n UsernsMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// Valid indicates whether the userns is valid. +func (n UsernsMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// CgroupSpec represents the cgroup to use for the container. +type CgroupSpec string + +// IsContainer indicates whether the container is using another container cgroup +func (c CgroupSpec) IsContainer() bool { + parts := strings.SplitN(string(c), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the cgroup spec is valid. +func (c CgroupSpec) Valid() bool { + return c.IsContainer() || c == "" +} + +// Container returns the name of the container whose cgroup will be used. +func (c CgroupSpec) Container() string { + parts := strings.SplitN(string(c), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// UTSMode represents the UTS namespace of the container. +type UTSMode string + +// IsPrivate indicates whether the container uses its private UTS namespace. +func (n UTSMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// IsHost indicates whether the container uses the host's UTS namespace. +func (n UTSMode) IsHost() bool { + return n == "host" +} + +// Valid indicates whether the UTS namespace is valid. +func (n UTSMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// PidMode represents the pid namespace of the container. +type PidMode string + +// IsPrivate indicates whether the container uses its own new pid namespace. +func (n PidMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsHost indicates whether the container uses the host's pid namespace. +func (n PidMode) IsHost() bool { + return n == "host" +} + +// IsContainer indicates whether the container uses a container's pid namespace. +func (n PidMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the pid namespace is valid. +func (n PidMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + case "container": + if len(parts) != 2 || parts[1] == "" { + return false + } + default: + return false + } + return true +} + +// Container returns the name of the container whose pid namespace is going to be used. +func (n PidMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} +
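These mode types are thin string wrappers with `container:<name>` style parsing; a quick demonstration of the helpers defined above:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// "container:web" points the network stack at another container.
	n := container.NetworkMode("container:web")
	fmt.Println(n.IsContainer())        // true
	fmt.Println(n.ConnectedContainer()) // "web"

	ipc := container.IpcMode("shareable")
	fmt.Println(ipc.IsShareable(), ipc.Valid()) // true true
}
```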
+// DeviceRequest represents a request for devices from a device driver. +// Used by GPU device drivers. +type DeviceRequest struct { + Driver string // Name of device driver + Count int // Number of devices to request (-1 = All) + DeviceIDs []string // List of device IDs as recognizable by the device driver + Capabilities [][]string // An OR list of AND lists of device capabilities (e.g. "gpu") + Options map[string]string // Options to pass onto the device driver +} + +// DeviceMapping represents the device mapping between the host and the container. +type DeviceMapping struct { + PathOnHost string + PathInContainer string + CgroupPermissions string +} + +// RestartPolicy represents the restart policies of the container. +type RestartPolicy struct { + Name string + MaximumRetryCount int +} + +// IsNone indicates whether the container has the "no" restart policy. +// This means the container will not automatically restart when exiting. +func (rp *RestartPolicy) IsNone() bool { + return rp.Name == "no" || rp.Name == "" +} + +// IsAlways indicates whether the container has the "always" restart policy. +// This means the container will automatically restart regardless of the exit status. +func (rp *RestartPolicy) IsAlways() bool { + return rp.Name == "always" +} + +// IsOnFailure indicates whether the container has the "on-failure" restart policy. +// This means the container will automatically restart if it exits with a non-zero exit status. +func (rp *RestartPolicy) IsOnFailure() bool { + return rp.Name == "on-failure" +} + +// IsUnlessStopped indicates whether the container has the +// "unless-stopped" restart policy. This means the container will +// automatically restart unless the user has put it into a stopped state. +func (rp *RestartPolicy) IsUnlessStopped() bool { + return rp.Name == "unless-stopped" +} + +// IsSame compares two RestartPolicy to see if they are the same +func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { + return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount +} + +// LogMode is a type to define the available modes for logging +// These modes affect how logs are handled when log messages start piling up. +type LogMode string + +// Available logging modes +const ( + LogModeUnset = "" + LogModeBlocking LogMode = "blocking" + LogModeNonBlock LogMode = "non-blocking" +) + +// LogConfig represents the logging configuration of the container. +type LogConfig struct { + Type string + Config map[string]string +} + +// Resources contains container's resources (cgroups config, ulimits...) +type Resources struct { + // Applicable to all platforms + CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) + Memory int64 // Memory limit (in bytes) + NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10^-9 CPUs. + + // Applicable to UNIX platforms + CgroupParent string // Parent cgroup. + BlkioWeight uint16 // Block IO weight (relative weight vs. other containers) + BlkioWeightDevice []*blkiodev.WeightDevice + BlkioDeviceReadBps []*blkiodev.ThrottleDevice + BlkioDeviceWriteBps []*blkiodev.ThrottleDevice + BlkioDeviceReadIOps []*blkiodev.ThrottleDevice + BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice + CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period + CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota + CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period + CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime + CpusetCpus string // CpusetCpus 0-2, 0,1 + CpusetMems string // CpusetMems 0-2, 0,1 + Devices []DeviceMapping // List of devices to map inside the container + DeviceCgroupRules []string // List of rules to be added to the device cgroup + DeviceRequests []DeviceRequest // List of device requests for device drivers + DiskQuota int64 // Disk limit (in bytes) + KernelMemory int64 // Kernel memory limit (in bytes) + KernelMemoryTCP int64 // Hard limit for kernel TCP buffer memory (in bytes) + MemoryReservation int64 // Memory soft limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap + MemorySwappiness *int64 // Tuning container memory swappiness behaviour + OomKillDisable *bool // Whether to disable OOM Killer or not + PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. + Ulimits []*units.Ulimit // List of ulimits to be set in the container + + // Applicable to Windows + CPUCount int64 `json:"CpuCount"` // CPU count + CPUPercent int64 `json:"CpuPercent"` // CPU percent + IOMaximumIOps uint64 // Maximum IOps for the container system drive + IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive +} + +// UpdateConfig holds the mutable attributes of a Container. +// Those attributes can be updated at runtime. +type UpdateConfig struct { + // Contains container's resources (cgroups, ulimits) + Resources + RestartPolicy RestartPolicy +} + +// HostConfig the non-portable Config structure of a container. +// Here, "non-portable" means "dependent on the host we are running on". +// Portable information *should* appear in Config.
+type HostConfig struct { + // Applicable to all platforms + Binds []string // List of volume bindings for this container + ContainerIDFile string // File (path) where the containerId is written + LogConfig LogConfig // Configuration of the logs for this container + NetworkMode NetworkMode // Network mode to use for the container + PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host + RestartPolicy RestartPolicy // Restart policy to be used for the container + AutoRemove bool // Automatically remove container when it exits + VolumeDriver string // Name of the volume driver used to mount volumes + VolumesFrom []string // List of volumes to take from other container + + // Applicable to UNIX platforms + CapAdd strslice.StrSlice // List of kernel capabilities to add to the container + CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container + Capabilities []string `json:"Capabilities"` // List of kernel capabilities to be available for container (this overrides the default set) + DNS []string `json:"Dns"` // List of DNS server to lookup + DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for + DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for + ExtraHosts []string // List of extra hosts + GroupAdd []string // List of additional groups that the container process will run as + IpcMode IpcMode // IPC namespace to use for the container + Cgroup CgroupSpec // Cgroup to use for the container + Links []string // List of links (in the name:alias form) + OomScoreAdj int // Container preference for OOM-killing + PidMode PidMode // PID namespace to use for the container + Privileged bool // Is the container in privileged mode + PublishAllPorts bool // Should docker publish all exposed port for the container + ReadonlyRootfs bool // Is the container root filesystem in read-only + SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. + StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container. + Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container + UTSMode UTSMode // UTS namespace to use for the container + UsernsMode UsernsMode // The user namespace to use for the container + ShmSize int64 // Total shm memory usage + Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container + Runtime string `json:",omitempty"` // Runtime to use with this container + + // Applicable to Windows + ConsoleSize [2]uint // Initial console size (height,width) + Isolation Isolation // Isolation technology of the container (e.g. 
default, hyperv) + + // Contains container's resources (cgroups, ulimits) + Resources + + // Mounts specs used by the container + Mounts []mount.Mount `json:",omitempty"` + + // MaskedPaths is the list of paths to be masked inside the container (this overrides the default set of paths) + MaskedPaths []string + + // ReadonlyPaths is the list of paths to be set as read-only inside the container (this overrides the default set of paths) + ReadonlyPaths []string + + // Run a custom init inside the container, if null, use the daemon's configured settings + Init *bool `json:",omitempty"` +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/extender/code/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go new file mode 100644 index 000000000..cf6fdf440 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go @@ -0,0 +1,41 @@ +// +build !windows + +package container // import "github.com/docker/docker/api/types/container" + +// IsValid indicates if an isolation technology is valid +func (i Isolation) IsValid() bool { + return i.IsDefault() +} + +// NetworkName returns the name of the network stack. +func (n NetworkMode) NetworkName() string { + if n.IsBridge() { + return "bridge" + } else if n.IsHost() { + return "host" + } else if n.IsContainer() { + return "container" + } else if n.IsNone() { + return "none" + } else if n.IsDefault() { + return "default" + } else if n.IsUserDefined() { + return n.UserDefined() + } + return "" +} + +// IsBridge indicates whether container uses the bridge network stack +func (n NetworkMode) IsBridge() bool { + return n == "bridge" +} + +// IsHost indicates whether container uses the host network stack. +func (n NetworkMode) IsHost() bool { + return n == "host" +} + +// IsUserDefined indicates user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/extender/code/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go new file mode 100644 index 000000000..99f803a5b --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go @@ -0,0 +1,40 @@ +package container // import "github.com/docker/docker/api/types/container" + +// IsBridge indicates whether container uses the bridge network stack +// in windows it is given the name NAT +func (n NetworkMode) IsBridge() bool { + return n == "nat" +} + +// IsHost indicates whether container uses the host network stack. +// returns false as this is not supported by windows +func (n NetworkMode) IsHost() bool { + return false +} + +// IsUserDefined indicates user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer() +} + +// IsValid indicates if an isolation technology is valid +func (i Isolation) IsValid() bool { + return i.IsDefault() || i.IsHyperV() || i.IsProcess() +} + +// NetworkName returns the name of the network stack. 
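Putting the pieces together, `Config` (portable) and `HostConfig` (host-specific, with the embedded `Resources`) are submitted as a pair when creating a container. A minimal sketch; note that `ContainerCreate`'s signature differs across client versions (newer releases add a `*specs.Platform` parameter before the name), and the image, limits, and name below are placeholders:

```go
package main

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

// createAndStart creates a container with resource limits and starts it.
func createAndStart(ctx context.Context, cli *client.Client) (string, error) {
	created, err := cli.ContainerCreate(ctx,
		&container.Config{Image: "alpine", Cmd: []string{"true"}},
		&container.HostConfig{
			RestartPolicy: container.RestartPolicy{Name: "on-failure", MaximumRetryCount: 3},
			Resources: container.Resources{
				Memory:   512 * 1024 * 1024, // 512 MiB hard limit
				NanoCPUs: 2e9,               // 2 CPUs, in units of 10^-9 CPUs
			},
		},
		nil,           // *network.NetworkingConfig
		"example-app", // placeholder container name
	)
	if err != nil {
		return "", err
	}
	// created is the ContainerCreateCreatedBody vendored earlier in this diff.
	return created.ID, cli.ContainerStart(ctx, created.ID, types.ContainerStartOptions{})
}
```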
+func (n NetworkMode) NetworkName() string { + if n.IsDefault() { + return "default" + } else if n.IsBridge() { + return "nat" + } else if n.IsNone() { + return "none" + } else if n.IsContainer() { + return "container" + } else if n.IsUserDefined() { + return n.UserDefined() + } + + return "" +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/container/waitcondition.go b/extender/code/vendor/github.com/docker/docker/api/types/container/waitcondition.go new file mode 100644 index 000000000..cd8311f99 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/container/waitcondition.go @@ -0,0 +1,22 @@ +package container // import "github.com/docker/docker/api/types/container" + +// WaitCondition is a type used to specify a container state for which +// to wait. +type WaitCondition string + +// Possible WaitCondition Values. +// +// WaitConditionNotRunning (default) is used to wait for any of the non-running +// states: "created", "exited", "dead", "removing", or "removed". +// +// WaitConditionNextExit is used to wait for the next time the state changes +// to a non-running state. If the state is currently "created" or "exited", +// this would cause Wait() to block until either the container runs and exits +// or is removed. +// +// WaitConditionRemoved is used to wait for the container to be removed. +const ( + WaitConditionNotRunning WaitCondition = "not-running" + WaitConditionNextExit WaitCondition = "next-exit" + WaitConditionRemoved WaitCondition = "removed" +) diff --git a/extender/code/vendor/github.com/docker/docker/api/types/error_response.go b/extender/code/vendor/github.com/docker/docker/api/types/error_response.go new file mode 100644 index 000000000..dc942d9d9 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/error_response.go @@ -0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ErrorResponse Represents an error. +// swagger:model ErrorResponse +type ErrorResponse struct { + + // The error message. + // Required: true + Message string `json:"message"` +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/filters/parse.go b/extender/code/vendor/github.com/docker/docker/api/types/filters/parse.go new file mode 100644 index 000000000..d8f19ae22 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -0,0 +1,366 @@ +/*Package filters provides tools for encoding a mapping of keys to a set of +multiple values. +*/ +package filters // import "github.com/docker/docker/api/types/filters" + +import ( + "encoding/json" + "errors" + "regexp" + "strings" + + "github.com/docker/docker/api/types/versions" +) + +// Args stores a mapping of keys to a set of multiple values. +type Args struct { + fields map[string]map[string]bool +} + +// KeyValuePair are used to initialize a new Args +type KeyValuePair struct { + Key string + Value string +} + +// Arg creates a new KeyValuePair for initializing Args +func Arg(key, value string) KeyValuePair { + return KeyValuePair{Key: key, Value: value} +} + +// NewArgs returns a new Args populated with the initial args +func NewArgs(initialArgs ...KeyValuePair) Args { + args := Args{fields: map[string]map[string]bool{}} + for _, arg := range initialArgs { + args.Add(arg.Key, arg.Value) + } + return args +} + +// ParseFlag parses a key=value string and adds it to an Args. 
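`filters.Args` is the key-to-set-of-values structure referenced by the various list options vendored above; a minimal sketch of building one and handing it to `ImageList` (the label value is a placeholder):

```go
package main

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

// listWebImages filters the image list by a label.
func listWebImages(ctx context.Context, cli *client.Client) ([]types.ImageSummary, error) {
	args := filters.NewArgs(
		filters.Arg("label", "role=web"),
		filters.Arg("dangling", "false"),
	)
	return cli.ImageList(ctx, types.ImageListOptions{Filters: args})
}
```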
+// +// Deprecated: Use Args.Add() +func ParseFlag(arg string, prev Args) (Args, error) { + filters := prev + if len(arg) == 0 { + return filters, nil + } + + if !strings.Contains(arg, "=") { + return filters, ErrBadFormat + } + + f := strings.SplitN(arg, "=", 2) + + name := strings.ToLower(strings.TrimSpace(f[0])) + value := strings.TrimSpace(f[1]) + + filters.Add(name, value) + + return filters, nil +} + +// ErrBadFormat is an error returned when a filter is not in the form key=value +// +// Deprecated: this error will be removed in a future version +var ErrBadFormat = errors.New("bad format of filter (expected name=value)") + +// ToParam encodes the Args as args JSON encoded string +// +// Deprecated: use ToJSON +func ToParam(a Args) (string, error) { + return ToJSON(a) +} + +// MarshalJSON returns a JSON byte representation of the Args +func (args Args) MarshalJSON() ([]byte, error) { + if len(args.fields) == 0 { + return []byte{}, nil + } + return json.Marshal(args.fields) +} + +// ToJSON returns the Args as a JSON encoded string +func ToJSON(a Args) (string, error) { + if a.Len() == 0 { + return "", nil + } + buf, err := json.Marshal(a) + return string(buf), err +} + +// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22 +// then the encoded format will use an older legacy format where the values are a +// list of strings, instead of a set. +// +// Deprecated: Use ToJSON +func ToParamWithVersion(version string, a Args) (string, error) { + if a.Len() == 0 { + return "", nil + } + + if version != "" && versions.LessThan(version, "1.22") { + buf, err := json.Marshal(convertArgsToSlice(a.fields)) + return string(buf), err + } + + return ToJSON(a) +} + +// FromParam decodes a JSON encoded string into Args +// +// Deprecated: use FromJSON +func FromParam(p string) (Args, error) { + return FromJSON(p) +} + +// FromJSON decodes a JSON encoded string into Args +func FromJSON(p string) (Args, error) { + args := NewArgs() + + if p == "" { + return args, nil + } + + raw := []byte(p) + err := json.Unmarshal(raw, &args) + if err == nil { + return args, nil + } + + // Fallback to parsing arguments in the legacy slice format + deprecated := map[string][]string{} + if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil { + return args, err + } + + args.fields = deprecatedArgs(deprecated) + return args, nil +} + +// UnmarshalJSON populates the Args from JSON encode bytes +func (args Args) UnmarshalJSON(raw []byte) error { + if len(raw) == 0 { + return nil + } + return json.Unmarshal(raw, &args.fields) +} + +// Get returns the list of values associated with the key +func (args Args) Get(key string) []string { + values := args.fields[key] + if values == nil { + return make([]string, 0) + } + slice := make([]string, 0, len(values)) + for key := range values { + slice = append(slice, key) + } + return slice +} + +// Add a new value to the set of values +func (args Args) Add(key, value string) { + if _, ok := args.fields[key]; ok { + args.fields[key][value] = true + } else { + args.fields[key] = map[string]bool{value: true} + } +} + +// Del removes a value from the set +func (args Args) Del(key, value string) { + if _, ok := args.fields[key]; ok { + delete(args.fields[key], value) + if len(args.fields[key]) == 0 { + delete(args.fields, key) + } + } +} + +// Len returns the number of keys in the mapping +func (args Args) Len() int { + return len(args.fields) +} + +// MatchKVList returns true if all the pairs in sources exist as key=value +// pairs in the mapping at 
key, or if there are no values at key. +func (args Args) MatchKVList(key string, sources map[string]string) bool { + fieldValues := args.fields[key] + + //do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + + if len(sources) == 0 { + return false + } + + for value := range fieldValues { + testKV := strings.SplitN(value, "=", 2) + + v, ok := sources[testKV[0]] + if !ok { + return false + } + if len(testKV) == 2 && testKV[1] != v { + return false + } + } + + return true +} + +// Match returns true if any of the values at key match the source string +func (args Args) Match(field, source string) bool { + if args.ExactMatch(field, source) { + return true + } + + fieldValues := args.fields[field] + for name2match := range fieldValues { + match, err := regexp.MatchString(name2match, source) + if err != nil { + continue + } + if match { + return true + } + } + return false +} + +// ExactMatch returns true if the source matches exactly one of the values. +func (args Args) ExactMatch(key, source string) bool { + fieldValues, ok := args.fields[key] + //do not filter if there is no filter set or cannot determine filter + if !ok || len(fieldValues) == 0 { + return true + } + + // try to match full name value to avoid O(N) regular expression matching + return fieldValues[source] +} + +// UniqueExactMatch returns true if there is only one value and the source +// matches exactly the value. +func (args Args) UniqueExactMatch(key, source string) bool { + fieldValues := args.fields[key] + //do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + if len(args.fields[key]) != 1 { + return false + } + + // try to match full name value to avoid O(N) regular expression matching + return fieldValues[source] +} + +// FuzzyMatch returns true if the source matches exactly one value, or the +// source has one of the values as a prefix. +func (args Args) FuzzyMatch(key, source string) bool { + if args.ExactMatch(key, source) { + return true + } + + fieldValues := args.fields[key] + for prefix := range fieldValues { + if strings.HasPrefix(source, prefix) { + return true + } + } + return false +} + +// Include returns true if the key exists in the mapping +// +// Deprecated: use Contains +func (args Args) Include(field string) bool { + _, ok := args.fields[field] + return ok +} + +// Contains returns true if the key exists in the mapping +func (args Args) Contains(field string) bool { + _, ok := args.fields[field] + return ok +} + +type invalidFilter string + +func (e invalidFilter) Error() string { + return "Invalid filter '" + string(e) + "'" +} + +func (invalidFilter) InvalidParameter() {} + +// Validate compared the set of accepted keys against the keys in the mapping. +// An error is returned if any mapping keys are not in the accepted set. +func (args Args) Validate(accepted map[string]bool) error { + for name := range args.fields { + if !accepted[name] { + return invalidFilter(name) + } + } + return nil +} + +// WalkValues iterates over the list of values for a key in the mapping and calls +// op() for each value. If op returns an error the iteration stops and the +// error is returned. +func (args Args) WalkValues(field string, op func(value string) error) error { + if _, ok := args.fields[field]; !ok { + return nil + } + for v := range args.fields[field] { + if err := op(v); err != nil { + return err + } + } + return nil +} + +// Clone returns a copy of args. 
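The matching helpers treat an absent key as "no filter set", which is easy to trip over; a small demonstration of the semantics implemented above:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	args := filters.NewArgs(filters.Arg("name", "web"))
	// A key with no values set does not filter, so missing keys match.
	fmt.Println(args.ExactMatch("status", "running")) // true (no "status" filter)
	fmt.Println(args.ExactMatch("name", "web"))       // true
	fmt.Println(args.FuzzyMatch("name", "web-1"))     // true ("web" is a prefix)
	fmt.Println(args.FuzzyMatch("name", "frontend"))  // false
}
```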
+func (args Args) Clone() (newArgs Args) { + newArgs.fields = make(map[string]map[string]bool, len(args.fields)) + for k, m := range args.fields { + var mm map[string]bool + if m != nil { + mm = make(map[string]bool, len(m)) + for kk, v := range m { + mm[kk] = v + } + } + newArgs.fields[k] = mm + } + return newArgs +} + +func deprecatedArgs(d map[string][]string) map[string]map[string]bool { + m := map[string]map[string]bool{} + for k, v := range d { + values := map[string]bool{} + for _, vv := range v { + values[vv] = true + } + m[k] = values + } + return m +} + +func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { + m := map[string][]string{} + for k, v := range f { + values := []string{} + for kk := range v { + if v[kk] { + values = append(values, kk) + } + } + m[k] = values + } + return m +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/graph_driver_data.go b/extender/code/vendor/github.com/docker/docker/api/types/graph_driver_data.go new file mode 100644 index 000000000..4d9bf1c62 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/graph_driver_data.go @@ -0,0 +1,17 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// GraphDriverData Information about a container's graph driver. +// swagger:model GraphDriverData +type GraphDriverData struct { + + // data + // Required: true + Data map[string]string `json:"Data"` + + // name + // Required: true + Name string `json:"Name"` +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/id_response.go b/extender/code/vendor/github.com/docker/docker/api/types/id_response.go new file mode 100644 index 000000000..7592d2f8b --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/id_response.go @@ -0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// IDResponse Response to an API call that returns just an Id +// swagger:model IdResponse +type IDResponse struct { + + // The id of the newly created object. + // Required: true + ID string `json:"Id"` +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/image_delete_response_item.go b/extender/code/vendor/github.com/docker/docker/api/types/image_delete_response_item.go new file mode 100644 index 000000000..b9a65a0d8 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/image_delete_response_item.go @@ -0,0 +1,15 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ImageDeleteResponseItem image delete response item +// swagger:model ImageDeleteResponseItem +type ImageDeleteResponseItem struct { + + // The image ID of an image that was deleted + Deleted string `json:"Deleted,omitempty"` + + // The image ID of an image that was untagged + Untagged string `json:"Untagged,omitempty"` +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/image_summary.go b/extender/code/vendor/github.com/docker/docker/api/types/image_summary.go new file mode 100644 index 000000000..e145b3dcf --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/image_summary.go @@ -0,0 +1,49 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// ImageSummary image summary +// swagger:model ImageSummary +type ImageSummary struct { + + // containers + // Required: true + Containers int64 `json:"Containers"` + + // created + // Required: true + Created int64 `json:"Created"` + + // Id + // Required: true + ID string `json:"Id"` + + // labels + // Required: true + Labels map[string]string `json:"Labels"` + + // parent Id + // Required: true + ParentID string `json:"ParentId"` + + // repo digests + // Required: true + RepoDigests []string `json:"RepoDigests"` + + // repo tags + // Required: true + RepoTags []string `json:"RepoTags"` + + // shared size + // Required: true + SharedSize int64 `json:"SharedSize"` + + // size + // Required: true + Size int64 `json:"Size"` + + // virtual size + // Required: true + VirtualSize int64 `json:"VirtualSize"` +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/mount/mount.go b/extender/code/vendor/github.com/docker/docker/api/types/mount/mount.go new file mode 100644 index 000000000..ab4446b38 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -0,0 +1,131 @@ +package mount // import "github.com/docker/docker/api/types/mount" + +import ( + "os" +) + +// Type represents the type of a mount. +type Type string + +// Type constants +const ( + // TypeBind is the type for mounting host dir + TypeBind Type = "bind" + // TypeVolume is the type for remote storage volumes + TypeVolume Type = "volume" + // TypeTmpfs is the type for mounting tmpfs + TypeTmpfs Type = "tmpfs" + // TypeNamedPipe is the type for mounting Windows named pipes + TypeNamedPipe Type = "npipe" +) + +// Mount represents a mount (volume). +type Mount struct { + Type Type `json:",omitempty"` + // Source specifies the name of the mount. Depending on mount type, this + // may be a volume name or a host path, or even ignored. + // Source is not supported for tmpfs (must be an empty value) + Source string `json:",omitempty"` + Target string `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + Consistency Consistency `json:",omitempty"` + + BindOptions *BindOptions `json:",omitempty"` + VolumeOptions *VolumeOptions `json:",omitempty"` + TmpfsOptions *TmpfsOptions `json:",omitempty"` +} + +// Propagation represents the propagation of a mount. +type Propagation string + +const ( + // PropagationRPrivate RPRIVATE + PropagationRPrivate Propagation = "rprivate" + // PropagationPrivate PRIVATE + PropagationPrivate Propagation = "private" + // PropagationRShared RSHARED + PropagationRShared Propagation = "rshared" + // PropagationShared SHARED + PropagationShared Propagation = "shared" + // PropagationRSlave RSLAVE + PropagationRSlave Propagation = "rslave" + // PropagationSlave SLAVE + PropagationSlave Propagation = "slave" +) + +// Propagations is the list of all valid mount propagations +var Propagations = []Propagation{ + PropagationRPrivate, + PropagationPrivate, + PropagationRShared, + PropagationShared, + PropagationRSlave, + PropagationSlave, +} + +// Consistency represents the consistency requirements of a mount. 
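A sketch of how `mount.Mount` values are declared for `HostConfig.Mounts`, covering a bind mount and a tmpfs mount; the paths and size are placeholders, and `Source` must stay empty for tmpfs:

```go
package main

import "github.com/docker/docker/api/types/mount"

// exampleMounts declares one bind mount and one tmpfs mount.
var exampleMounts = []mount.Mount{
	{
		Type:        mount.TypeBind,
		Source:      "/var/data", // host path
		Target:      "/data",     // path inside the container
		BindOptions: &mount.BindOptions{Propagation: mount.PropagationRPrivate},
	},
	{
		Type:   mount.TypeTmpfs,
		Target: "/scratch",
		// Source is intentionally empty: tmpfs mounts reject a source.
		TmpfsOptions: &mount.TmpfsOptions{SizeBytes: 64 * 1024 * 1024, Mode: 0o700},
	},
}
```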
+type Consistency string + +const ( + // ConsistencyFull guarantees bind mount-like consistency + ConsistencyFull Consistency = "consistent" + // ConsistencyCached mounts can cache read data and FS structure + ConsistencyCached Consistency = "cached" + // ConsistencyDelegated mounts can cache read and written data and structure + ConsistencyDelegated Consistency = "delegated" + // ConsistencyDefault provides "consistent" behavior unless overridden + ConsistencyDefault Consistency = "default" +) + +// BindOptions defines options specific to mounts of type "bind". +type BindOptions struct { + Propagation Propagation `json:",omitempty"` + NonRecursive bool `json:",omitempty"` +} + +// VolumeOptions represents the options for a mount of type volume. +type VolumeOptions struct { + NoCopy bool `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + DriverConfig *Driver `json:",omitempty"` +} + +// Driver represents a volume driver. +type Driver struct { + Name string `json:",omitempty"` + Options map[string]string `json:",omitempty"` +} + +// TmpfsOptions defines options specific to mounts of type "tmpfs". +type TmpfsOptions struct { + // Size sets the size of the tmpfs, in bytes. + // + // This will be converted to an operating system specific value + // depending on the host. For example, on linux, it will be converted to + // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with + // docker, uses a straight byte value. + // + // Percentages are not supported. + SizeBytes int64 `json:",omitempty"` + // Mode of the tmpfs upon creation + Mode os.FileMode `json:",omitempty"` + + // TODO(stevvooe): There are several more tmpfs flags, specified in the + // daemon, that are accepted. Only the most basic are added for now. + // + // From docker/docker/pkg/mount/flags.go: + // + // var validFlags = map[string]bool{ + // "": true, + // "size": true, X + // "mode": true, X + // "uid": true, + // "gid": true, + // "nr_inodes": true, + // "nr_blocks": true, + // "mpol": true, + // } + // + // Some of these may be straightforward to add, but others, such as + // uid/gid have implications in a clustered system. 
+} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/network/network.go b/extender/code/vendor/github.com/docker/docker/api/types/network/network.go new file mode 100644 index 000000000..71e97338f --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/network/network.go @@ -0,0 +1,127 @@ +package network // import "github.com/docker/docker/api/types/network" +import ( + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/errdefs" +) + +// Address represents an IP address +type Address struct { + Addr string + PrefixLen int +} + +// IPAM represents IP Address Management +type IPAM struct { + Driver string + Options map[string]string //Per network IPAM driver options + Config []IPAMConfig +} + +// IPAMConfig represents IPAM configurations +type IPAMConfig struct { + Subnet string `json:",omitempty"` + IPRange string `json:",omitempty"` + Gateway string `json:",omitempty"` + AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` +} + +// EndpointIPAMConfig represents IPAM configurations for the endpoint +type EndpointIPAMConfig struct { + IPv4Address string `json:",omitempty"` + IPv6Address string `json:",omitempty"` + LinkLocalIPs []string `json:",omitempty"` +} + +// Copy makes a copy of the endpoint ipam config +func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { + cfgCopy := *cfg + cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) + cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) + return &cfgCopy +} + +// PeerInfo represents one peer of an overlay network +type PeerInfo struct { + Name string + IP string +} + +// EndpointSettings stores the network endpoint details +type EndpointSettings struct { + // Configurations + IPAMConfig *EndpointIPAMConfig + Links []string + Aliases []string + // Operational data + NetworkID string + EndpointID string + Gateway string + IPAddress string + IPPrefixLen int + IPv6Gateway string + GlobalIPv6Address string + GlobalIPv6PrefixLen int + MacAddress string + DriverOpts map[string]string +} + +// Task carries the information about one backend task +type Task struct { + Name string + EndpointID string + EndpointIP string + Info map[string]string +} + +// ServiceInfo represents service parameters with the list of service's tasks +type ServiceInfo struct { + VIP string + Ports []string + LocalLBIndex int + Tasks []Task +} + +// Copy makes a deep copy of `EndpointSettings` +func (es *EndpointSettings) Copy() *EndpointSettings { + epCopy := *es + if es.IPAMConfig != nil { + epCopy.IPAMConfig = es.IPAMConfig.Copy() + } + + if es.Links != nil { + links := make([]string, 0, len(es.Links)) + epCopy.Links = append(links, es.Links...) + } + + if es.Aliases != nil { + aliases := make([]string, 0, len(es.Aliases)) + epCopy.Aliases = append(aliases, es.Aliases...) 
+ } + return &epCopy +} + +// NetworkingConfig represents the container's networking configuration for each of its interfaces +// Carries the networking configs specified in the `docker run` and `docker network connect` commands +type NetworkingConfig struct { + EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network +} + +// ConfigReference specifies the source which provides a network's configuration +type ConfigReference struct { + Network string +} + +var acceptedFilters = map[string]bool{ + "dangling": true, + "driver": true, + "id": true, + "label": true, + "name": true, + "scope": true, + "type": true, +} + +// ValidateFilters validates the list of filter args with the available filters. +func ValidateFilters(filter filters.Args) error { + return errdefs.InvalidParameter(filter.Validate(acceptedFilters)) +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/plugin.go b/extender/code/vendor/github.com/docker/docker/api/types/plugin.go new file mode 100644 index 000000000..abae48b9a --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/plugin.go @@ -0,0 +1,203 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Plugin A plugin for the Engine API +// swagger:model Plugin +type Plugin struct { + + // config + // Required: true + Config PluginConfig `json:"Config"` + + // True if the plugin is running. False if the plugin is not running, only installed. + // Required: true + Enabled bool `json:"Enabled"` + + // Id + ID string `json:"Id,omitempty"` + + // name + // Required: true + Name string `json:"Name"` + + // plugin remote reference used to push/pull the plugin + PluginReference string `json:"PluginReference,omitempty"` + + // settings + // Required: true + Settings PluginSettings `json:"Settings"` +} + +// PluginConfig The config of a plugin. 
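`NetworkingConfig` carries one `EndpointSettings` entry per network at container-creation time; a minimal sketch with a placeholder network name, alias, and static address:

```go
package main

import "github.com/docker/docker/api/types/network"

// exampleNetConfig attaches a container to one user-defined network.
var exampleNetConfig = &network.NetworkingConfig{
	EndpointsConfig: map[string]*network.EndpointSettings{
		"app-net": {
			Aliases:    []string{"db"}, // extra DNS name on this network
			IPAMConfig: &network.EndpointIPAMConfig{IPv4Address: "172.20.0.10"},
		},
	},
}
```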
+// swagger:model PluginConfig +type PluginConfig struct { + + // args + // Required: true + Args PluginConfigArgs `json:"Args"` + + // description + // Required: true + Description string `json:"Description"` + + // Docker Version used to create the plugin + DockerVersion string `json:"DockerVersion,omitempty"` + + // documentation + // Required: true + Documentation string `json:"Documentation"` + + // entrypoint + // Required: true + Entrypoint []string `json:"Entrypoint"` + + // env + // Required: true + Env []PluginEnv `json:"Env"` + + // interface + // Required: true + Interface PluginConfigInterface `json:"Interface"` + + // ipc host + // Required: true + IpcHost bool `json:"IpcHost"` + + // linux + // Required: true + Linux PluginConfigLinux `json:"Linux"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` + + // network + // Required: true + Network PluginConfigNetwork `json:"Network"` + + // pid host + // Required: true + PidHost bool `json:"PidHost"` + + // propagated mount + // Required: true + PropagatedMount string `json:"PropagatedMount"` + + // user + User PluginConfigUser `json:"User,omitempty"` + + // work dir + // Required: true + WorkDir string `json:"WorkDir"` + + // rootfs + Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"` +} + +// PluginConfigArgs plugin config args +// swagger:model PluginConfigArgs +type PluginConfigArgs struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value []string `json:"Value"` +} + +// PluginConfigInterface The interface between Docker and the plugin +// swagger:model PluginConfigInterface +type PluginConfigInterface struct { + + // Protocol to use for clients connecting to the plugin. + ProtocolScheme string `json:"ProtocolScheme,omitempty"` + + // socket + // Required: true + Socket string `json:"Socket"` + + // types + // Required: true + Types []PluginInterfaceType `json:"Types"` +} + +// PluginConfigLinux plugin config linux +// swagger:model PluginConfigLinux +type PluginConfigLinux struct { + + // allow all devices + // Required: true + AllowAllDevices bool `json:"AllowAllDevices"` + + // capabilities + // Required: true + Capabilities []string `json:"Capabilities"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` +} + +// PluginConfigNetwork plugin config network +// swagger:model PluginConfigNetwork +type PluginConfigNetwork struct { + + // type + // Required: true + Type string `json:"Type"` +} + +// PluginConfigRootfs plugin config rootfs +// swagger:model PluginConfigRootfs +type PluginConfigRootfs struct { + + // diff ids + DiffIds []string `json:"diff_ids"` + + // type + Type string `json:"type,omitempty"` +} + +// PluginConfigUser plugin config user +// swagger:model PluginConfigUser +type PluginConfigUser struct { + + // g ID + GID uint32 `json:"GID,omitempty"` + + // UID + UID uint32 `json:"UID,omitempty"` +} + +// PluginSettings Settings that can be modified by users. 
+// swagger:model PluginSettings +type PluginSettings struct { + + // args + // Required: true + Args []string `json:"Args"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` + + // env + // Required: true + Env []string `json:"Env"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/plugin_device.go b/extender/code/vendor/github.com/docker/docker/api/types/plugin_device.go new file mode 100644 index 000000000..569901067 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/plugin_device.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginDevice plugin device +// swagger:model PluginDevice +type PluginDevice struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // path + // Required: true + Path *string `json:"Path"` + + // settable + // Required: true + Settable []string `json:"Settable"` +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/plugin_env.go b/extender/code/vendor/github.com/docker/docker/api/types/plugin_env.go new file mode 100644 index 000000000..32962dc2e --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/plugin_env.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginEnv plugin env +// swagger:model PluginEnv +type PluginEnv struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value *string `json:"Value"` +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/plugin_interface_type.go b/extender/code/vendor/github.com/docker/docker/api/types/plugin_interface_type.go new file mode 100644 index 000000000..c82f204e8 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/plugin_interface_type.go @@ -0,0 +1,21 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginInterfaceType plugin interface type +// swagger:model PluginInterfaceType +type PluginInterfaceType struct { + + // capability + // Required: true + Capability string `json:"Capability"` + + // prefix + // Required: true + Prefix string `json:"Prefix"` + + // version + // Required: true + Version string `json:"Version"` +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/plugin_mount.go b/extender/code/vendor/github.com/docker/docker/api/types/plugin_mount.go new file mode 100644 index 000000000..5c031cf8b --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/plugin_mount.go @@ -0,0 +1,37 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// PluginMount plugin mount +// swagger:model PluginMount +type PluginMount struct { + + // description + // Required: true + Description string `json:"Description"` + + // destination + // Required: true + Destination string `json:"Destination"` + + // name + // Required: true + Name string `json:"Name"` + + // options + // Required: true + Options []string `json:"Options"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // source + // Required: true + Source *string `json:"Source"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/plugin_responses.go b/extender/code/vendor/github.com/docker/docker/api/types/plugin_responses.go new file mode 100644 index 000000000..60d1fb5ad --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/plugin_responses.go @@ -0,0 +1,71 @@ +package types // import "github.com/docker/docker/api/types" + +import ( + "encoding/json" + "fmt" + "sort" +) + +// PluginsListResponse contains the response for the Engine API +type PluginsListResponse []*Plugin + +// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType +func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { + versionIndex := len(p) + prefixIndex := 0 + if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { + return fmt.Errorf("%q is not a plugin interface type", p) + } + p = p[1 : len(p)-1] +loop: + for i, b := range p { + switch b { + case '.': + prefixIndex = i + case '/': + versionIndex = i + break loop + } + } + t.Prefix = string(p[:prefixIndex]) + t.Capability = string(p[prefixIndex+1 : versionIndex]) + if versionIndex < len(p) { + t.Version = string(p[versionIndex+1:]) + } + return nil +} + +// MarshalJSON implements json.Marshaler for PluginInterfaceType +func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} + +// String implements fmt.Stringer for PluginInterfaceType +func (t PluginInterfaceType) String() string { + return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +type PluginPrivilege struct { + Name string + Description string + Value []string +} + +// PluginPrivileges is a list of PluginPrivilege +type PluginPrivileges []PluginPrivilege + +func (s PluginPrivileges) Len() int { + return len(s) +} + +func (s PluginPrivileges) Less(i, j int) bool { + return s[i].Name < s[j].Name +} + +func (s PluginPrivileges) Swap(i, j int) { + sort.Strings(s[i].Value) + sort.Strings(s[j].Value) + s[i], s[j] = s[j], s[i] +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/plugins/logdriver/entry.pb.go b/extender/code/vendor/github.com/docker/docker/api/types/plugins/logdriver/entry.pb.go new file mode 100644 index 000000000..5ced16895 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/plugins/logdriver/entry.pb.go @@ -0,0 +1,716 @@ +// Code generated by protoc-gen-gogo. +// source: entry.proto +// DO NOT EDIT! + +/* + Package logdriver is a generated protocol buffer package. 
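+
+ (Aside on PluginInterfaceType above: it round-trips through a compact
+ string form; e.g., hypothetically, "docker.volumedriver/1.0" unmarshals
+ to Prefix "docker", Capability "volumedriver", Version "1.0".)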
+ + It is generated from these files: + entry.proto + + It has these top-level messages: + LogEntry + PartialLogEntryMetadata +*/ +package logdriver + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type LogEntry struct { + Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` + TimeNano int64 `protobuf:"varint,2,opt,name=time_nano,json=timeNano,proto3" json:"time_nano,omitempty"` + Line []byte `protobuf:"bytes,3,opt,name=line,proto3" json:"line,omitempty"` + Partial bool `protobuf:"varint,4,opt,name=partial,proto3" json:"partial,omitempty"` + PartialLogMetadata *PartialLogEntryMetadata `protobuf:"bytes,5,opt,name=partial_log_metadata,json=partialLogMetadata" json:"partial_log_metadata,omitempty"` +} + +func (m *LogEntry) Reset() { *m = LogEntry{} } +func (m *LogEntry) String() string { return proto.CompactTextString(m) } +func (*LogEntry) ProtoMessage() {} +func (*LogEntry) Descriptor() ([]byte, []int) { return fileDescriptorEntry, []int{0} } + +func (m *LogEntry) GetSource() string { + if m != nil { + return m.Source + } + return "" +} + +func (m *LogEntry) GetTimeNano() int64 { + if m != nil { + return m.TimeNano + } + return 0 +} + +func (m *LogEntry) GetLine() []byte { + if m != nil { + return m.Line + } + return nil +} + +func (m *LogEntry) GetPartial() bool { + if m != nil { + return m.Partial + } + return false +} + +func (m *LogEntry) GetPartialLogMetadata() *PartialLogEntryMetadata { + if m != nil { + return m.PartialLogMetadata + } + return nil +} + +type PartialLogEntryMetadata struct { + Last bool `protobuf:"varint,1,opt,name=last,proto3" json:"last,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Ordinal int32 `protobuf:"varint,3,opt,name=ordinal,proto3" json:"ordinal,omitempty"` +} + +func (m *PartialLogEntryMetadata) Reset() { *m = PartialLogEntryMetadata{} } +func (m *PartialLogEntryMetadata) String() string { return proto.CompactTextString(m) } +func (*PartialLogEntryMetadata) ProtoMessage() {} +func (*PartialLogEntryMetadata) Descriptor() ([]byte, []int) { return fileDescriptorEntry, []int{1} } + +func (m *PartialLogEntryMetadata) GetLast() bool { + if m != nil { + return m.Last + } + return false +} + +func (m *PartialLogEntryMetadata) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *PartialLogEntryMetadata) GetOrdinal() int32 { + if m != nil { + return m.Ordinal + } + return 0 +} + +func init() { + proto.RegisterType((*LogEntry)(nil), "LogEntry") + proto.RegisterType((*PartialLogEntryMetadata)(nil), "PartialLogEntryMetadata") +} +func (m *LogEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogEntry) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Source) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintEntry(dAtA, i, uint64(len(m.Source))) + i += copy(dAtA[i:], m.Source) + } + 
if m.TimeNano != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintEntry(dAtA, i, uint64(m.TimeNano)) + } + if len(m.Line) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintEntry(dAtA, i, uint64(len(m.Line))) + i += copy(dAtA[i:], m.Line) + } + if m.Partial { + dAtA[i] = 0x20 + i++ + if m.Partial { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.PartialLogMetadata != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintEntry(dAtA, i, uint64(m.PartialLogMetadata.Size())) + n1, err := m.PartialLogMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + return i, nil +} + +func (m *PartialLogEntryMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PartialLogEntryMetadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Last { + dAtA[i] = 0x8 + i++ + if m.Last { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Id) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintEntry(dAtA, i, uint64(len(m.Id))) + i += copy(dAtA[i:], m.Id) + } + if m.Ordinal != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintEntry(dAtA, i, uint64(m.Ordinal)) + } + return i, nil +} + +func encodeFixed64Entry(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Entry(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintEntry(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *LogEntry) Size() (n int) { + var l int + _ = l + l = len(m.Source) + if l > 0 { + n += 1 + l + sovEntry(uint64(l)) + } + if m.TimeNano != 0 { + n += 1 + sovEntry(uint64(m.TimeNano)) + } + l = len(m.Line) + if l > 0 { + n += 1 + l + sovEntry(uint64(l)) + } + if m.Partial { + n += 2 + } + if m.PartialLogMetadata != nil { + l = m.PartialLogMetadata.Size() + n += 1 + l + sovEntry(uint64(l)) + } + return n +} + +func (m *PartialLogEntryMetadata) Size() (n int) { + var l int + _ = l + if m.Last { + n += 2 + } + l = len(m.Id) + if l > 0 { + n += 1 + l + sovEntry(uint64(l)) + } + if m.Ordinal != 0 { + n += 1 + sovEntry(uint64(m.Ordinal)) + } + return n +} + +func sovEntry(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozEntry(x uint64) (n int) { + return sovEntry(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *LogEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogEntry: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEntry + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeNano", wireType) + } + m.TimeNano = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeNano |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthEntry + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Line = append(m.Line[:0], dAtA[iNdEx:postIndex]...) + if m.Line == nil { + m.Line = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partial", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Partial = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PartialLogMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEntry + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PartialLogMetadata == nil { + m.PartialLogMetadata = &PartialLogEntryMetadata{} + } + if err := m.PartialLogMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEntry(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEntry + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PartialLogEntryMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + 
if wireType == 4 { + return fmt.Errorf("proto: PartialLogEntryMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PartialLogEntryMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Last", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Last = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEntry + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ordinal", wireType) + } + m.Ordinal = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Ordinal |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEntry(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEntry + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEntry(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEntry + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEntry + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEntry + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthEntry + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEntry + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipEntry(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 
5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthEntry = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEntry = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("entry.proto", fileDescriptorEntry) } + +var fileDescriptorEntry = []byte{ + // 237 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x90, 0xbd, 0x4a, 0x04, 0x31, + 0x14, 0x85, 0xb9, 0xb3, 0x3f, 0xce, 0xdc, 0x5d, 0x2c, 0x82, 0x68, 0x40, 0x18, 0xc2, 0x56, 0xa9, + 0xb6, 0xd0, 0x37, 0x10, 0x6c, 0x44, 0x45, 0xd2, 0x58, 0x0e, 0x57, 0x27, 0x2c, 0x81, 0xd9, 0xdc, + 0x21, 0x13, 0x0b, 0x1f, 0xcd, 0x37, 0xb0, 0xf4, 0x11, 0x64, 0x9e, 0x44, 0x26, 0x4e, 0xec, 0xec, + 0xce, 0x39, 0x5f, 0x8a, 0x2f, 0x17, 0x37, 0xd6, 0xc7, 0xf0, 0xbe, 0xef, 0x03, 0x47, 0xde, 0x7d, + 0x00, 0x96, 0xf7, 0x7c, 0xb8, 0x9d, 0x26, 0x71, 0x8e, 0xeb, 0x81, 0xdf, 0xc2, 0xab, 0x95, 0xa0, + 0x40, 0x57, 0x66, 0x6e, 0xe2, 0x12, 0xab, 0xe8, 0x8e, 0xb6, 0xf1, 0xe4, 0x59, 0x16, 0x0a, 0xf4, + 0xc2, 0x94, 0xd3, 0xf0, 0x48, 0x9e, 0x85, 0xc0, 0x65, 0xe7, 0xbc, 0x95, 0x0b, 0x05, 0x7a, 0x6b, + 0x52, 0x16, 0x12, 0x4f, 0x7a, 0x0a, 0xd1, 0x51, 0x27, 0x97, 0x0a, 0x74, 0x69, 0x72, 0x15, 0x77, + 0x78, 0x36, 0xc7, 0xa6, 0xe3, 0x43, 0x73, 0xb4, 0x91, 0x5a, 0x8a, 0x24, 0x57, 0x0a, 0xf4, 0xe6, + 0x4a, 0xee, 0x9f, 0x7e, 0x61, 0x56, 0x7a, 0x98, 0xb9, 0x11, 0xfd, 0x1f, 0xc8, 0xdb, 0xee, 0x19, + 0x2f, 0xfe, 0x79, 0x9e, 0xa4, 0x68, 0x88, 0xe9, 0x1f, 0xa5, 0x49, 0x59, 0x9c, 0x62, 0xe1, 0xda, + 0xa4, 0x5f, 0x99, 0xc2, 0xb5, 0x93, 0x24, 0x87, 0xd6, 0x79, 0xea, 0x92, 0xfb, 0xca, 0xe4, 0x7a, + 0xb3, 0xfd, 0x1c, 0x6b, 0xf8, 0x1a, 0x6b, 0xf8, 0x1e, 0x6b, 0x78, 0x59, 0xa7, 0x4b, 0x5d, 0xff, + 0x04, 0x00, 0x00, 0xff, 0xff, 0x8f, 0xed, 0x9f, 0xb6, 0x38, 0x01, 0x00, 0x00, +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/plugins/logdriver/entry.proto b/extender/code/vendor/github.com/docker/docker/api/types/plugins/logdriver/entry.proto new file mode 100644 index 000000000..75be3d641 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/plugins/logdriver/entry.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; + +message LogEntry { + string source = 1; + int64 time_nano = 2; + bytes line = 3; + bool partial = 4; + PartialLogEntryMetadata partial_log_metadata = 5; +} + +message PartialLogEntryMetadata { + bool last = 1; + string id = 2; + int32 ordinal = 3; +} + diff --git a/extender/code/vendor/github.com/docker/docker/api/types/plugins/logdriver/gen.go b/extender/code/vendor/github.com/docker/docker/api/types/plugins/logdriver/gen.go new file mode 100644 index 000000000..e5f10b5e0 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/plugins/logdriver/gen.go @@ -0,0 +1,3 @@ +//go:generate protoc --gogofast_out=import_path=github.com/docker/docker/api/types/plugins/logdriver:. 
entry.proto + +package logdriver // import "github.com/docker/docker/api/types/plugins/logdriver" diff --git a/extender/code/vendor/github.com/docker/docker/api/types/plugins/logdriver/io.go b/extender/code/vendor/github.com/docker/docker/api/types/plugins/logdriver/io.go new file mode 100644 index 000000000..9081b3b45 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/plugins/logdriver/io.go @@ -0,0 +1,87 @@ +package logdriver // import "github.com/docker/docker/api/types/plugins/logdriver" + +import ( + "encoding/binary" + "io" +) + +const binaryEncodeLen = 4 + +// LogEntryEncoder encodes a LogEntry to a protobuf stream +// The stream should look like: +// +// [uint32 binary encoded message size][protobuf message] +// +// To decode an entry, read the first 4 bytes to get the size of the entry, +// then read `size` bytes from the stream. +type LogEntryEncoder interface { + Encode(*LogEntry) error +} + +// NewLogEntryEncoder creates a protobuf stream encoder for log entries. +// This is used to write out log entries to a stream. +func NewLogEntryEncoder(w io.Writer) LogEntryEncoder { + return &logEntryEncoder{ + w: w, + buf: make([]byte, 1024), + } +} + +type logEntryEncoder struct { + buf []byte + w io.Writer +} + +func (e *logEntryEncoder) Encode(l *LogEntry) error { + n := l.Size() + + total := n + binaryEncodeLen + if total > len(e.buf) { + e.buf = make([]byte, total) + } + binary.BigEndian.PutUint32(e.buf, uint32(n)) + + if _, err := l.MarshalTo(e.buf[binaryEncodeLen:]); err != nil { + return err + } + _, err := e.w.Write(e.buf[:total]) + return err +} + +// LogEntryDecoder decodes log entries from a stream +// It is expected that the wire format is as defined by LogEntryEncoder. +type LogEntryDecoder interface { + Decode(*LogEntry) error +} + +// NewLogEntryDecoder creates a new stream decoder for log entries +func NewLogEntryDecoder(r io.Reader) LogEntryDecoder { + return &logEntryDecoder{ + lenBuf: make([]byte, binaryEncodeLen), + buf: make([]byte, 1024), + r: r, + } +} + +type logEntryDecoder struct { + r io.Reader + lenBuf []byte + buf []byte +} + +func (d *logEntryDecoder) Decode(l *LogEntry) error { + _, err := io.ReadFull(d.r, d.lenBuf) + if err != nil { + return err + } + + size := int(binary.BigEndian.Uint32(d.lenBuf)) + if len(d.buf) < size { + d.buf = make([]byte, size) + } + + if _, err := io.ReadFull(d.r, d.buf[:size]); err != nil { + return err + } + return l.Unmarshal(d.buf[:size]) +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/port.go b/extender/code/vendor/github.com/docker/docker/api/types/port.go new file mode 100644 index 000000000..d91234744 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/port.go @@ -0,0 +1,23 @@ +package types + +// This file was generated by the swagger tool. 
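+//
+// (Aside on the logdriver framing defined above, a minimal hypothetical
+// round-trip through an in-memory buffer:
+//
+//   var buf bytes.Buffer
+//   enc := logdriver.NewLogEntryEncoder(&buf)
+//   _ = enc.Encode(&logdriver.LogEntry{Source: "stdout", Line: []byte("hi")})
+//   var e logdriver.LogEntry
+//   _ = logdriver.NewLogEntryDecoder(&buf).Decode(&e)
+//
+// Each frame is a 4-byte big-endian length followed by that many
+// protobuf-encoded bytes.)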
+// Editing this file might prove futile when you re-run the swagger generate command + +// Port An open port on a container +// swagger:model Port +type Port struct { + + // Host IP address that the container's port is mapped to + IP string `json:"IP,omitempty"` + + // Port on the container + // Required: true + PrivatePort uint16 `json:"PrivatePort"` + + // Port exposed on the host + PublicPort uint16 `json:"PublicPort,omitempty"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/extender/code/vendor/github.com/docker/docker/api/types/registry/authenticate.go new file mode 100644 index 000000000..f0a2113e4 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/registry/authenticate.go @@ -0,0 +1,21 @@ +package registry // import "github.com/docker/docker/api/types/registry" + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// AuthenticateOKBody authenticate o k body +// swagger:model AuthenticateOKBody +type AuthenticateOKBody struct { + + // An opaque token used to authenticate a user after a successful login + // Required: true + IdentityToken string `json:"IdentityToken"` + + // The status of the authentication + // Required: true + Status string `json:"Status"` +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/registry/registry.go b/extender/code/vendor/github.com/docker/docker/api/types/registry/registry.go new file mode 100644 index 000000000..8789ad3b3 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -0,0 +1,119 @@ +package registry // import "github.com/docker/docker/api/types/registry" + +import ( + "encoding/json" + "net" + + "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ServiceConfig stores daemon registry services configuration. 
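+// These fields mirror daemon-level settings; e.g. (hypothetical values) an
+// insecure registry CIDR such as "10.0.0.0/8" surfaces in
+// InsecureRegistryCIDRs, and a mirror URL in Mirrors.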
+type ServiceConfig struct { + AllowNondistributableArtifactsCIDRs []*NetIPNet + AllowNondistributableArtifactsHostnames []string + InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` + IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` + Mirrors []string +} + +// NetIPNet is the net.IPNet type, which can be marshalled and +// unmarshalled to JSON +type NetIPNet net.IPNet + +// String returns the CIDR notation of ipnet +func (ipnet *NetIPNet) String() string { + return (*net.IPNet)(ipnet).String() +} + +// MarshalJSON returns the JSON representation of the IPNet +func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { + return json.Marshal((*net.IPNet)(ipnet).String()) +} + +// UnmarshalJSON sets the IPNet from a byte array of JSON +func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { + var ipnetStr string + if err = json.Unmarshal(b, &ipnetStr); err == nil { + var cidr *net.IPNet + if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { + *ipnet = NetIPNet(*cidr) + } + } + return +} + +// IndexInfo contains information about a registry +// +// RepositoryInfo Examples: +// { +// "Index" : { +// "Name" : "docker.io", +// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], +// "Secure" : true, +// "Official" : true, +// }, +// "RemoteName" : "library/debian", +// "LocalName" : "debian", +// "CanonicalName" : "docker.io/debian" +// "Official" : true, +// } +// +// { +// "Index" : { +// "Name" : "127.0.0.1:5000", +// "Mirrors" : [], +// "Secure" : false, +// "Official" : false, +// }, +// "RemoteName" : "user/repo", +// "LocalName" : "127.0.0.1:5000/user/repo", +// "CanonicalName" : "127.0.0.1:5000/user/repo", +// "Official" : false, +// } +type IndexInfo struct { + // Name is the name of the registry, such as "docker.io" + Name string + // Mirrors is a list of mirrors, expressed as URIs + Mirrors []string + // Secure is set to false if the registry is part of the list of + // insecure registries. Insecure registries accept HTTP and/or accept + // HTTPS with certificates from unknown CAs. + Secure bool + // Official indicates whether this is an official registry + Official bool +} + +// SearchResult describes a search result returned from a registry +type SearchResult struct { + // StarCount indicates the number of stars this repository has + StarCount int `json:"star_count"` + // IsOfficial is true if the result is from an official repository. 
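+ // (Aside on NetIPNet above: it marshals as its CIDR string, so,
+ // hypothetically, json.Marshal of the 127.0.0.0/8 network yields
+ // `"127.0.0.0/8"`, and UnmarshalJSON parses it back via net.ParseCIDR.)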
+ IsOfficial bool `json:"is_official"`
+ // Name is the name of the repository
+ Name string `json:"name"`
+ // IsAutomated indicates whether the result is automated
+ IsAutomated bool `json:"is_automated"`
+ // Description is a textual description of the repository
+ Description string `json:"description"`
+}
+
+// SearchResults lists a collection of search results returned from a registry
+type SearchResults struct {
+ // Query contains the query string that generated the search results
+ Query string `json:"query"`
+ // NumResults indicates the number of results the query returned
+ NumResults int `json:"num_results"`
+ // Results is a slice containing the actual results for the search
+ Results []SearchResult `json:"results"`
+}
+
+// DistributionInspect describes the result obtained from contacting the
+// registry to retrieve image metadata
+type DistributionInspect struct {
+ // Descriptor contains information about the manifest, including
+ // the content addressable digest
+ Descriptor v1.Descriptor
+ // Platforms contains the list of platforms supported by the image,
+ // obtained by parsing the manifest
+ Platforms []v1.Platform
+}
diff --git a/extender/code/vendor/github.com/docker/docker/api/types/seccomp.go b/extender/code/vendor/github.com/docker/docker/api/types/seccomp.go
new file mode 100644
index 000000000..2259c6be1
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/api/types/seccomp.go
@@ -0,0 +1,94 @@
+package types // import "github.com/docker/docker/api/types"
+
+// Seccomp represents the config for a seccomp profile for syscall restriction.
+type Seccomp struct {
+ DefaultAction Action `json:"defaultAction"`
+ // Architectures is kept to maintain backward compatibility with the old
+ // seccomp profile.
+ Architectures []Arch `json:"architectures,omitempty"`
+ ArchMap []Architecture `json:"archMap,omitempty"`
+ Syscalls []*Syscall `json:"syscalls"`
+}
+
+// Architecture is used to represent a specific architecture
+// and its sub-architectures
+type Architecture struct {
+ Arch Arch `json:"architecture"`
+ SubArches []Arch `json:"subArchitectures"`
+}
+
+// Arch used for architectures
+type Arch string
+
+// Additional architectures permitted to be used for system calls
+// By default only the native architecture of the kernel is permitted
+const (
+ ArchX86 Arch = "SCMP_ARCH_X86"
+ ArchX86_64 Arch = "SCMP_ARCH_X86_64"
+ ArchX32 Arch = "SCMP_ARCH_X32"
+ ArchARM Arch = "SCMP_ARCH_ARM"
+ ArchAARCH64 Arch = "SCMP_ARCH_AARCH64"
+ ArchMIPS Arch = "SCMP_ARCH_MIPS"
+ ArchMIPS64 Arch = "SCMP_ARCH_MIPS64"
+ ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32"
+ ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL"
+ ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64"
+ ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
+ ArchPPC Arch = "SCMP_ARCH_PPC"
+ ArchPPC64 Arch = "SCMP_ARCH_PPC64"
+ ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE"
+ ArchS390 Arch = "SCMP_ARCH_S390"
+ ArchS390X Arch = "SCMP_ARCH_S390X"
+)
+
+// Action taken upon Seccomp rule match
+type Action string
+
+// Define actions for Seccomp rules
+const (
+ ActKill Action = "SCMP_ACT_KILL"
+ ActTrap Action = "SCMP_ACT_TRAP"
+ ActErrno Action = "SCMP_ACT_ERRNO"
+ ActTrace Action = "SCMP_ACT_TRACE"
+ ActAllow Action = "SCMP_ACT_ALLOW"
+)
+
+// Operator used to match syscall arguments in Seccomp
+type Operator string
+
+// Define operators for syscall arguments in Seccomp
+const (
+ OpNotEqual Operator = "SCMP_CMP_NE"
+ OpLessThan Operator = "SCMP_CMP_LT"
+ OpLessEqual Operator = "SCMP_CMP_LE"
+ OpEqualTo Operator = "SCMP_CMP_EQ"
+ OpGreaterEqual
Operator = "SCMP_CMP_GE" + OpGreaterThan Operator = "SCMP_CMP_GT" + OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ" +) + +// Arg used for matching specific syscall arguments in Seccomp +type Arg struct { + Index uint `json:"index"` + Value uint64 `json:"value"` + ValueTwo uint64 `json:"valueTwo"` + Op Operator `json:"op"` +} + +// Filter is used to conditionally apply Seccomp rules +type Filter struct { + Caps []string `json:"caps,omitempty"` + Arches []string `json:"arches,omitempty"` + MinKernel string `json:"minKernel,omitempty"` +} + +// Syscall is used to match a group of syscalls in Seccomp +type Syscall struct { + Name string `json:"name,omitempty"` + Names []string `json:"names,omitempty"` + Action Action `json:"action"` + Args []*Arg `json:"args"` + Comment string `json:"comment"` + Includes Filter `json:"includes"` + Excludes Filter `json:"excludes"` +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/service_update_response.go b/extender/code/vendor/github.com/docker/docker/api/types/service_update_response.go new file mode 100644 index 000000000..74ea64b1b --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/service_update_response.go @@ -0,0 +1,12 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ServiceUpdateResponse service update response +// swagger:model ServiceUpdateResponse +type ServiceUpdateResponse struct { + + // Optional warning messages + Warnings []string `json:"Warnings"` +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/stats.go b/extender/code/vendor/github.com/docker/docker/api/types/stats.go new file mode 100644 index 000000000..20daebed1 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/stats.go @@ -0,0 +1,181 @@ +// Package types is used for API stability in the types and response to the +// consumers of the API stats endpoint. +package types // import "github.com/docker/docker/api/types" + +import "time" + +// ThrottlingData stores CPU throttling stats of one running container. +// Not used on Windows. +type ThrottlingData struct { + // Number of periods with throttling active + Periods uint64 `json:"periods"` + // Number of periods when the container hits its throttling limit. + ThrottledPeriods uint64 `json:"throttled_periods"` + // Aggregate time the container was throttled for in nanoseconds. + ThrottledTime uint64 `json:"throttled_time"` +} + +// CPUUsage stores All CPU stats aggregated since container inception. +type CPUUsage struct { + // Total CPU time consumed. + // Units: nanoseconds (Linux) + // Units: 100's of nanoseconds (Windows) + TotalUsage uint64 `json:"total_usage"` + + // Total CPU time consumed per core (Linux). Not used on Windows. + // Units: nanoseconds. + PercpuUsage []uint64 `json:"percpu_usage,omitempty"` + + // Time spent by tasks of the cgroup in kernel mode (Linux). + // Time spent by all container processes in kernel mode (Windows). + // Units: nanoseconds (Linux). + // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers. + UsageInKernelmode uint64 `json:"usage_in_kernelmode"` + + // Time spent by tasks of the cgroup in user mode (Linux). + // Time spent by all container processes in user mode (Windows). + // Units: nanoseconds (Linux). + // Units: 100's of nanoseconds (Windows). 
Not populated for Hyper-V Containers + UsageInUsermode uint64 `json:"usage_in_usermode"` +} + +// CPUStats aggregates and wraps all CPU related info of container +type CPUStats struct { + // CPU Usage. Linux and Windows. + CPUUsage CPUUsage `json:"cpu_usage"` + + // System Usage. Linux only. + SystemUsage uint64 `json:"system_cpu_usage,omitempty"` + + // Online CPUs. Linux only. + OnlineCPUs uint32 `json:"online_cpus,omitempty"` + + // Throttling Data. Linux only. + ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` +} + +// MemoryStats aggregates all memory stats since container inception on Linux. +// Windows returns stats for commit and private working set only. +type MemoryStats struct { + // Linux Memory Stats + + // current res_counter usage for memory + Usage uint64 `json:"usage,omitempty"` + // maximum usage ever recorded. + MaxUsage uint64 `json:"max_usage,omitempty"` + // TODO(vishh): Export these as stronger types. + // all the stats exported via memory.stat. + Stats map[string]uint64 `json:"stats,omitempty"` + // number of times memory usage hits limits. + Failcnt uint64 `json:"failcnt,omitempty"` + Limit uint64 `json:"limit,omitempty"` + + // Windows Memory Stats + // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx + + // committed bytes + Commit uint64 `json:"commitbytes,omitempty"` + // peak committed bytes + CommitPeak uint64 `json:"commitpeakbytes,omitempty"` + // private working set + PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"` +} + +// BlkioStatEntry is one small entity to store a piece of Blkio stats +// Not used on Windows. +type BlkioStatEntry struct { + Major uint64 `json:"major"` + Minor uint64 `json:"minor"` + Op string `json:"op"` + Value uint64 `json:"value"` +} + +// BlkioStats stores All IO service stats for data read and write. +// This is a Linux specific structure as the differences between expressing +// block I/O on Windows and Linux are sufficiently significant to make +// little sense attempting to morph into a combined structure. +type BlkioStats struct { + // number of bytes transferred to and from the block device + IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` + IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` + IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` + IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` + IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` + IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` + IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` + SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` +} + +// StorageStats is the disk I/O stats for read/write on Windows. +type StorageStats struct { + ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"` + ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"` + WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"` + WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"` +} + +// NetworkStats aggregates the network stats of one container +type NetworkStats struct { + // Bytes received. Windows and Linux. + RxBytes uint64 `json:"rx_bytes"` + // Packets received. Windows and Linux. + RxPackets uint64 `json:"rx_packets"` + // Received errors. Not used on Windows. Note that we don't `omitempty` this + // field as it is expected in the >=v1.21 API stats structure. + RxErrors uint64 `json:"rx_errors"` + // Incoming packets dropped. Windows and Linux. 
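+ // (Aside: the usual consumer of CPUStats/PreCPUStats is the CPU percentage
+ // calculation; roughly, for a hypothetical sample s of the Stats type below:
+ //
+ //   cpuDelta := float64(s.CPUStats.CPUUsage.TotalUsage - s.PreCPUStats.CPUUsage.TotalUsage)
+ //   sysDelta := float64(s.CPUStats.SystemUsage - s.PreCPUStats.SystemUsage)
+ //   percent := cpuDelta / sysDelta * float64(s.CPUStats.OnlineCPUs) * 100.0
+ // )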
+ RxDropped uint64 `json:"rx_dropped"`
+ // Bytes sent. Windows and Linux.
+ TxBytes uint64 `json:"tx_bytes"`
+ // Packets sent. Windows and Linux.
+ TxPackets uint64 `json:"tx_packets"`
+ // Sent errors. Not used on Windows. Note that we don't `omitempty` this
+ // field as it is expected in the >=v1.21 API stats structure.
+ TxErrors uint64 `json:"tx_errors"`
+ // Outgoing packets dropped. Windows and Linux.
+ TxDropped uint64 `json:"tx_dropped"`
+ // Endpoint ID. Not used on Linux.
+ EndpointID string `json:"endpoint_id,omitempty"`
+ // Instance ID. Not used on Linux.
+ InstanceID string `json:"instance_id,omitempty"`
+}
+
+// PidsStats contains the stats of a container's pids
+type PidsStats struct {
+ // Current is the number of pids in the cgroup
+ Current uint64 `json:"current,omitempty"`
+ // Limit is the hard limit on the number of pids in the cgroup.
+ // A "Limit" of 0 means that there is no limit.
+ Limit uint64 `json:"limit,omitempty"`
+}
+
+// Stats is the ultimate struct, aggregating all types of stats of one container
+type Stats struct {
+ // Common stats
+ Read time.Time `json:"read"`
+ PreRead time.Time `json:"preread"`
+
+ // Linux specific stats, not populated on Windows.
+ PidsStats PidsStats `json:"pids_stats,omitempty"`
+ BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
+
+ // Windows specific stats, not populated on Linux.
+ NumProcs uint32 `json:"num_procs"`
+ StorageStats StorageStats `json:"storage_stats,omitempty"`
+
+ // Shared stats
+ CPUStats CPUStats `json:"cpu_stats,omitempty"`
+ PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous"
+ MemoryStats MemoryStats `json:"memory_stats,omitempty"`
+}
+
+// StatsJSON is Stats plus the container name and ID, and the per-network stats
+type StatsJSON struct {
+ Stats
+
+ Name string `json:"name,omitempty"`
+ ID string `json:"id,omitempty"`
+
+ // Networks is returned for API request versions >= 1.21
+ Networks map[string]NetworkStats `json:"networks,omitempty"`
+}
diff --git a/extender/code/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/extender/code/vendor/github.com/docker/docker/api/types/strslice/strslice.go
new file mode 100644
index 000000000..82921cebc
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/api/types/strslice/strslice.go
@@ -0,0 +1,30 @@
+package strslice // import "github.com/docker/docker/api/types/strslice"
+
+import "encoding/json"
+
+// StrSlice represents a string or an array of strings.
+// We need to override the json decoder to accept both options.
+type StrSlice []string
+
+// UnmarshalJSON decodes the byte slice whether it's a string or an array of
+// strings. This method is needed to implement json.Unmarshaler.
+func (e *StrSlice) UnmarshalJSON(b []byte) error {
+ if len(b) == 0 {
+ // With no input, we preserve the existing value by returning nil and
+ // leaving the target alone. This allows defining default values for
+ // the type.
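+ // (For non-empty input, both JSON forms are accepted below; e.g.
+ // "sh" and ["sh", "-c"] each decode into a StrSlice.)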
+ return nil
+ }
+
+ p := make([]string, 0, 1)
+ if err := json.Unmarshal(b, &p); err != nil {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ p = append(p, s)
+ }
+
+ *e = p
+ return nil
+}
diff --git a/extender/code/vendor/github.com/docker/docker/api/types/swarm/common.go b/extender/code/vendor/github.com/docker/docker/api/types/swarm/common.go
new file mode 100644
index 000000000..ef020f458
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/api/types/swarm/common.go
@@ -0,0 +1,40 @@
+package swarm // import "github.com/docker/docker/api/types/swarm"
+
+import "time"
+
+// Version represents the internal object version.
+type Version struct {
+ Index uint64 `json:",omitempty"`
+}
+
+// Meta is a base object inherited by most of the other ones.
+type Meta struct {
+ Version Version `json:",omitempty"`
+ CreatedAt time.Time `json:",omitempty"`
+ UpdatedAt time.Time `json:",omitempty"`
+}
+
+// Annotations represents how to describe an object.
+type Annotations struct {
+ Name string `json:",omitempty"`
+ Labels map[string]string `json:"Labels"`
+}
+
+// Driver represents a driver (network, logging, secrets backend).
+type Driver struct {
+ Name string `json:",omitempty"`
+ Options map[string]string `json:",omitempty"`
+}
+
+// TLSInfo represents the TLS information about what CA certificate is trusted,
+// and who the issuer for a TLS certificate is
+type TLSInfo struct {
+ // TrustRoot is the trusted CA root certificate in PEM format
+ TrustRoot string `json:",omitempty"`
+
+ // CertIssuerSubject is the raw subject bytes of the issuer
+ CertIssuerSubject []byte `json:",omitempty"`
+
+ // CertIssuerPublicKey is the raw public key bytes of the issuer
+ CertIssuerPublicKey []byte `json:",omitempty"`
+}
diff --git a/extender/code/vendor/github.com/docker/docker/api/types/swarm/config.go b/extender/code/vendor/github.com/docker/docker/api/types/swarm/config.go
new file mode 100644
index 000000000..16202ccce
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/api/types/swarm/config.go
@@ -0,0 +1,40 @@
+package swarm // import "github.com/docker/docker/api/types/swarm"
+
+import "os"
+
+// Config represents a config.
+type Config struct {
+ ID string
+ Meta
+ Spec ConfigSpec
+}
+
+// ConfigSpec represents a config specification from a config in swarm
+type ConfigSpec struct {
+ Annotations
+ Data []byte `json:",omitempty"`
+
+ // Templating controls whether and how to evaluate the config payload as
+ // a template. If it is not set, no templating is used.
+ Templating *Driver `json:",omitempty"`
+}
+
+// ConfigReferenceFileTarget is a file target in a config reference
+type ConfigReferenceFileTarget struct {
+ Name string
+ UID string
+ GID string
+ Mode os.FileMode
+}
+
+// ConfigReferenceRuntimeTarget is a target for a config specifying that it
+// isn't mounted into the container but instead has some other purpose.
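+// (Aside on ConfigSpec above: a templated config might, hypothetically, set
+// Templating: &Driver{Name: "golang"} so the payload is evaluated as a Go
+// template before being handed to the task.)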
+type ConfigReferenceRuntimeTarget struct{} + +// ConfigReference is a reference to a config in swarm +type ConfigReference struct { + File *ConfigReferenceFileTarget `json:",omitempty"` + Runtime *ConfigReferenceRuntimeTarget `json:",omitempty"` + ConfigID string + ConfigName string +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/swarm/container.go b/extender/code/vendor/github.com/docker/docker/api/types/swarm/container.go new file mode 100644 index 000000000..48190c176 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -0,0 +1,76 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/mount" +) + +// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf) +// Detailed documentation is available in: +// http://man7.org/linux/man-pages/man5/resolv.conf.5.html +// `nameserver`, `search`, `options` have been supported. +// TODO: `domain` is not supported yet. +type DNSConfig struct { + // Nameservers specifies the IP addresses of the name servers + Nameservers []string `json:",omitempty"` + // Search specifies the search list for host-name lookup + Search []string `json:",omitempty"` + // Options allows certain internal resolver variables to be modified + Options []string `json:",omitempty"` +} + +// SELinuxContext contains the SELinux labels of the container. +type SELinuxContext struct { + Disable bool + + User string + Role string + Type string + Level string +} + +// CredentialSpec for managed service account (Windows only) +type CredentialSpec struct { + Config string + File string + Registry string +} + +// Privileges defines the security options for the container. +type Privileges struct { + CredentialSpec *CredentialSpec + SELinuxContext *SELinuxContext +} + +// ContainerSpec represents the spec of a container. +type ContainerSpec struct { + Image string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Command []string `json:",omitempty"` + Args []string `json:",omitempty"` + Hostname string `json:",omitempty"` + Env []string `json:",omitempty"` + Dir string `json:",omitempty"` + User string `json:",omitempty"` + Groups []string `json:",omitempty"` + Privileges *Privileges `json:",omitempty"` + Init *bool `json:",omitempty"` + StopSignal string `json:",omitempty"` + TTY bool `json:",omitempty"` + OpenStdin bool `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + Mounts []mount.Mount `json:",omitempty"` + StopGracePeriod *time.Duration `json:",omitempty"` + Healthcheck *container.HealthConfig `json:",omitempty"` + // The format of extra hosts on swarmkit is specified in: + // http://man7.org/linux/man-pages/man5/hosts.5.html + // IP_address canonical_hostname [aliases...] 
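+ // e.g. a hypothetical entry: "10.0.0.2 db.internal db"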
+ Hosts []string `json:",omitempty"` + DNSConfig *DNSConfig `json:",omitempty"` + Secrets []*SecretReference `json:",omitempty"` + Configs []*ConfigReference `json:",omitempty"` + Isolation container.Isolation `json:",omitempty"` + Sysctls map[string]string `json:",omitempty"` +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/swarm/network.go b/extender/code/vendor/github.com/docker/docker/api/types/swarm/network.go new file mode 100644 index 000000000..98ef3284d --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/swarm/network.go @@ -0,0 +1,121 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "github.com/docker/docker/api/types/network" +) + +// Endpoint represents an endpoint. +type Endpoint struct { + Spec EndpointSpec `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` + VirtualIPs []EndpointVirtualIP `json:",omitempty"` +} + +// EndpointSpec represents the spec of an endpoint. +type EndpointSpec struct { + Mode ResolutionMode `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` +} + +// ResolutionMode represents a resolution mode. +type ResolutionMode string + +const ( + // ResolutionModeVIP VIP + ResolutionModeVIP ResolutionMode = "vip" + // ResolutionModeDNSRR DNSRR + ResolutionModeDNSRR ResolutionMode = "dnsrr" +) + +// PortConfig represents the config of a port. +type PortConfig struct { + Name string `json:",omitempty"` + Protocol PortConfigProtocol `json:",omitempty"` + // TargetPort is the port inside the container + TargetPort uint32 `json:",omitempty"` + // PublishedPort is the port on the swarm hosts + PublishedPort uint32 `json:",omitempty"` + // PublishMode is the mode in which port is published + PublishMode PortConfigPublishMode `json:",omitempty"` +} + +// PortConfigPublishMode represents the mode in which the port is to +// be published. +type PortConfigPublishMode string + +const ( + // PortConfigPublishModeIngress is used for ports published + // for ingress load balancing using routing mesh. + PortConfigPublishModeIngress PortConfigPublishMode = "ingress" + // PortConfigPublishModeHost is used for ports published + // for direct host level access on the host where the task is running. + PortConfigPublishModeHost PortConfigPublishMode = "host" +) + +// PortConfigProtocol represents the protocol of a port. +type PortConfigProtocol string + +const ( + // TODO(stevvooe): These should be used generally, not just for PortConfig. + + // PortConfigProtocolTCP TCP + PortConfigProtocolTCP PortConfigProtocol = "tcp" + // PortConfigProtocolUDP UDP + PortConfigProtocolUDP PortConfigProtocol = "udp" + // PortConfigProtocolSCTP SCTP + PortConfigProtocolSCTP PortConfigProtocol = "sctp" +) + +// EndpointVirtualIP represents the virtual ip of a port. +type EndpointVirtualIP struct { + NetworkID string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Network represents a network. +type Network struct { + ID string + Meta + Spec NetworkSpec `json:",omitempty"` + DriverState Driver `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` +} + +// NetworkSpec represents the spec of a network. 
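+// A spec for a hypothetical attachable overlay network might set Attachable
+// to true and DriverConfiguration to &Driver{Name: "overlay"}.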
+type NetworkSpec struct { + Annotations + DriverConfiguration *Driver `json:",omitempty"` + IPv6Enabled bool `json:",omitempty"` + Internal bool `json:",omitempty"` + Attachable bool `json:",omitempty"` + Ingress bool `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` + ConfigFrom *network.ConfigReference `json:",omitempty"` + Scope string `json:",omitempty"` +} + +// NetworkAttachmentConfig represents the configuration of a network attachment. +type NetworkAttachmentConfig struct { + Target string `json:",omitempty"` + Aliases []string `json:",omitempty"` + DriverOpts map[string]string `json:",omitempty"` +} + +// NetworkAttachment represents a network attachment. +type NetworkAttachment struct { + Network Network `json:",omitempty"` + Addresses []string `json:",omitempty"` +} + +// IPAMOptions represents ipam options. +type IPAMOptions struct { + Driver Driver `json:",omitempty"` + Configs []IPAMConfig `json:",omitempty"` +} + +// IPAMConfig represents ipam configuration. +type IPAMConfig struct { + Subnet string `json:",omitempty"` + Range string `json:",omitempty"` + Gateway string `json:",omitempty"` +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/swarm/node.go b/extender/code/vendor/github.com/docker/docker/api/types/swarm/node.go new file mode 100644 index 000000000..1e30f5fa1 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/swarm/node.go @@ -0,0 +1,115 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +// Node represents a node. +type Node struct { + ID string + Meta + // Spec defines the desired state of the node as specified by the user. + // The system will honor this and will *never* modify it. + Spec NodeSpec `json:",omitempty"` + // Description encapsulates the properties of the Node as reported by the + // agent. + Description NodeDescription `json:",omitempty"` + // Status provides the current status of the node, as seen by the manager. + Status NodeStatus `json:",omitempty"` + // ManagerStatus provides the current status of the node's manager + // component, if the node is a manager. + ManagerStatus *ManagerStatus `json:",omitempty"` +} + +// NodeSpec represents the spec of a node. +type NodeSpec struct { + Annotations + Role NodeRole `json:",omitempty"` + Availability NodeAvailability `json:",omitempty"` +} + +// NodeRole represents the role of a node. +type NodeRole string + +const ( + // NodeRoleWorker WORKER + NodeRoleWorker NodeRole = "worker" + // NodeRoleManager MANAGER + NodeRoleManager NodeRole = "manager" +) + +// NodeAvailability represents the availability of a node. +type NodeAvailability string + +const ( + // NodeAvailabilityActive ACTIVE + NodeAvailabilityActive NodeAvailability = "active" + // NodeAvailabilityPause PAUSE + NodeAvailabilityPause NodeAvailability = "pause" + // NodeAvailabilityDrain DRAIN + NodeAvailabilityDrain NodeAvailability = "drain" +) + +// NodeDescription represents the description of a node. +type NodeDescription struct { + Hostname string `json:",omitempty"` + Platform Platform `json:",omitempty"` + Resources Resources `json:",omitempty"` + Engine EngineDescription `json:",omitempty"` + TLSInfo TLSInfo `json:",omitempty"` +} + +// Platform represents the platform (Arch/OS). +type Platform struct { + Architecture string `json:",omitempty"` + OS string `json:",omitempty"` +} + +// EngineDescription represents the description of an engine. 
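+// e.g. (hypothetical values) EngineVersion "20.10.7" with Plugins such as
+// {Type: "Network", Name: "overlay"} and {Type: "Volume", Name: "local"}.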
+type EngineDescription struct { + EngineVersion string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Plugins []PluginDescription `json:",omitempty"` +} + +// PluginDescription represents the description of an engine plugin. +type PluginDescription struct { + Type string `json:",omitempty"` + Name string `json:",omitempty"` +} + +// NodeStatus represents the status of a node. +type NodeStatus struct { + State NodeState `json:",omitempty"` + Message string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Reachability represents the reachability of a node. +type Reachability string + +const ( + // ReachabilityUnknown UNKNOWN + ReachabilityUnknown Reachability = "unknown" + // ReachabilityUnreachable UNREACHABLE + ReachabilityUnreachable Reachability = "unreachable" + // ReachabilityReachable REACHABLE + ReachabilityReachable Reachability = "reachable" +) + +// ManagerStatus represents the status of a manager. +type ManagerStatus struct { + Leader bool `json:",omitempty"` + Reachability Reachability `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// NodeState represents the state of a node. +type NodeState string + +const ( + // NodeStateUnknown UNKNOWN + NodeStateUnknown NodeState = "unknown" + // NodeStateDown DOWN + NodeStateDown NodeState = "down" + // NodeStateReady READY + NodeStateReady NodeState = "ready" + // NodeStateDisconnected DISCONNECTED + NodeStateDisconnected NodeState = "disconnected" +) diff --git a/extender/code/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/extender/code/vendor/github.com/docker/docker/api/types/swarm/runtime.go new file mode 100644 index 000000000..0c77403cc --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/swarm/runtime.go @@ -0,0 +1,27 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +// RuntimeType is the type of runtime used for the TaskSpec +type RuntimeType string + +// RuntimeURL is the proto type url +type RuntimeURL string + +const ( + // RuntimeContainer is the container based runtime + RuntimeContainer RuntimeType = "container" + // RuntimePlugin is the plugin based runtime + RuntimePlugin RuntimeType = "plugin" + // RuntimeNetworkAttachment is the network attachment runtime + RuntimeNetworkAttachment RuntimeType = "attachment" + + // RuntimeURLContainer is the proto url for the container type + RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" + // RuntimeURLPlugin is the proto url for the plugin type + RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" +) + +// NetworkAttachmentSpec represents the runtime spec type for network +// attachment tasks +type NetworkAttachmentSpec struct { + ContainerID string +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/extender/code/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go new file mode 100644 index 000000000..98c2806c3 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go @@ -0,0 +1,3 @@ +//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. 
plugin.proto + +package runtime // import "github.com/docker/docker/api/types/swarm/runtime" diff --git a/extender/code/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/extender/code/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go new file mode 100644 index 000000000..1fdc9b043 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go @@ -0,0 +1,712 @@ +// Code generated by protoc-gen-gogo. +// source: plugin.proto +// DO NOT EDIT! + +/* + Package runtime is a generated protocol buffer package. + + It is generated from these files: + plugin.proto + + It has these top-level messages: + PluginSpec + PluginPrivilege +*/ +package runtime + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +type PluginSpec struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` + Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` + Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` +} + +func (m *PluginSpec) Reset() { *m = PluginSpec{} } +func (m *PluginSpec) String() string { return proto.CompactTextString(m) } +func (*PluginSpec) ProtoMessage() {} +func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } + +func (m *PluginSpec) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginSpec) GetRemote() string { + if m != nil { + return m.Remote + } + return "" +} + +func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { + if m != nil { + return m.Privileges + } + return nil +} + +func (m *PluginSpec) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. 
+type PluginPrivilege struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"` +} + +func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } +func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } +func (*PluginPrivilege) ProtoMessage() {} +func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } + +func (m *PluginPrivilege) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginPrivilege) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *PluginPrivilege) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*PluginSpec)(nil), "PluginSpec") + proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") +} +func (m *PluginSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Remote) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) + i += copy(dAtA[i:], m.Remote) + } + if len(m.Privileges) > 0 { + for _, msg := range m.Privileges { + dAtA[i] = 0x1a + i++ + i = encodeVarintPlugin(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Disabled { + dAtA[i] = 0x20 + i++ + if m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Description) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + } + if len(m.Value) > 0 { + for _, s := range m.Value { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func encodeFixed64Plugin(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Plugin(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + 
dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *PluginSpec) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Remote) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if len(m.Privileges) > 0 { + for _, e := range m.Privileges { + l = e.Size() + n += 1 + l + sovPlugin(uint64(l)) + } + } + if m.Disabled { + n += 2 + } + return n +} + +func (m *PluginPrivilege) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if len(m.Value) > 0 { + for _, s := range m.Value { + l = len(s) + n += 1 + l + sovPlugin(uint64(l)) + } + } + return n +} + +func sovPlugin(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozPlugin(x uint64) (n int) { + return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PluginSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Remote = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Privileges = append(m.Privileges, &PluginPrivilege{}) + if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil 
{ + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Disabled = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPlugin(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthPlugin + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipPlugin(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } + +var fileDescriptorPlugin = []byte{ + // 196 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d, + 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x6a, 0x63, 0xe4, 0xe2, 0x0a, 0x00, 0x0b, + 0x04, 0x17, 0xa4, 0x26, 0x0b, 0x09, 0x71, 0xb1, 0xe4, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, + 0x6a, 0x70, 0x06, 0x81, 0xd9, 0x42, 0x62, 0x5c, 0x6c, 0x45, 0xa9, 0xb9, 0xf9, 0x25, 0xa9, 0x12, + 0x4c, 0x60, 0x51, 0x28, 0x4f, 0xc8, 0x80, 0x8b, 0xab, 0xa0, 0x28, 0xb3, 0x2c, 0x33, 0x27, 0x35, + 0x3d, 0xb5, 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x40, 0x0f, 0x62, 0x58, 0x00, 0x4c, + 0x22, 0x08, 0x49, 0x8d, 0x90, 0x14, 0x17, 0x47, 0x4a, 0x66, 0x71, 0x62, 0x52, 0x4e, 0x6a, 0x8a, + 0x04, 0x8b, 0x02, 0xa3, 0x06, 0x47, 0x10, 0x9c, 0xaf, 0x14, 0xcb, 0xc5, 0x8f, 0xa6, 0x15, 0xab, + 0x63, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0, + 0x2e, 0x42, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x88, 0x33, + 0x08, 0xc2, 0x71, 0xe2, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, + 0x18, 0x93, 0xd8, 0xc0, 0x9e, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x84, 0xad, 0x79, + 0x0c, 0x01, 0x00, 0x00, +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto 
b/extender/code/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto new file mode 100644 index 000000000..6d63b7783 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +option go_package = "github.com/docker/docker/api/types/swarm/runtime;runtime"; + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +message PluginSpec { + string name = 1; + string remote = 2; + repeated PluginPrivilege privileges = 3; + bool disabled = 4; +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +message PluginPrivilege { + string name = 1; + string description = 2; + repeated string value = 3; +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/swarm/secret.go b/extender/code/vendor/github.com/docker/docker/api/types/swarm/secret.go new file mode 100644 index 000000000..d5213ec98 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/swarm/secret.go @@ -0,0 +1,36 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import "os" + +// Secret represents a secret. +type Secret struct { + ID string + Meta + Spec SecretSpec +} + +// SecretSpec represents a secret specification from a secret in swarm +type SecretSpec struct { + Annotations + Data []byte `json:",omitempty"` + Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store + + // Templating controls whether and how to evaluate the secret payload as + // a template. If it is not set, no templating is used. + Templating *Driver `json:",omitempty"` +} + +// SecretReferenceFileTarget is a file target in a secret reference +type SecretReferenceFileTarget struct { + Name string + UID string + GID string + Mode os.FileMode +} + +// SecretReference is a reference to a secret in swarm +type SecretReference struct { + File *SecretReferenceFileTarget + SecretID string + SecretName string +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/swarm/service.go b/extender/code/vendor/github.com/docker/docker/api/types/swarm/service.go new file mode 100644 index 000000000..abf192e75 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/swarm/service.go @@ -0,0 +1,124 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import "time" + +// Service represents a service. +type Service struct { + ID string + Meta + Spec ServiceSpec `json:",omitempty"` + PreviousSpec *ServiceSpec `json:",omitempty"` + Endpoint Endpoint `json:",omitempty"` + UpdateStatus *UpdateStatus `json:",omitempty"` +} + +// ServiceSpec represents the spec of a service. +type ServiceSpec struct { + Annotations + + // TaskTemplate defines how the service should construct new tasks when + // orchestrating this service. + TaskTemplate TaskSpec `json:",omitempty"` + Mode ServiceMode `json:",omitempty"` + UpdateConfig *UpdateConfig `json:",omitempty"` + RollbackConfig *UpdateConfig `json:",omitempty"` + + // Networks field in ServiceSpec is deprecated. The + // same field in TaskSpec should be used instead. + // This field will be removed in a future release. + Networks []NetworkAttachmentConfig `json:",omitempty"` + EndpointSpec *EndpointSpec `json:",omitempty"` +} + +// ServiceMode represents the mode of a service. 
+type ServiceMode struct { + Replicated *ReplicatedService `json:",omitempty"` + Global *GlobalService `json:",omitempty"` +} + +// UpdateState is the state of a service update. +type UpdateState string + +const ( + // UpdateStateUpdating is the updating state. + UpdateStateUpdating UpdateState = "updating" + // UpdateStatePaused is the paused state. + UpdateStatePaused UpdateState = "paused" + // UpdateStateCompleted is the completed state. + UpdateStateCompleted UpdateState = "completed" + // UpdateStateRollbackStarted is the state with a rollback in progress. + UpdateStateRollbackStarted UpdateState = "rollback_started" + // UpdateStateRollbackPaused is the state with a rollback paused. + UpdateStateRollbackPaused UpdateState = "rollback_paused" + // UpdateStateRollbackCompleted is the state with a rollback completed. + UpdateStateRollbackCompleted UpdateState = "rollback_completed" +) + +// UpdateStatus reports the status of a service update. +type UpdateStatus struct { + State UpdateState `json:",omitempty"` + StartedAt *time.Time `json:",omitempty"` + CompletedAt *time.Time `json:",omitempty"` + Message string `json:",omitempty"` +} + +// ReplicatedService is a kind of ServiceMode. +type ReplicatedService struct { + Replicas *uint64 `json:",omitempty"` +} + +// GlobalService is a kind of ServiceMode. +type GlobalService struct{} + +const ( + // UpdateFailureActionPause PAUSE + UpdateFailureActionPause = "pause" + // UpdateFailureActionContinue CONTINUE + UpdateFailureActionContinue = "continue" + // UpdateFailureActionRollback ROLLBACK + UpdateFailureActionRollback = "rollback" + + // UpdateOrderStopFirst STOP_FIRST + UpdateOrderStopFirst = "stop-first" + // UpdateOrderStartFirst START_FIRST + UpdateOrderStartFirst = "start-first" +) + +// UpdateConfig represents the update configuration. +type UpdateConfig struct { + // Maximum number of tasks to be updated in one iteration. + // 0 means unlimited parallelism. + Parallelism uint64 + + // Amount of time between updates. + Delay time.Duration `json:",omitempty"` + + // FailureAction is the action to take when an update fails. + FailureAction string `json:",omitempty"` + + // Monitor indicates how long to monitor a task for failure after it is + // created. If the task fails by ending up in one of the states + // REJECTED, COMPLETED, or FAILED, within Monitor from its creation, + // this counts as a failure. If it fails after Monitor, it does not + // count as a failure. If Monitor is unspecified, a default value will + // be used. + Monitor time.Duration `json:",omitempty"` + + // MaxFailureRatio is the fraction of tasks that may fail during + // an update before the failure action is invoked. Any task created by + // the current update which ends up in one of the states REJECTED, + // COMPLETED or FAILED within Monitor from its creation counts as a + // failure. The number of failures is divided by the number of tasks + // being updated, and if this fraction is greater than + // MaxFailureRatio, the failure action is invoked. + // + // If the failure action is CONTINUE, there is no effect. + // If the failure action is PAUSE, no more tasks will be updated until + // another update is started. + MaxFailureRatio float32 + + // Order indicates the order of operations when rolling out an updated + // task. Either the old task is shut down before the new task is + // started, or the new task is started before the old task is shut down.
+ Order string +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/extender/code/vendor/github.com/docker/docker/api/types/swarm/swarm.go new file mode 100644 index 000000000..b25f99964 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -0,0 +1,227 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "time" +) + +// ClusterInfo represents info about the cluster for outputting in "info". +// It contains the same information as "Swarm", but without the JoinTokens. +type ClusterInfo struct { + ID string + Meta + Spec Spec + TLSInfo TLSInfo + RootRotationInProgress bool + DefaultAddrPool []string + SubnetSize uint32 + DataPathPort uint32 +} + +// Swarm represents a swarm. +type Swarm struct { + ClusterInfo + JoinTokens JoinTokens +} + +// JoinTokens contains the tokens workers and managers need to join the swarm. +type JoinTokens struct { + // Worker is the join token workers may use to join the swarm. + Worker string + // Manager is the join token managers may use to join the swarm. + Manager string +} + +// Spec represents the spec of a swarm. +type Spec struct { + Annotations + + Orchestration OrchestrationConfig `json:",omitempty"` + Raft RaftConfig `json:",omitempty"` + Dispatcher DispatcherConfig `json:",omitempty"` + CAConfig CAConfig `json:",omitempty"` + TaskDefaults TaskDefaults `json:",omitempty"` + EncryptionConfig EncryptionConfig `json:",omitempty"` +} + +// OrchestrationConfig represents orchestration configuration. +type OrchestrationConfig struct { + // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or + // node. If negative, never remove completed or failed tasks. + TaskHistoryRetentionLimit *int64 `json:",omitempty"` +} + +// TaskDefaults parameterizes cluster-level task creation with default values. +type TaskDefaults struct { + // LogDriver selects the log driver to use for tasks created in the + // orchestrator if unspecified by a service. + // + // Updating this value will only have an effect on new tasks. Old tasks + // will continue to use their previously configured log driver until + // recreated. + LogDriver *Driver `json:",omitempty"` +} + +// EncryptionConfig controls at-rest encryption of data and keys. +type EncryptionConfig struct { + // AutoLockManagers specifies whether or not managers' TLS keys and raft data + // should be encrypted at rest in such a way that they must be unlocked + // before the manager node starts up again. + AutoLockManagers bool +} + +// RaftConfig represents raft configuration. +type RaftConfig struct { + // SnapshotInterval is the number of log entries between snapshots. + SnapshotInterval uint64 `json:",omitempty"` + + // KeepOldSnapshots is the number of snapshots to keep beyond the + // current snapshot. + KeepOldSnapshots *uint64 `json:",omitempty"` + + // LogEntriesForSlowFollowers is the number of log entries to keep + // around to sync up slow followers after a snapshot is created. + LogEntriesForSlowFollowers uint64 `json:",omitempty"` + + // ElectionTick is the number of ticks that a follower will wait for a message + // from the leader before becoming a candidate and starting an election. + // ElectionTick must be greater than HeartbeatTick. + // + // A tick currently defaults to one second, so these translate directly to + // seconds currently, but this is NOT guaranteed. + ElectionTick int + + // HeartbeatTick is the number of ticks between heartbeats.
Every + // HeartbeatTick ticks, the leader will send a heartbeat to the + // followers. + // + // A tick currently defaults to one second, so these translate directly to + // seconds currently, but this is NOT guaranteed. + HeartbeatTick int +} + +// DispatcherConfig represents dispatcher configuration. +type DispatcherConfig struct { + // HeartbeatPeriod defines how often the agent should send heartbeats to + // the dispatcher. + HeartbeatPeriod time.Duration `json:",omitempty"` +} + +// CAConfig represents CA configuration. +type CAConfig struct { + // NodeCertExpiry is the duration certificates should be issued for + NodeCertExpiry time.Duration `json:",omitempty"` + + // ExternalCAs is a list of CAs to which a manager node will make + // certificate signing requests for node certificates. + ExternalCAs []*ExternalCA `json:",omitempty"` + + // SigningCACert and SigningCAKey specify the desired signing root CA and + // root CA key for the swarm. When inspecting the cluster, the key will + // be redacted. + SigningCACert string `json:",omitempty"` + SigningCAKey string `json:",omitempty"` + + // If this value changes, and there is no specified signing cert and key, + // then the swarm is forced to generate a new root certificate and key. + ForceRotate uint64 `json:",omitempty"` +} + +// ExternalCAProtocol represents the type of external CA. +type ExternalCAProtocol string + +// ExternalCAProtocolCFSSL CFSSL +const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl" + +// ExternalCA defines an external CA to be used by the cluster. +type ExternalCA struct { + // Protocol is the protocol used by this external CA. + Protocol ExternalCAProtocol + + // URL is the URL where the external CA can be reached. + URL string + + // Options is a set of additional key/value pairs whose interpretation + // depends on the specified CA type. + Options map[string]string `json:",omitempty"` + + // CACert specifies which root CA is used by this external CA. This certificate must + // be in PEM format. + CACert string +} + +// InitRequest is the request used to init a swarm. +type InitRequest struct { + ListenAddr string + AdvertiseAddr string + DataPathAddr string + DataPathPort uint32 + ForceNewCluster bool + Spec Spec + AutoLockManagers bool + Availability NodeAvailability + DefaultAddrPool []string + SubnetSize uint32 +} + +// JoinRequest is the request used to join a swarm. +type JoinRequest struct { + ListenAddr string + AdvertiseAddr string + DataPathAddr string + RemoteAddrs []string + JoinToken string // accept by secret + Availability NodeAvailability +} + +// UnlockRequest is the request used to unlock a swarm. +type UnlockRequest struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} + +// LocalNodeState represents the state of the local node. +type LocalNodeState string + +const ( + // LocalNodeStateInactive INACTIVE + LocalNodeStateInactive LocalNodeState = "inactive" + // LocalNodeStatePending PENDING + LocalNodeStatePending LocalNodeState = "pending" + // LocalNodeStateActive ACTIVE + LocalNodeStateActive LocalNodeState = "active" + // LocalNodeStateError ERROR + LocalNodeStateError LocalNodeState = "error" + // LocalNodeStateLocked LOCKED + LocalNodeStateLocked LocalNodeState = "locked" +) + +// Info represents generic information about swarm.
+type Info struct { + NodeID string + NodeAddr string + + LocalNodeState LocalNodeState + ControlAvailable bool + Error string + + RemoteManagers []Peer + Nodes int `json:",omitempty"` + Managers int `json:",omitempty"` + + Cluster *ClusterInfo `json:",omitempty"` + + Warnings []string `json:",omitempty"` +} + +// Peer represents a peer. +type Peer struct { + NodeID string + Addr string +} + +// UpdateFlags contains flags for SwarmUpdate. +type UpdateFlags struct { + RotateWorkerToken bool + RotateManagerToken bool + RotateManagerUnlockKey bool +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/swarm/task.go b/extender/code/vendor/github.com/docker/docker/api/types/swarm/task.go new file mode 100644 index 000000000..d5a57df5d --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/swarm/task.go @@ -0,0 +1,192 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "time" + + "github.com/docker/docker/api/types/swarm/runtime" +) + +// TaskState represents the state of a task. +type TaskState string + +const ( + // TaskStateNew NEW + TaskStateNew TaskState = "new" + // TaskStateAllocated ALLOCATED + TaskStateAllocated TaskState = "allocated" + // TaskStatePending PENDING + TaskStatePending TaskState = "pending" + // TaskStateAssigned ASSIGNED + TaskStateAssigned TaskState = "assigned" + // TaskStateAccepted ACCEPTED + TaskStateAccepted TaskState = "accepted" + // TaskStatePreparing PREPARING + TaskStatePreparing TaskState = "preparing" + // TaskStateReady READY + TaskStateReady TaskState = "ready" + // TaskStateStarting STARTING + TaskStateStarting TaskState = "starting" + // TaskStateRunning RUNNING + TaskStateRunning TaskState = "running" + // TaskStateComplete COMPLETE + TaskStateComplete TaskState = "complete" + // TaskStateShutdown SHUTDOWN + TaskStateShutdown TaskState = "shutdown" + // TaskStateFailed FAILED + TaskStateFailed TaskState = "failed" + // TaskStateRejected REJECTED + TaskStateRejected TaskState = "rejected" + // TaskStateRemove REMOVE + TaskStateRemove TaskState = "remove" + // TaskStateOrphaned ORPHANED + TaskStateOrphaned TaskState = "orphaned" +) + +// Task represents a task. +type Task struct { + ID string + Meta + Annotations + + Spec TaskSpec `json:",omitempty"` + ServiceID string `json:",omitempty"` + Slot int `json:",omitempty"` + NodeID string `json:",omitempty"` + Status TaskStatus `json:",omitempty"` + DesiredState TaskState `json:",omitempty"` + NetworksAttachments []NetworkAttachment `json:",omitempty"` + GenericResources []GenericResource `json:",omitempty"` +} + +// TaskSpec represents the spec of a task. +type TaskSpec struct { + // ContainerSpec, NetworkAttachmentSpec, and PluginSpec are mutually exclusive. + // PluginSpec is only used when the `Runtime` field is set to `plugin` + // NetworkAttachmentSpec is used if the `Runtime` field is set to + // `attachment`. + ContainerSpec *ContainerSpec `json:",omitempty"` + PluginSpec *runtime.PluginSpec `json:",omitempty"` + NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"` + + Resources *ResourceRequirements `json:",omitempty"` + RestartPolicy *RestartPolicy `json:",omitempty"` + Placement *Placement `json:",omitempty"` + Networks []NetworkAttachmentConfig `json:",omitempty"` + + // LogDriver specifies the LogDriver to use for tasks created from this + // spec. If not present, the one on cluster default on swarm.Spec will be + // used, finally falling back to the engine default if not specified. 
+ LogDriver *Driver `json:",omitempty"` + + // ForceUpdate is a counter that triggers an update even if no relevant + // parameters have been changed. + ForceUpdate uint64 + + Runtime RuntimeType `json:",omitempty"` +} + +// Resources represents resources (CPU/Memory). +type Resources struct { + NanoCPUs int64 `json:",omitempty"` + MemoryBytes int64 `json:",omitempty"` + GenericResources []GenericResource `json:",omitempty"` +} + +// GenericResource represents a "user defined" resource which can +// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1) +type GenericResource struct { + NamedResourceSpec *NamedGenericResource `json:",omitempty"` + DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"` +} + +// NamedGenericResource represents a "user defined" resource which is defined +// as a string. +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...) +type NamedGenericResource struct { + Kind string `json:",omitempty"` + Value string `json:",omitempty"` +} + +// DiscreteGenericResource represents a "user defined" resource which is defined +// as an integer +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to count the resource (SSD=5, HDD=3, ...) +type DiscreteGenericResource struct { + Kind string `json:",omitempty"` + Value int64 `json:",omitempty"` +} + +// ResourceRequirements represents resources requirements. +type ResourceRequirements struct { + Limits *Resources `json:",omitempty"` + Reservations *Resources `json:",omitempty"` +} + +// Placement represents orchestration parameters. +type Placement struct { + Constraints []string `json:",omitempty"` + Preferences []PlacementPreference `json:",omitempty"` + MaxReplicas uint64 `json:",omitempty"` + + // Platforms stores all the platforms that the image can run on. + // This field is used in the platform filter for scheduling. If empty, + // then the platform filter is off, meaning there are no scheduling restrictions. + Platforms []Platform `json:",omitempty"` +} + +// PlacementPreference provides a way to make the scheduler aware of factors +// such as topology. +type PlacementPreference struct { + Spread *SpreadOver +} + +// SpreadOver is a scheduling preference that instructs the scheduler to spread +// tasks evenly over groups of nodes identified by labels. +type SpreadOver struct { + // label descriptor, such as engine.labels.az + SpreadDescriptor string +} + +// RestartPolicy represents the restart policy. +type RestartPolicy struct { + Condition RestartPolicyCondition `json:",omitempty"` + Delay *time.Duration `json:",omitempty"` + MaxAttempts *uint64 `json:",omitempty"` + Window *time.Duration `json:",omitempty"` +} + +// RestartPolicyCondition represents when to restart. +type RestartPolicyCondition string + +const ( + // RestartPolicyConditionNone NONE + RestartPolicyConditionNone RestartPolicyCondition = "none" + // RestartPolicyConditionOnFailure ON_FAILURE + RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure" + // RestartPolicyConditionAny ANY + RestartPolicyConditionAny RestartPolicyCondition = "any" +) + +// TaskStatus represents the status of a task. 
+type TaskStatus struct { + Timestamp time.Time `json:",omitempty"` + State TaskState `json:",omitempty"` + Message string `json:",omitempty"` + Err string `json:",omitempty"` + ContainerStatus *ContainerStatus `json:",omitempty"` + PortStatus PortStatus `json:",omitempty"` +} + +// ContainerStatus represents the status of a container. +type ContainerStatus struct { + ContainerID string + PID int + ExitCode int +} + +// PortStatus represents the port status of a task's host ports whose +// service has published host ports +type PortStatus struct { + Ports []PortConfig `json:",omitempty"` +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/types.go b/extender/code/vendor/github.com/docker/docker/api/types/types.go new file mode 100644 index 000000000..a39ffcb7b --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/types.go @@ -0,0 +1,615 @@ +package types // import "github.com/docker/docker/api/types" + +import ( + "errors" + "fmt" + "io" + "os" + "strings" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/go-connections/nat" +) + +// RootFS returns Image's RootFS description including the layer IDs. +type RootFS struct { + Type string + Layers []string `json:",omitempty"` + BaseLayer string `json:",omitempty"` +} + +// ImageInspect contains response of Engine API: +// GET "/images/{name:.*}/json" +type ImageInspect struct { + ID string `json:"Id"` + RepoTags []string + RepoDigests []string + Parent string + Comment string + Created string + Container string + ContainerConfig *container.Config + DockerVersion string + Author string + Config *container.Config + Architecture string + Os string + OsVersion string `json:",omitempty"` + Size int64 + VirtualSize int64 + GraphDriver GraphDriverData + RootFS RootFS + Metadata ImageMetadata +} + +// ImageMetadata contains engine-local data about the image +type ImageMetadata struct { + LastTagTime time.Time `json:",omitempty"` +} + +// Container contains response of Engine API: +// GET "/containers/json" +type Container struct { + ID string `json:"Id"` + Names []string + Image string + ImageID string + Command string + Created int64 + Ports []Port + SizeRw int64 `json:",omitempty"` + SizeRootFs int64 `json:",omitempty"` + Labels map[string]string + State string + Status string + HostConfig struct { + NetworkMode string `json:",omitempty"` + } + NetworkSettings *SummaryNetworkSettings + Mounts []MountPoint +} + +// CopyConfig contains request body of Engine API: +// POST "/containers/"+containerID+"/copy" +type CopyConfig struct { + Resource string +} + +// ContainerPathStat is used to encode the header from +// GET "/containers/{name:.*}/archive" +// "Name" is the file or directory name. 
+type ContainerPathStat struct { + Name string `json:"name"` + Size int64 `json:"size"` + Mode os.FileMode `json:"mode"` + Mtime time.Time `json:"mtime"` + LinkTarget string `json:"linkTarget"` +} + +// ContainerStats contains response of Engine API: +// GET "/stats" +type ContainerStats struct { + Body io.ReadCloser `json:"body"` + OSType string `json:"ostype"` +} + +// Ping contains response of Engine API: +// GET "/_ping" +type Ping struct { + APIVersion string + OSType string + Experimental bool + BuilderVersion BuilderVersion +} + +// ComponentVersion describes the version information for a specific component. +type ComponentVersion struct { + Name string + Version string + Details map[string]string `json:",omitempty"` +} + +// Version contains response of Engine API: +// GET "/version" +type Version struct { + Platform struct{ Name string } `json:",omitempty"` + Components []ComponentVersion `json:",omitempty"` + + // The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility + + Version string + APIVersion string `json:"ApiVersion"` + MinAPIVersion string `json:"MinAPIVersion,omitempty"` + GitCommit string + GoVersion string + Os string + Arch string + KernelVersion string `json:",omitempty"` + Experimental bool `json:",omitempty"` + BuildTime string `json:",omitempty"` +} + +// Commit holds the Git-commit (SHA1) that a binary was built from, as reported +// in the version-string of external tools, such as containerd, or runC. +type Commit struct { + ID string // ID is the actual commit ID of external tool. + Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time. +} + +// Info contains response of Engine API: +// GET "/info" +type Info struct { + ID string + Containers int + ContainersRunning int + ContainersPaused int + ContainersStopped int + Images int + Driver string + DriverStatus [][2]string + SystemStatus [][2]string + Plugins PluginsInfo + MemoryLimit bool + SwapLimit bool + KernelMemory bool + KernelMemoryTCP bool + CPUCfsPeriod bool `json:"CpuCfsPeriod"` + CPUCfsQuota bool `json:"CpuCfsQuota"` + CPUShares bool + CPUSet bool + PidsLimit bool + IPv4Forwarding bool + BridgeNfIptables bool + BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` + Debug bool + NFd int + OomKillDisable bool + NGoroutines int + SystemTime string + LoggingDriver string + CgroupDriver string + NEventsListener int + KernelVersion string + OperatingSystem string + OSType string + Architecture string + IndexServerAddress string + RegistryConfig *registry.ServiceConfig + NCPU int + MemTotal int64 + GenericResources []swarm.GenericResource + DockerRootDir string + HTTPProxy string `json:"HttpProxy"` + HTTPSProxy string `json:"HttpsProxy"` + NoProxy string + Name string + Labels []string + ExperimentalBuild bool + ServerVersion string + ClusterStore string + ClusterAdvertise string + Runtimes map[string]Runtime + DefaultRuntime string + Swarm swarm.Info + // LiveRestoreEnabled determines whether containers should be kept + // running when the daemon is shutdown or upon daemon start if + // running containers are detected + LiveRestoreEnabled bool + Isolation container.Isolation + InitBinary string + ContainerdCommit Commit + RuncCommit Commit + InitCommit Commit + SecurityOptions []string + ProductLicense string `json:",omitempty"` + Warnings []string +} + +// KeyValue holds a key/value pair +type KeyValue struct { + Key, Value string +} + +// SecurityOpt contains the name and options of a security option 
+type SecurityOpt struct { + Name string + Options []KeyValue +} + +// DecodeSecurityOptions decodes a security options string slice to a type-safe +// SecurityOpt +func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { + so := []SecurityOpt{} + for _, opt := range opts { + // support output from a < 1.13 docker daemon + if !strings.Contains(opt, "=") { + so = append(so, SecurityOpt{Name: opt}) + continue + } + secopt := SecurityOpt{} + split := strings.Split(opt, ",") + for _, s := range split { + kv := strings.SplitN(s, "=", 2) + if len(kv) != 2 { + return nil, fmt.Errorf("invalid security option %q", s) + } + if kv[0] == "" || kv[1] == "" { + return nil, errors.New("invalid empty security option") + } + if kv[0] == "name" { + secopt.Name = kv[1] + continue + } + secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]}) + } + so = append(so, secopt) + } + return so, nil +} + +// PluginsInfo is a temp struct holding the names of the plugins +// registered with the docker daemon. It is used by the Info struct +type PluginsInfo struct { + // List of Volume plugins registered + Volume []string + // List of Network plugins registered + Network []string + // List of Authorization plugins registered + Authorization []string + // List of Log plugins registered + Log []string +} + +// ExecStartCheck is a temp struct used by execStart +// Config fields are part of ExecConfig in the runconfig package +type ExecStartCheck struct { + // ExecStart will first check if it's detached + Detach bool + // Check if there's a tty + Tty bool +} + +// HealthcheckResult stores information about a single run of a healthcheck probe +type HealthcheckResult struct { + Start time.Time // Start is the time this check started + End time.Time // End is the time this check ended + ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe + Output string // Output from last check +} + +// Health states +const ( + NoHealthcheck = "none" // Indicates there is no healthcheck + Starting = "starting" // Starting indicates that the container is not yet ready + Healthy = "healthy" // Healthy indicates that the container is running correctly + Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem +) + +// Health stores information about the container's healthcheck results +type Health struct { + Status string // Status is one of Starting, Healthy or Unhealthy + FailingStreak int // FailingStreak is the number of consecutive failures + Log []*HealthcheckResult // Log contains the last few results (oldest first) +} + +// ContainerState stores a container's running state. +// It's part of ContainerJSONBase and is returned by the "inspect" command +type ContainerState struct { + Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead" + Running bool + Paused bool + Restarting bool + OOMKilled bool + Dead bool + Pid int + ExitCode int + Error string + StartedAt string + FinishedAt string + Health *Health `json:",omitempty"` +} + +// ContainerNode stores information about the node that a container +// is running on.
It's only available in Docker Swarm. +type ContainerNode struct { + ID string + IPAddress string `json:"IP"` + Addr string + Name string + Cpus int + Memory int64 + Labels map[string]string +} + +// ContainerJSONBase contains response of Engine API: +// GET "/containers/{name:.*}/json" +type ContainerJSONBase struct { + ID string `json:"Id"` + Created string + Path string + Args []string + State *ContainerState + Image string + ResolvConfPath string + HostnamePath string + HostsPath string + LogPath string + Node *ContainerNode `json:",omitempty"` + Name string + RestartCount int + Driver string + Platform string + MountLabel string + ProcessLabel string + AppArmorProfile string + ExecIDs []string + HostConfig *container.HostConfig + GraphDriver GraphDriverData + SizeRw *int64 `json:",omitempty"` + SizeRootFs *int64 `json:",omitempty"` +} + +// ContainerJSON is a newly used struct along with MountPoint +type ContainerJSON struct { + *ContainerJSONBase + Mounts []MountPoint + Config *container.Config + NetworkSettings *NetworkSettings +} + +// NetworkSettings exposes the network settings in the API +type NetworkSettings struct { + NetworkSettingsBase + DefaultNetworkSettings + Networks map[string]*network.EndpointSettings +} + +// SummaryNetworkSettings provides a summary of a container's networks +// in /containers/json +type SummaryNetworkSettings struct { + Networks map[string]*network.EndpointSettings +} + +// NetworkSettingsBase holds basic information about networks +type NetworkSettingsBase struct { + Bridge string // Bridge is the Bridge name the network uses (e.g. `docker0`) + SandboxID string // SandboxID uniquely represents a container's network stack + HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface + LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix + LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address + Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port + SandboxKey string // SandboxKey identifies the sandbox + SecondaryIPAddresses []network.Address + SecondaryIPv6Addresses []network.Address +} + +// DefaultNetworkSettings holds network information +// during the 2 release deprecation period. +// It will be removed in Docker 1.11. +type DefaultNetworkSettings struct { + EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox + Gateway string // Gateway holds the gateway address for the network + GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address + GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address + IPAddress string // IPAddress holds the IPv4 address for the network + IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address + IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 + MacAddress string // MacAddress holds the MAC address for the network +} + +// MountPoint represents a mount point configuration inside the container. +// This is used for reporting the mountpoints in use by a container.
+type MountPoint struct { + Type mount.Type `json:",omitempty"` + Name string `json:",omitempty"` + Source string + Destination string + Driver string `json:",omitempty"` + Mode string + RW bool + Propagation mount.Propagation +} + +// NetworkResource is the body of the "get network" http response message +type NetworkResource struct { + Name string // Name is the requested name of the network + ID string `json:"Id"` // ID uniquely identifies a network on a single machine + Created time.Time // Created is the time the network was created + Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level) + Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) + EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 + IPAM network.IPAM // IPAM is the network's IP Address Management + Internal bool // Internal represents if the network is used internally only + Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. + Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. + ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. + ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. + Containers map[string]EndpointResource // Containers contains endpoints belonging to the network + Options map[string]string // Options holds the network specific options to use when creating the network + Labels map[string]string // Labels holds metadata specific to the network being created + Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network + Services map[string]network.ServiceInfo `json:",omitempty"` +} + +// EndpointResource contains network resources allocated and used for a container in a network +type EndpointResource struct { + Name string + EndpointID string + MacAddress string + IPv4Address string + IPv6Address string +} + +// NetworkCreate is the expected body of the "create network" http request message +type NetworkCreate struct { + // Check for networks with duplicate names. + // Network is primarily keyed based on a random ID and not on the name. + // Network name is strictly a user-friendly alias to the network + // which is uniquely identified using ID. + // And there is no guaranteed way to check for duplicates. + // Option CheckDuplicate is there to provide a best effort checking of any networks + // which have the same name but it is not guaranteed to catch all name collisions. + CheckDuplicate bool + Driver string + Scope string + EnableIPv6 bool + IPAM *network.IPAM + Internal bool + Attachable bool + Ingress bool + ConfigOnly bool + ConfigFrom *network.ConfigReference + Options map[string]string + Labels map[string]string +} + +// NetworkCreateRequest is the request message sent to the server for the network create call.
+type NetworkCreateRequest struct { + NetworkCreate + Name string +} + +// NetworkCreateResponse is the response message sent by the server for network create call +type NetworkCreateResponse struct { + ID string `json:"Id"` + Warning string +} + +// NetworkConnect represents the data to be used to connect a container to the network +type NetworkConnect struct { + Container string + EndpointConfig *network.EndpointSettings `json:",omitempty"` +} + +// NetworkDisconnect represents the data to be used to disconnect a container from the network +type NetworkDisconnect struct { + Container string + Force bool +} + +// NetworkInspectOptions holds parameters to inspect network +type NetworkInspectOptions struct { + Scope string + Verbose bool +} + +// Checkpoint represents the details of a checkpoint +type Checkpoint struct { + Name string // Name is the name of the checkpoint +} + +// Runtime describes an OCI runtime +type Runtime struct { + Path string `json:"path"` + Args []string `json:"runtimeArgs,omitempty"` +} + +// DiskUsage contains response of Engine API: +// GET "/system/df" +type DiskUsage struct { + LayersSize int64 + Images []*ImageSummary + Containers []*Container + Volumes []*Volume + BuildCache []*BuildCache + BuilderSize int64 // deprecated +} + +// ContainersPruneReport contains the response for Engine API: +// POST "/containers/prune" +type ContainersPruneReport struct { + ContainersDeleted []string + SpaceReclaimed uint64 +} + +// VolumesPruneReport contains the response for Engine API: +// POST "/volumes/prune" +type VolumesPruneReport struct { + VolumesDeleted []string + SpaceReclaimed uint64 +} + +// ImagesPruneReport contains the response for Engine API: +// POST "/images/prune" +type ImagesPruneReport struct { + ImagesDeleted []ImageDeleteResponseItem + SpaceReclaimed uint64 +} + +// BuildCachePruneReport contains the response for Engine API: +// POST "/build/prune" +type BuildCachePruneReport struct { + CachesDeleted []string + SpaceReclaimed uint64 +} + +// NetworksPruneReport contains the response for Engine API: +// POST "/networks/prune" +type NetworksPruneReport struct { + NetworksDeleted []string +} + +// SecretCreateResponse contains the information returned to a client +// on the creation of a new secret. +type SecretCreateResponse struct { + // ID is the id of the created secret. + ID string +} + +// SecretListOptions holds parameters to list secrets +type SecretListOptions struct { + Filters filters.Args +} + +// ConfigCreateResponse contains the information returned to a client +// on the creation of a new config. +type ConfigCreateResponse struct { + // ID is the id of the created config. + ID string +} + +// ConfigListOptions holds parameters to list configs +type ConfigListOptions struct { + Filters filters.Args +} + +// PushResult contains the tag, manifest digest, and manifest size from the +// push. It's used to signal this information to the trust code in the client +// so it can sign the manifest if necessary. 
+type PushResult struct { + Tag string + Digest string + Size int +} + +// BuildResult contains the image ID of a successful build +type BuildResult struct { + ID string +} + +// BuildCache contains information about a build cache record +type BuildCache struct { + ID string + Parent string + Type string + Description string + InUse bool + Shared bool + Size int64 + CreatedAt time.Time + LastUsedAt *time.Time + UsageCount int +} + +// BuildCachePruneOptions holds parameters to prune the build cache +type BuildCachePruneOptions struct { + All bool + KeepStorage int64 + Filters filters.Args +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/versions/README.md b/extender/code/vendor/github.com/docker/docker/api/types/versions/README.md new file mode 100644 index 000000000..1ef911edb --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/versions/README.md @@ -0,0 +1,14 @@ +# Legacy API type versions + +This package includes types for legacy API versions. The stable version of the API types lives in `api/types/*.go`. + +Consider moving a type here when you need to keep backwards compatibility in the API. These legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal to `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since versions below that will use the legacy types in `v1p19`. + +## Package name conventions + +The package name convention is to use `v` as a prefix for the version number and `p` (patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: + +1. We cannot use `.` because it's interpreted by the language; think of `v1.20.CallFunction`. +2. We cannot use `_` because golint complains about it. The code is actually valid, but it arguably looks weirder: `v1_20.CallFunction`. + +For instance, if you want to modify a type that was available in version `1.21` of the API but will have different fields in version `1.22`, create a new package under `api/types/versions/v1p21`. diff --git a/extender/code/vendor/github.com/docker/docker/api/types/versions/compare.go b/extender/code/vendor/github.com/docker/docker/api/types/versions/compare.go new file mode 100644 index 000000000..8ccb0aa92 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/versions/compare.go @@ -0,0 +1,62 @@ +package versions // import "github.com/docker/docker/api/types/versions" + +import ( + "strconv" + "strings" +) + +// compare compares two version strings +// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise.
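Before the implementation, a usage sketch of the exported helpers defined below; the point to notice is that version segments compare numerically, not lexically:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

func main() {
	// Segments are parsed as integers, so "1.10" is newer than "1.9"
	// (a plain string comparison would get this backwards).
	fmt.Println(versions.LessThan("1.9", "1.10")) // true
	// Missing segments count as zero: "1.20" equals "1.20.0".
	fmt.Println(versions.Equal("1.20", "1.20.0")) // true
	fmt.Println(versions.GreaterThanOrEqualTo("1.21", "1.20")) // true
}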
+func compare(v1, v2 string) int { + var ( + currTab = strings.Split(v1, ".") + otherTab = strings.Split(v2, ".") + ) + + max := len(currTab) + if len(otherTab) > max { + max = len(otherTab) + } + for i := 0; i < max; i++ { + var currInt, otherInt int + + if len(currTab) > i { + currInt, _ = strconv.Atoi(currTab[i]) + } + if len(otherTab) > i { + otherInt, _ = strconv.Atoi(otherTab[i]) + } + if currInt > otherInt { + return 1 + } + if otherInt > currInt { + return -1 + } + } + return 0 +} + +// LessThan checks if a version is less than another +func LessThan(v, other string) bool { + return compare(v, other) == -1 +} + +// LessThanOrEqualTo checks if a version is less than or equal to another +func LessThanOrEqualTo(v, other string) bool { + return compare(v, other) <= 0 +} + +// GreaterThan checks if a version is greater than another +func GreaterThan(v, other string) bool { + return compare(v, other) == 1 +} + +// GreaterThanOrEqualTo checks if a version is greater than or equal to another +func GreaterThanOrEqualTo(v, other string) bool { + return compare(v, other) >= 0 +} + +// Equal checks if a version is equal to another +func Equal(v, other string) bool { + return compare(v, other) == 0 +} diff --git a/extender/code/vendor/github.com/docker/docker/api/types/volume.go b/extender/code/vendor/github.com/docker/docker/api/types/volume.go new file mode 100644 index 000000000..b5ee96a50 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/api/types/volume.go @@ -0,0 +1,69 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Volume volume +// swagger:model Volume +type Volume struct { + + // Date/Time the volume was created. + CreatedAt string `json:"CreatedAt,omitempty"` + + // Name of the volume driver used by the volume. + // Required: true + Driver string `json:"Driver"` + + // User-defined key/value metadata. + // Required: true + Labels map[string]string `json:"Labels"` + + // Mount path of the volume on the host. + // Required: true + Mountpoint string `json:"Mountpoint"` + + // Name of the volume. + // Required: true + Name string `json:"Name"` + + // The driver specific options used when creating the volume. + // Required: true + Options map[string]string `json:"Options"` + + // The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level. + // Required: true + Scope string `json:"Scope"` + + // Low-level details about the volume, provided by the volume driver. + // Details are returned as a map with key/value pairs: + // `{"key":"value","key2":"value2"}`. + // + // The `Status` field is optional, and is omitted if the volume driver + // does not support this feature. + // + Status map[string]interface{} `json:"Status,omitempty"` + + // usage data + UsageData *VolumeUsageData `json:"UsageData,omitempty"` +} + +// VolumeUsageData Usage details about the volume. This information is used by the +// `GET /system/df` endpoint, and omitted in other endpoints. +// +// swagger:model VolumeUsageData +type VolumeUsageData struct { + + // The number of containers referencing this volume. This field + // is set to `-1` if the reference-count is not available. + // + // Required: true + RefCount int64 `json:"RefCount"` + + // Amount of disk space used by the volume (in bytes). This information + // is only available for volumes created with the `"local"` volume + // driver. 
For volumes created with other volume drivers, this field + // is set to `-1` ("not available") + // + // Required: true + Size int64 `json:"Size"` +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/builder.go b/extender/code/vendor/github.com/docker/docker/builder/builder.go new file mode 100644 index 000000000..3eb034141 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/builder.go @@ -0,0 +1,115 @@ +// Package builder defines interfaces for any Docker builder to implement. +// +// Historically, only server-side Dockerfile interpreters existed. +// This package allows for other implementations of Docker builders. +package builder // import "github.com/docker/docker/builder" + +import ( + "context" + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/containerfs" +) + +const ( + // DefaultDockerfileName is the default filename with Docker commands, read by docker build + DefaultDockerfileName = "Dockerfile" +) + +// Source defines a location that can be used as a source for the ADD/COPY +// instructions in the builder. +type Source interface { + // Root returns the root path for accessing the source + Root() containerfs.ContainerFS + // Close signals that the filesystem tree won't be used anymore. + // For Context implementations using a temporary directory, it is recommended to + // delete the temporary directory in Close(). + Close() error + // Hash returns a checksum for a file + Hash(path string) (string, error) +} + +// Backend abstracts calls to a Docker Daemon. +type Backend interface { + ImageBackend + ExecBackend + + // CommitBuildStep creates a new Docker image from the config generated by + // a build step. + CommitBuildStep(backend.CommitConfig) (image.ID, error) + // ContainerCreateWorkdir creates the workdir + ContainerCreateWorkdir(containerID string) error + + CreateImage(config []byte, parent string) (Image, error) + + ImageCacheBuilder +} + +// ImageBackend describes the interface methods required from an image component +type ImageBackend interface { + GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (Image, ROLayer, error) +} + +// ExecBackend contains the interface methods required for executing containers +type ExecBackend interface { + // ContainerAttachRaw attaches to a container. + ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error + // ContainerCreate creates a new Docker container and returns potential warnings + ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + // ContainerRm removes a container specified by `id`. + ContainerRm(name string, config *types.ContainerRmConfig) error + // ContainerKill stops the container execution abruptly. + ContainerKill(containerID string, sig uint64) error + // ContainerStart starts a new container + ContainerStart(containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error + // ContainerWait stops processing until the given container is stopped.
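As a sketch of how a caller consumes this method (the same pattern appears in containerManager.Run in containerbackend.go later in this diff; `backend`, `ctx`, and `containerID` are assumed to be in scope):

// Block until the container is no longer running, then check its exit code.
waitC, err := backend.ContainerWait(ctx, containerID, containerpkg.WaitConditionNotRunning)
if err != nil {
	return err
}
if status := <-waitC; status.ExitCode() != 0 {
	return fmt.Errorf("container exited with code %d", status.ExitCode())
}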
+ ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error) +} + +// Result is the output produced by a Builder +type Result struct { + ImageID string + FromImage Image +} + +// ImageCacheBuilder represents a generator for stateful image cache. +type ImageCacheBuilder interface { + // MakeImageCache creates a stateful image cache. + MakeImageCache(cacheFrom []string) ImageCache +} + +// ImageCache abstracts an image cache. +// (parent image, child runconfig) -> child image +type ImageCache interface { + // GetCache returns a reference to a cached image whose parent equals `parent` + // and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error. + GetCache(parentID string, cfg *container.Config) (imageID string, err error) +} + +// Image represents a Docker image used by the builder. +type Image interface { + ImageID() string + RunConfig() *container.Config + MarshalJSON() ([]byte, error) + OperatingSystem() string +} + +// ROLayer is a reference to image rootfs layer +type ROLayer interface { + Release() error + NewRWLayer() (RWLayer, error) + DiffID() layer.DiffID +} + +// RWLayer is active layer that can be read/modified +type RWLayer interface { + Release() error + Root() containerfs.ContainerFS + Commit() (ROLayer, error) +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/dockerfile/buildargs.go b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/buildargs.go new file mode 100644 index 000000000..aa794fb7b --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/buildargs.go @@ -0,0 +1,172 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "fmt" + "io" + + "github.com/docker/docker/runconfig/opts" +) + +// builtinAllowedBuildArgs is list of built-in allowed build args +// these args are considered transparent and are excluded from the image history. +// Filtering from history is implemented in dispatchers.go +var builtinAllowedBuildArgs = map[string]bool{ + "HTTP_PROXY": true, + "http_proxy": true, + "HTTPS_PROXY": true, + "https_proxy": true, + "FTP_PROXY": true, + "ftp_proxy": true, + "NO_PROXY": true, + "no_proxy": true, +} + +// BuildArgs manages arguments used by the builder +type BuildArgs struct { + // args that are allowed for expansion/substitution and passing to commands in 'run'. 
+ allowedBuildArgs map[string]*string + // args defined before the first `FROM` in a Dockerfile + allowedMetaArgs map[string]*string + // args referenced by the Dockerfile + referencedArgs map[string]struct{} + // args provided by the user on the command line + argsFromOptions map[string]*string +} + +// NewBuildArgs creates a new BuildArgs type +func NewBuildArgs(argsFromOptions map[string]*string) *BuildArgs { + return &BuildArgs{ + allowedBuildArgs: make(map[string]*string), + allowedMetaArgs: make(map[string]*string), + referencedArgs: make(map[string]struct{}), + argsFromOptions: argsFromOptions, + } +} + +// Clone returns a copy of the BuildArgs type +func (b *BuildArgs) Clone() *BuildArgs { + result := NewBuildArgs(b.argsFromOptions) + for k, v := range b.allowedBuildArgs { + result.allowedBuildArgs[k] = v + } + for k, v := range b.allowedMetaArgs { + result.allowedMetaArgs[k] = v + } + for k := range b.referencedArgs { + result.referencedArgs[k] = struct{}{} + } + return result +} + +// MergeReferencedArgs merges referenced args from another BuildArgs +// object into the current one +func (b *BuildArgs) MergeReferencedArgs(other *BuildArgs) { + for k := range other.referencedArgs { + b.referencedArgs[k] = struct{}{} + } +} + +// WarnOnUnusedBuildArgs checks if there are any leftover build-args that were +// passed but not consumed during build. Print a warning, if there are any. +func (b *BuildArgs) WarnOnUnusedBuildArgs(out io.Writer) { + var leftoverArgs []string + for arg := range b.argsFromOptions { + _, isReferenced := b.referencedArgs[arg] + _, isBuiltin := builtinAllowedBuildArgs[arg] + if !isBuiltin && !isReferenced { + leftoverArgs = append(leftoverArgs, arg) + } + } + if len(leftoverArgs) > 0 { + fmt.Fprintf(out, "[Warning] One or more build-args %v were not consumed\n", leftoverArgs) + } +} + +// ResetAllowed clears the list of args that are allowed to be used by a +// directive +func (b *BuildArgs) ResetAllowed() { + b.allowedBuildArgs = make(map[string]*string) +} + +// AddMetaArg adds a new meta arg that can be used by FROM directives +func (b *BuildArgs) AddMetaArg(key string, value *string) { + b.allowedMetaArgs[key] = value +} + +// AddArg adds a new arg that can be used by directives +func (b *BuildArgs) AddArg(key string, value *string) { + b.allowedBuildArgs[key] = value + b.referencedArgs[key] = struct{}{} +} + +// IsReferencedOrNotBuiltin checks if the key is a built-in arg, or if it has been +// referenced by the Dockerfile. Returns true if the arg is not a builtin or +// if the builtin has been referenced in the Dockerfile. 
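To make the interplay of these maps concrete, a small in-package sketch (values are hypothetical, `fmt` and `os` imports are assumed, and `strPtr` is a helper defined only for the example):

func strPtr(s string) *string { return &s }

func exampleBuildArgs() {
	// As if the user ran: docker build --build-arg VERSION=1.2.3
	args := NewBuildArgs(map[string]*string{"VERSION": strPtr("1.2.3")})
	args.AddMetaArg("BASE", strPtr("alpine")) // ARG BASE=alpine before the first FROM
	args.AddArg("VERSION", nil)               // ARG VERSION inside a stage

	// The command-line value overrides the (empty) default:
	fmt.Println(args.GetAllAllowed()) // map[VERSION:1.2.3]

	// Built-in proxy args only count once the Dockerfile declares them:
	fmt.Println(args.IsReferencedOrNotBuiltin("HTTP_PROXY")) // false
	fmt.Println(args.IsReferencedOrNotBuiltin("VERSION"))    // true

	// VERSION was referenced, so no warning is printed here.
	args.WarnOnUnusedBuildArgs(os.Stdout)
}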
+func (b *BuildArgs) IsReferencedOrNotBuiltin(key string) bool { + _, isBuiltin := builtinAllowedBuildArgs[key] + _, isAllowed := b.allowedBuildArgs[key] + return isAllowed || !isBuiltin +} + +// GetAllAllowed returns a mapping with all the allowed args +func (b *BuildArgs) GetAllAllowed() map[string]string { + return b.getAllFromMapping(b.allowedBuildArgs) +} + +// GetAllMeta returns a mapping with all the meta args +func (b *BuildArgs) GetAllMeta() map[string]string { + return b.getAllFromMapping(b.allowedMetaArgs) +} + +func (b *BuildArgs) getAllFromMapping(source map[string]*string) map[string]string { + m := make(map[string]string) + + keys := keysFromMaps(source, builtinAllowedBuildArgs) + for _, key := range keys { + v, ok := b.getBuildArg(key, source) + if ok { + m[key] = v + } + } + return m +} + +// FilterAllowed returns all allowed args without the filtered args +func (b *BuildArgs) FilterAllowed(filter []string) []string { + envs := []string{} + configEnv := opts.ConvertKVStringsToMap(filter) + + for key, val := range b.GetAllAllowed() { + if _, ok := configEnv[key]; !ok { + envs = append(envs, fmt.Sprintf("%s=%s", key, val)) + } + } + return envs +} + +func (b *BuildArgs) getBuildArg(key string, mapping map[string]*string) (string, bool) { + defaultValue, exists := mapping[key] + // Return override from options if one is defined + if v, ok := b.argsFromOptions[key]; ok && v != nil { + return *v, ok + } + + if defaultValue == nil { + if v, ok := b.allowedMetaArgs[key]; ok && v != nil { + return *v, ok + } + return "", false + } + return *defaultValue, exists +} + +func keysFromMaps(source map[string]*string, builtin map[string]bool) []string { + keys := []string{} + for key := range source { + keys = append(keys, key) + } + for key := range builtin { + keys = append(keys, key) + } + return keys +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/dockerfile/builder.go b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/builder.go new file mode 100644 index 000000000..a6094a897 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/builder.go @@ -0,0 +1,437 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "sort" + "strings" + "time" + + "github.com/containerd/containerd/platforms" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/fscache" + "github.com/docker/docker/builder/remotecontext" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/moby/buildkit/frontend/dockerfile/parser" + "github.com/moby/buildkit/frontend/dockerfile/shell" + "github.com/moby/buildkit/session" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sync/syncmap" +) + +var validCommitCommands = map[string]bool{ + "cmd": true, + "entrypoint": true, + "healthcheck": true, + "env": true, + "expose": true, + "label": true, + "onbuild": true, + "user": true, + "volume": true, + "workdir": true, +} + +const ( + stepFormat = "Step %d/%d : %v" +) + +// SessionGetter is object used to get access to 
a session by uuid +type SessionGetter interface { + Get(ctx context.Context, uuid string) (session.Caller, error) +} + +// BuildManager is shared across all Builder objects +type BuildManager struct { + idMapping *idtools.IdentityMapping + backend builder.Backend + pathCache pathCache // TODO: make this persistent + sg SessionGetter + fsCache *fscache.FSCache +} + +// NewBuildManager creates a BuildManager +func NewBuildManager(b builder.Backend, sg SessionGetter, fsCache *fscache.FSCache, identityMapping *idtools.IdentityMapping) (*BuildManager, error) { + bm := &BuildManager{ + backend: b, + pathCache: &syncmap.Map{}, + sg: sg, + idMapping: identityMapping, + fsCache: fsCache, + } + if err := fsCache.RegisterTransport(remotecontext.ClientSessionRemote, NewClientSessionTransport()); err != nil { + return nil, err + } + return bm, nil +} + +// Build starts a new build from a BuildConfig +func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) (*builder.Result, error) { + buildsTriggered.Inc() + if config.Options.Dockerfile == "" { + config.Options.Dockerfile = builder.DefaultDockerfileName + } + + source, dockerfile, err := remotecontext.Detect(config) + if err != nil { + return nil, err + } + defer func() { + if source != nil { + if err := source.Close(); err != nil { + logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err) + } + } + }() + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + if src, err := bm.initializeClientSession(ctx, cancel, config.Options); err != nil { + return nil, err + } else if src != nil { + source = src + } + + builderOptions := builderOptions{ + Options: config.Options, + ProgressWriter: config.ProgressWriter, + Backend: bm.backend, + PathCache: bm.pathCache, + IDMapping: bm.idMapping, + } + b, err := newBuilder(ctx, builderOptions) + if err != nil { + return nil, err + } + return b.build(source, dockerfile) +} + +func (bm *BuildManager) initializeClientSession(ctx context.Context, cancel func(), options *types.ImageBuildOptions) (builder.Source, error) { + if options.SessionID == "" || bm.sg == nil { + return nil, nil + } + logrus.Debug("client is session enabled") + + connectCtx, cancelCtx := context.WithTimeout(ctx, sessionConnectTimeout) + defer cancelCtx() + + c, err := bm.sg.Get(connectCtx, options.SessionID) + if err != nil { + return nil, err + } + go func() { + <-c.Context().Done() + cancel() + }() + if options.RemoteContext == remotecontext.ClientSessionRemote { + st := time.Now() + csi, err := NewClientSessionSourceIdentifier(ctx, bm.sg, options.SessionID) + if err != nil { + return nil, err + } + src, err := bm.fsCache.SyncFrom(ctx, csi) + if err != nil { + return nil, err + } + logrus.Debugf("sync-time: %v", time.Since(st)) + return src, nil + } + return nil, nil +} + +// builderOptions are the dependencies required by the builder +type builderOptions struct { + Options *types.ImageBuildOptions + Backend builder.Backend + ProgressWriter backend.ProgressWriter + PathCache pathCache + IDMapping *idtools.IdentityMapping +} + +// Builder is a Dockerfile builder +// It implements the builder.Backend interface. 
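One detail in newBuilder, which follows the struct, deserves a note: the API's platform string is normalized through containerd's parser before use. An illustrative fragment, assuming the containerd `platforms` package already imported here:

// platforms.Parse turns a specifier like "linux/arm64" into a structured
// OCI platform with separate OS and architecture fields.
sp, err := platforms.Parse("linux/arm64")
if err != nil {
	return nil, err
}
fmt.Println(sp.OS, sp.Architecture) // linux arm64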
+type Builder struct { + options *types.ImageBuildOptions + + Stdout io.Writer + Stderr io.Writer + Aux *streamformatter.AuxFormatter + Output io.Writer + + docker builder.Backend + clientCtx context.Context + + idMapping *idtools.IdentityMapping + disableCommit bool + imageSources *imageSources + pathCache pathCache + containerManager *containerManager + imageProber ImageProber + platform *specs.Platform +} + +// newBuilder creates a new Dockerfile builder from an optional dockerfile and a Options. +func newBuilder(clientCtx context.Context, options builderOptions) (*Builder, error) { + config := options.Options + if config == nil { + config = new(types.ImageBuildOptions) + } + + b := &Builder{ + clientCtx: clientCtx, + options: config, + Stdout: options.ProgressWriter.StdoutFormatter, + Stderr: options.ProgressWriter.StderrFormatter, + Aux: options.ProgressWriter.AuxFormatter, + Output: options.ProgressWriter.Output, + docker: options.Backend, + idMapping: options.IDMapping, + imageSources: newImageSources(clientCtx, options), + pathCache: options.PathCache, + imageProber: newImageProber(options.Backend, config.CacheFrom, config.NoCache), + containerManager: newContainerManager(options.Backend), + } + + // same as in Builder.Build in builder/builder-next/builder.go + // TODO: remove once config.Platform is of type specs.Platform + if config.Platform != "" { + sp, err := platforms.Parse(config.Platform) + if err != nil { + return nil, err + } + if err := system.ValidatePlatform(sp); err != nil { + return nil, err + } + b.platform = &sp + } + + return b, nil +} + +// Build 'LABEL' command(s) from '--label' options and add to the last stage +func buildLabelOptions(labels map[string]string, stages []instructions.Stage) { + keys := []string{} + for key := range labels { + keys = append(keys, key) + } + + // Sort the label to have a repeatable order + sort.Strings(keys) + for _, key := range keys { + value := labels[key] + stages[len(stages)-1].AddCommand(instructions.NewLabelCommand(key, value, true)) + } +} + +// Build runs the Dockerfile builder by parsing the Dockerfile and executing +// the instructions from the file. +func (b *Builder) build(source builder.Source, dockerfile *parser.Result) (*builder.Result, error) { + defer b.imageSources.Unmount() + + stages, metaArgs, err := instructions.Parse(dockerfile.AST) + if err != nil { + if instructions.IsUnknownInstruction(err) { + buildsFailed.WithValues(metricsUnknownInstructionError).Inc() + } + return nil, errdefs.InvalidParameter(err) + } + if b.options.Target != "" { + targetIx, found := instructions.HasStage(stages, b.options.Target) + if !found { + buildsFailed.WithValues(metricsBuildTargetNotReachableError).Inc() + return nil, errdefs.InvalidParameter(errors.Errorf("failed to reach build target %s in Dockerfile", b.options.Target)) + } + stages = stages[:targetIx+1] + } + + // Add 'LABEL' command specified by '--label' option to the last stage + buildLabelOptions(b.options.Labels, stages) + + dockerfile.PrintWarnings(b.Stderr) + dispatchState, err := b.dispatchDockerfileWithCancellation(stages, metaArgs, dockerfile.EscapeToken, source) + if err != nil { + return nil, err + } + if dispatchState.imageID == "" { + buildsFailed.WithValues(metricsDockerfileEmptyError).Inc() + return nil, errors.New("No image was generated. 
Is your Dockerfile empty?") + } + return &builder.Result{ImageID: dispatchState.imageID, FromImage: dispatchState.baseImage}, nil +} + +func emitImageID(aux *streamformatter.AuxFormatter, state *dispatchState) error { + if aux == nil || state.imageID == "" { + return nil + } + return aux.Emit("", types.BuildResult{ID: state.imageID}) +} + +func processMetaArg(meta instructions.ArgCommand, shlex *shell.Lex, args *BuildArgs) error { + // shell.Lex currently only support the concatenated string format + envs := convertMapToEnvList(args.GetAllAllowed()) + if err := meta.Expand(func(word string) (string, error) { + return shlex.ProcessWord(word, envs) + }); err != nil { + return err + } + args.AddArg(meta.Key, meta.Value) + args.AddMetaArg(meta.Key, meta.Value) + return nil +} + +func printCommand(out io.Writer, currentCommandIndex int, totalCommands int, cmd interface{}) int { + fmt.Fprintf(out, stepFormat, currentCommandIndex, totalCommands, cmd) + fmt.Fprintln(out) + return currentCommandIndex + 1 +} + +func (b *Builder) dispatchDockerfileWithCancellation(parseResult []instructions.Stage, metaArgs []instructions.ArgCommand, escapeToken rune, source builder.Source) (*dispatchState, error) { + dispatchRequest := dispatchRequest{} + buildArgs := NewBuildArgs(b.options.BuildArgs) + totalCommands := len(metaArgs) + len(parseResult) + currentCommandIndex := 1 + for _, stage := range parseResult { + totalCommands += len(stage.Commands) + } + shlex := shell.NewLex(escapeToken) + for _, meta := range metaArgs { + currentCommandIndex = printCommand(b.Stdout, currentCommandIndex, totalCommands, &meta) + + err := processMetaArg(meta, shlex, buildArgs) + if err != nil { + return nil, err + } + } + + stagesResults := newStagesBuildResults() + + for _, stage := range parseResult { + if err := stagesResults.checkStageNameAvailable(stage.Name); err != nil { + return nil, err + } + dispatchRequest = newDispatchRequest(b, escapeToken, source, buildArgs, stagesResults) + + currentCommandIndex = printCommand(b.Stdout, currentCommandIndex, totalCommands, stage.SourceCode) + if err := initializeStage(dispatchRequest, &stage); err != nil { + return nil, err + } + dispatchRequest.state.updateRunConfig() + fmt.Fprintf(b.Stdout, " ---> %s\n", stringid.TruncateID(dispatchRequest.state.imageID)) + for _, cmd := range stage.Commands { + select { + case <-b.clientCtx.Done(): + logrus.Debug("Builder: build cancelled!") + fmt.Fprint(b.Stdout, "Build cancelled\n") + buildsFailed.WithValues(metricsBuildCanceled).Inc() + return nil, errors.New("Build cancelled") + default: + // Not cancelled yet, keep going... + } + + currentCommandIndex = printCommand(b.Stdout, currentCommandIndex, totalCommands, cmd) + + if err := dispatch(dispatchRequest, cmd); err != nil { + return nil, err + } + dispatchRequest.state.updateRunConfig() + fmt.Fprintf(b.Stdout, " ---> %s\n", stringid.TruncateID(dispatchRequest.state.imageID)) + + } + if err := emitImageID(b.Aux, dispatchRequest.state); err != nil { + return nil, err + } + buildArgs.MergeReferencedArgs(dispatchRequest.state.buildArgs) + if err := commitStage(dispatchRequest.state, stagesResults); err != nil { + return nil, err + } + } + buildArgs.WarnOnUnusedBuildArgs(b.Stdout) + return dispatchRequest.state, nil +} + +// BuildFromConfig builds directly from `changes`, treating it as if it were the contents of a Dockerfile +// It will: +// - Call parse.Parse() to get an AST root for the concatenated Dockerfile entries. 
+// - Do build by calling builder.dispatch() to call all entries' handling routines +// +// BuildFromConfig is used by the /commit endpoint, with the changes +// coming from the query parameter of the same name. +// +// TODO: Remove? +func BuildFromConfig(config *container.Config, changes []string, os string) (*container.Config, error) { + if !system.IsOSSupported(os) { + return nil, errdefs.InvalidParameter(system.ErrNotSupportedOperatingSystem) + } + if len(changes) == 0 { + return config, nil + } + + dockerfile, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n"))) + if err != nil { + return nil, errdefs.InvalidParameter(err) + } + + b, err := newBuilder(context.Background(), builderOptions{ + Options: &types.ImageBuildOptions{NoCache: true}, + }) + if err != nil { + return nil, err + } + + // ensure that the commands are valid + for _, n := range dockerfile.AST.Children { + if !validCommitCommands[n.Value] { + return nil, errdefs.InvalidParameter(errors.Errorf("%s is not a valid change command", n.Value)) + } + } + + b.Stdout = ioutil.Discard + b.Stderr = ioutil.Discard + b.disableCommit = true + + var commands []instructions.Command + for _, n := range dockerfile.AST.Children { + cmd, err := instructions.ParseCommand(n) + if err != nil { + return nil, errdefs.InvalidParameter(err) + } + commands = append(commands, cmd) + } + + dispatchRequest := newDispatchRequest(b, dockerfile.EscapeToken, nil, NewBuildArgs(b.options.BuildArgs), newStagesBuildResults()) + // We make mutations to the configuration, ensure we have a copy + dispatchRequest.state.runConfig = copyRunConfig(config) + dispatchRequest.state.imageID = config.Image + dispatchRequest.state.operatingSystem = os + for _, cmd := range commands { + err := dispatch(dispatchRequest, cmd) + if err != nil { + return nil, errdefs.InvalidParameter(err) + } + dispatchRequest.state.updateRunConfig() + } + + return dispatchRequest.state.runConfig, nil +} + +func convertMapToEnvList(m map[string]string) []string { + result := []string{} + for k, v := range m { + result = append(result, k+"="+v) + } + return result +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/dockerfile/builder_unix.go b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/builder_unix.go new file mode 100644 index 000000000..c4453459b --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/builder_unix.go @@ -0,0 +1,7 @@ +// +build !windows + +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +func defaultShellForOS(os string) []string { + return []string{"/bin/sh", "-c"} +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/dockerfile/builder_windows.go b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/builder_windows.go new file mode 100644 index 000000000..fbafa52ae --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/builder_windows.go @@ -0,0 +1,8 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +func defaultShellForOS(os string) []string { + if os == "linux" { + return []string{"/bin/sh", "-c"} + } + return []string{"cmd", "/S", "/C"} +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/dockerfile/clientsession.go b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/clientsession.go new file mode 100644 index 000000000..b48090d7b --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/clientsession.go @@ 
-0,0 +1,76 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "context" + "time" + + "github.com/docker/docker/builder/fscache" + "github.com/docker/docker/builder/remotecontext" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/session/filesync" + "github.com/pkg/errors" +) + +const sessionConnectTimeout = 5 * time.Second + +// ClientSessionTransport is a transport for copying files from docker client +// to the daemon. +type ClientSessionTransport struct{} + +// NewClientSessionTransport returns new ClientSessionTransport instance +func NewClientSessionTransport() *ClientSessionTransport { + return &ClientSessionTransport{} +} + +// Copy data from a remote to a destination directory. +func (cst *ClientSessionTransport) Copy(ctx context.Context, id fscache.RemoteIdentifier, dest string, cu filesync.CacheUpdater) error { + csi, ok := id.(*ClientSessionSourceIdentifier) + if !ok { + return errors.New("invalid identifier for client session") + } + + return filesync.FSSync(ctx, csi.caller, filesync.FSSendRequestOpt{ + IncludePatterns: csi.includePatterns, + DestDir: dest, + CacheUpdater: cu, + }) +} + +// ClientSessionSourceIdentifier is an identifier that can be used for requesting +// files from remote client +type ClientSessionSourceIdentifier struct { + includePatterns []string + caller session.Caller + uuid string +} + +// NewClientSessionSourceIdentifier returns new ClientSessionSourceIdentifier instance +func NewClientSessionSourceIdentifier(ctx context.Context, sg SessionGetter, uuid string) (*ClientSessionSourceIdentifier, error) { + csi := &ClientSessionSourceIdentifier{ + uuid: uuid, + } + caller, err := sg.Get(ctx, uuid) + if err != nil { + return nil, errors.Wrapf(err, "failed to get session for %s", uuid) + } + + csi.caller = caller + return csi, nil +} + +// Transport returns transport identifier for remote identifier +func (csi *ClientSessionSourceIdentifier) Transport() string { + return remotecontext.ClientSessionRemote +} + +// SharedKey returns shared key for remote identifier. Shared key is used +// for finding the base for a repeated transfer. +func (csi *ClientSessionSourceIdentifier) SharedKey() string { + return csi.caller.SharedKey() +} + +// Key returns unique key for remote identifier. Requests with same key return +// same data. 
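For orientation, a sketch of how the three identifier methods are consumed, mirroring initializeClientSession earlier in this diff; `ctx`, `sg`, `bm`, and `sessionUUID` are assumed to be in scope:

// SharedKey() picks a base for incremental transfer, Key() deduplicates
// identical requests, and Transport() routes the copy through the
// registered ClientSessionTransport.
csi, err := NewClientSessionSourceIdentifier(ctx, sg, sessionUUID)
if err != nil {
	return nil, err
}
src, err := bm.fsCache.SyncFrom(ctx, csi)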
+func (csi *ClientSessionSourceIdentifier) Key() string { + return csi.uuid +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/dockerfile/containerbackend.go b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/containerbackend.go new file mode 100644 index 000000000..54adfb13f --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/containerbackend.go @@ -0,0 +1,146 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "context" + "fmt" + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/pkg/stringid" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type containerManager struct { + tmpContainers map[string]struct{} + backend builder.ExecBackend +} + +// newContainerManager creates a new container backend +func newContainerManager(docker builder.ExecBackend) *containerManager { + return &containerManager{ + backend: docker, + tmpContainers: make(map[string]struct{}), + } +} + +// Create a container +func (c *containerManager) Create(runConfig *container.Config, hostConfig *container.HostConfig) (container.ContainerCreateCreatedBody, error) { + container, err := c.backend.ContainerCreate(types.ContainerCreateConfig{ + Config: runConfig, + HostConfig: hostConfig, + }) + if err != nil { + return container, err + } + c.tmpContainers[container.ID] = struct{}{} + return container, nil +} + +var errCancelled = errors.New("build cancelled") + +// Run a container by ID +func (c *containerManager) Run(ctx context.Context, cID string, stdout, stderr io.Writer) (err error) { + attached := make(chan struct{}) + errCh := make(chan error) + go func() { + errCh <- c.backend.ContainerAttachRaw(cID, nil, stdout, stderr, true, attached) + }() + select { + case err := <-errCh: + return err + case <-attached: + } + + finished := make(chan struct{}) + cancelErrCh := make(chan error, 1) + go func() { + select { + case <-ctx.Done(): + logrus.Debugln("Build cancelled, killing and removing container:", cID) + c.backend.ContainerKill(cID, 0) + c.removeContainer(cID, stdout) + cancelErrCh <- errCancelled + case <-finished: + cancelErrCh <- nil + } + }() + + if err := c.backend.ContainerStart(cID, nil, "", ""); err != nil { + close(finished) + logCancellationError(cancelErrCh, "error from ContainerStart: "+err.Error()) + return err + } + + // Block on reading output from container, stop on err or chan closed + if err := <-errCh; err != nil { + close(finished) + logCancellationError(cancelErrCh, "error from errCh: "+err.Error()) + return err + } + + waitC, err := c.backend.ContainerWait(ctx, cID, containerpkg.WaitConditionNotRunning) + if err != nil { + close(finished) + logCancellationError(cancelErrCh, fmt.Sprintf("unable to begin ContainerWait: %s", err)) + return err + } + + if status := <-waitC; status.ExitCode() != 0 { + close(finished) + logCancellationError(cancelErrCh, + fmt.Sprintf("a non-zero code from ContainerWait: %d", status.ExitCode())) + return &statusCodeError{code: status.ExitCode(), err: status.Err()} + } + + close(finished) + return <-cancelErrCh +} + +func logCancellationError(cancelErrCh chan error, msg string) { + if cancelErr := <-cancelErrCh; cancelErr != nil { + logrus.Debugf("Build cancelled (%v): %s", cancelErr, msg) + } +} + +type statusCodeError struct { + code int + err error +} + +func (e *statusCodeError) Error() 
string { + if e.err == nil { + return "" + } + return e.err.Error() +} + +func (e *statusCodeError) StatusCode() int { + return e.code +} + +func (c *containerManager) removeContainer(containerID string, stdout io.Writer) error { + rmConfig := &types.ContainerRmConfig{ + ForceRemove: true, + RemoveVolume: true, + } + if err := c.backend.ContainerRm(containerID, rmConfig); err != nil { + fmt.Fprintf(stdout, "Error removing intermediate container %s: %v\n", stringid.TruncateID(containerID), err) + return err + } + return nil +} + +// RemoveAll containers managed by this container manager +func (c *containerManager) RemoveAll(stdout io.Writer) { + for containerID := range c.tmpContainers { + if err := c.removeContainer(containerID, stdout); err != nil { + return + } + delete(c.tmpContainers, containerID) + fmt.Fprintf(stdout, "Removing intermediate container %s\n", stringid.TruncateID(containerID)) + } +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/dockerfile/copy.go b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/copy.go new file mode 100644 index 000000000..ad9b08dfe --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/copy.go @@ -0,0 +1,582 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "archive/tar" + "fmt" + "io" + "mime" + "net/http" + "net/url" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + "time" + + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/remotecontext" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/urlutil" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +const unnamedFilename = "__unnamed__" + +type pathCache interface { + Load(key interface{}) (value interface{}, ok bool) + Store(key, value interface{}) +} + +// copyInfo is a data object which stores the metadata about each source file in +// a copyInstruction +type copyInfo struct { + root containerfs.ContainerFS + path string + hash string + noDecompress bool +} + +func (c copyInfo) fullPath() (string, error) { + return c.root.ResolveScopedPath(c.path, true) +} + +func newCopyInfoFromSource(source builder.Source, path string, hash string) copyInfo { + return copyInfo{root: source.Root(), path: path, hash: hash} +} + +func newCopyInfos(copyInfos ...copyInfo) []copyInfo { + return copyInfos +} + +// copyInstruction is a fully parsed COPY or ADD command that is passed to +// Builder.performCopy to copy files into the image filesystem +type copyInstruction struct { + cmdName string + infos []copyInfo + dest string + chownStr string + allowLocalDecompression bool +} + +// copier reads a raw COPY or ADD command, fetches remote sources using a downloader, +// and creates a copyInstruction +type copier struct { + imageSource *imageMount + source builder.Source + pathCache pathCache + download sourceDownloader + platform *specs.Platform + // for cleanup. TODO: having copier.cleanup() is error prone and hard to + // follow. Code calling performCopy should manage the lifecycle of its params. + // Copier should take override source as input, not imageMount. 
+ activeLayer builder.RWLayer + tmpPaths []string +} + +func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, imageSource *imageMount) copier { + platform := req.builder.platform + if platform == nil { + // May be nil if not explicitly set in API/dockerfile + platform = &specs.Platform{} + } + if platform.OS == "" { + // Default to the dispatch requests operating system if not explicit in API/dockerfile + platform.OS = req.state.operatingSystem + } + if platform.OS == "" { + // This is a failsafe just in case. Shouldn't be hit. + platform.OS = runtime.GOOS + } + + return copier{ + source: req.source, + pathCache: req.builder.pathCache, + download: download, + imageSource: imageSource, + platform: platform, + } + +} + +func (o *copier) createCopyInstruction(args []string, cmdName string) (copyInstruction, error) { + inst := copyInstruction{cmdName: cmdName} + last := len(args) - 1 + + // Work in platform-specific filepath semantics + // TODO: This OS switch for paths is NOT correct and should not be supported. + // Maintained for backwards compatibility + pathOS := runtime.GOOS + if o.platform != nil { + pathOS = o.platform.OS + } + inst.dest = fromSlash(args[last], pathOS) + separator := string(separator(pathOS)) + infos, err := o.getCopyInfosForSourcePaths(args[0:last], inst.dest) + if err != nil { + return inst, errors.Wrapf(err, "%s failed", cmdName) + } + if len(infos) > 1 && !strings.HasSuffix(inst.dest, separator) { + return inst, errors.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName) + } + inst.infos = infos + return inst, nil +} + +// getCopyInfosForSourcePaths iterates over the source files and calculate the info +// needed to copy (e.g. hash value if cached) +// The dest is used in case source is URL (and ends with "/") +func (o *copier) getCopyInfosForSourcePaths(sources []string, dest string) ([]copyInfo, error) { + var infos []copyInfo + for _, orig := range sources { + subinfos, err := o.getCopyInfoForSourcePath(orig, dest) + if err != nil { + return nil, err + } + infos = append(infos, subinfos...) + } + + if len(infos) == 0 { + return nil, errors.New("no source files were specified") + } + return infos, nil +} + +func (o *copier) getCopyInfoForSourcePath(orig, dest string) ([]copyInfo, error) { + if !urlutil.IsURL(orig) { + return o.calcCopyInfo(orig, true) + } + + remote, path, err := o.download(orig) + if err != nil { + return nil, err + } + // If path == "" then we are unable to determine filename from src + // We have to make sure dest is available + if path == "" { + if strings.HasSuffix(dest, "/") { + return nil, errors.Errorf("cannot determine filename for source %s", orig) + } + path = unnamedFilename + } + o.tmpPaths = append(o.tmpPaths, remote.Root().Path()) + + hash, err := remote.Hash(path) + ci := newCopyInfoFromSource(remote, path, hash) + ci.noDecompress = true // data from http shouldn't be extracted even on ADD + return newCopyInfos(ci), err +} + +// Cleanup removes any temporary directories created as part of downloading +// remote files. +func (o *copier) Cleanup() { + for _, path := range o.tmpPaths { + os.RemoveAll(path) + } + o.tmpPaths = []string{} + if o.activeLayer != nil { + o.activeLayer.Release() + o.activeLayer = nil + } +} + +// TODO: allowWildcards can probably be removed by refactoring this function further. 
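A quick illustration of the wildcard detection this relies on (containsWildcards is defined just after calcCopyInfo below); note the platform-dependent escape handling:

fmt.Println(containsWildcards(`src/*.go`, "linux"))   // true: '*' is a wildcard
fmt.Println(containsWildcards(`src/\*.go`, "linux"))  // false: '\' escapes the '*' on non-Windows
fmt.Println(containsWildcards(`src\*.go`, "windows")) // true: on Windows '\' is a separator, not an escape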
+func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo, error) { + imageSource := o.imageSource + + // TODO: do this when creating copier. Requires validateCopySourcePath + // (and other below) to be aware of the difference sources. Why is it only + // done on image Source? + if imageSource != nil && o.activeLayer == nil { + // this needs to be protected against repeated calls as wildcard copy + // will call it multiple times for a single COPY + var err error + rwLayer, err := imageSource.NewRWLayer() + if err != nil { + return nil, err + } + o.activeLayer = rwLayer + + o.source, err = remotecontext.NewLazySource(rwLayer.Root()) + if err != nil { + return nil, errors.Wrapf(err, "failed to create context for copy from %s", rwLayer.Root().Path()) + } + } + + if o.source == nil { + return nil, errors.Errorf("missing build context") + } + + root := o.source.Root() + + if err := validateCopySourcePath(imageSource, origPath, root.OS()); err != nil { + return nil, err + } + + // Work in source OS specific filepath semantics + // For LCOW, this is NOT the daemon OS. + origPath = root.FromSlash(origPath) + origPath = strings.TrimPrefix(origPath, string(root.Separator())) + origPath = strings.TrimPrefix(origPath, "."+string(root.Separator())) + + // Deal with wildcards + if allowWildcards && containsWildcards(origPath, root.OS()) { + return o.copyWithWildcards(origPath) + } + + if imageSource != nil && imageSource.ImageID() != "" { + // return a cached copy if one exists + if h, ok := o.pathCache.Load(imageSource.ImageID() + origPath); ok { + return newCopyInfos(newCopyInfoFromSource(o.source, origPath, h.(string))), nil + } + } + + // Deal with the single file case + copyInfo, err := copyInfoForFile(o.source, origPath) + switch { + case err != nil: + return nil, err + case copyInfo.hash != "": + o.storeInPathCache(imageSource, origPath, copyInfo.hash) + return newCopyInfos(copyInfo), err + } + + // TODO: remove, handle dirs in Hash() + subfiles, err := walkSource(o.source, origPath) + if err != nil { + return nil, err + } + + hash := hashStringSlice("dir", subfiles) + o.storeInPathCache(imageSource, origPath, hash) + return newCopyInfos(newCopyInfoFromSource(o.source, origPath, hash)), nil +} + +func containsWildcards(name, platform string) bool { + isWindows := platform == "windows" + for i := 0; i < len(name); i++ { + ch := name[i] + if ch == '\\' && !isWindows { + i++ + } else if ch == '*' || ch == '?' || ch == '[' { + return true + } + } + return false +} + +func (o *copier) storeInPathCache(im *imageMount, path string, hash string) { + if im != nil { + o.pathCache.Store(im.ImageID()+path, hash) + } +} + +func (o *copier) copyWithWildcards(origPath string) ([]copyInfo, error) { + root := o.source.Root() + var copyInfos []copyInfo + if err := root.Walk(root.Path(), func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + rel, err := remotecontext.Rel(root, path) + if err != nil { + return err + } + + if rel == "." { + return nil + } + if match, _ := root.Match(origPath, rel); !match { + return nil + } + + // Note we set allowWildcards to false in case the name has + // a * in it + subInfos, err := o.calcCopyInfo(rel, false) + if err != nil { + return err + } + copyInfos = append(copyInfos, subInfos...) 
+ return nil + }); err != nil { + return nil, err + } + return copyInfos, nil +} + +func copyInfoForFile(source builder.Source, path string) (copyInfo, error) { + fi, err := remotecontext.StatAt(source, path) + if err != nil { + return copyInfo{}, err + } + + if fi.IsDir() { + return copyInfo{}, nil + } + hash, err := source.Hash(path) + if err != nil { + return copyInfo{}, err + } + return newCopyInfoFromSource(source, path, "file:"+hash), nil +} + +// TODO: dedupe with copyWithWildcards() +func walkSource(source builder.Source, origPath string) ([]string, error) { + fp, err := remotecontext.FullPath(source, origPath) + if err != nil { + return nil, err + } + // Must be a dir + var subfiles []string + err = source.Root().Walk(fp, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + rel, err := remotecontext.Rel(source.Root(), path) + if err != nil { + return err + } + if rel == "." { + return nil + } + hash, err := source.Hash(rel) + if err != nil { + return nil + } + // we already checked handleHash above + subfiles = append(subfiles, hash) + return nil + }) + if err != nil { + return nil, err + } + + sort.Strings(subfiles) + return subfiles, nil +} + +type sourceDownloader func(string) (builder.Source, string, error) + +func newRemoteSourceDownloader(output, stdout io.Writer) sourceDownloader { + return func(url string) (builder.Source, string, error) { + return downloadSource(output, stdout, url) + } +} + +func errOnSourceDownload(_ string) (builder.Source, string, error) { + return nil, "", errors.New("source can't be a URL for COPY") +} + +func getFilenameForDownload(path string, resp *http.Response) string { + // Guess filename based on source + if path != "" && !strings.HasSuffix(path, "/") { + if filename := filepath.Base(filepath.FromSlash(path)); filename != "" { + return filename + } + } + + // Guess filename based on Content-Disposition + if contentDisposition := resp.Header.Get("Content-Disposition"); contentDisposition != "" { + if _, params, err := mime.ParseMediaType(contentDisposition); err == nil { + if params["filename"] != "" && !strings.HasSuffix(params["filename"], "/") { + if filename := filepath.Base(filepath.FromSlash(params["filename"])); filename != "" { + return filename + } + } + } + } + return "" +} + +func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote builder.Source, p string, err error) { + u, err := url.Parse(srcURL) + if err != nil { + return + } + + resp, err := remotecontext.GetWithStatusError(srcURL) + if err != nil { + return + } + + filename := getFilenameForDownload(u.Path, resp) + + // Prepare file in a tmp dir + tmpDir, err := ioutils.TempDir("", "docker-remote") + if err != nil { + return + } + defer func() { + if err != nil { + os.RemoveAll(tmpDir) + } + }() + // If filename is empty, the returned filename will be "" but + // the tmp filename will be created as "__unnamed__" + tmpFileName := filename + if filename == "" { + tmpFileName = unnamedFilename + } + tmpFileName = filepath.Join(tmpDir, tmpFileName) + tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return + } + + progressOutput := streamformatter.NewJSONProgressOutput(output, true) + progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading") + // Download and dump result to tmp file + // TODO: add filehash directly + if _, err = io.Copy(tmpFile, progressReader); err != nil { + tmpFile.Close() + return + } + // TODO: how 
important is this random blank line to the output? + fmt.Fprintln(stdout) + + // Set the mtime to the Last-Modified header value if present + // Otherwise just remove atime and mtime + mTime := time.Time{} + + lastMod := resp.Header.Get("Last-Modified") + if lastMod != "" { + // If we can't parse it then just let it default to 'zero' + // otherwise use the parsed time value + if parsedMTime, err := http.ParseTime(lastMod); err == nil { + mTime = parsedMTime + } + } + + tmpFile.Close() + + if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil { + return + } + + lc, err := remotecontext.NewLazySource(containerfs.NewLocalContainerFS(tmpDir)) + return lc, filename, err +} + +type copyFileOptions struct { + decompress bool + identity idtools.Identity + archiver Archiver +} + +type copyEndpoint struct { + driver containerfs.Driver + path string +} + +func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions) error { + srcPath, err := source.fullPath() + if err != nil { + return err + } + + destPath, err := dest.fullPath() + if err != nil { + return err + } + + archiver := options.archiver + + srcEndpoint := ©Endpoint{driver: source.root, path: srcPath} + destEndpoint := ©Endpoint{driver: dest.root, path: destPath} + + src, err := source.root.Stat(srcPath) + if err != nil { + return errors.Wrapf(err, "source path not found") + } + if src.IsDir() { + return copyDirectory(archiver, srcEndpoint, destEndpoint, options.identity) + } + if options.decompress && isArchivePath(source.root, srcPath) && !source.noDecompress { + return archiver.UntarPath(srcPath, destPath) + } + + destExistsAsDir, err := isExistingDirectory(destEndpoint) + if err != nil { + return err + } + // dest.path must be used because destPath has already been cleaned of any + // trailing slash + if endsInSlash(dest.root, dest.path) || destExistsAsDir { + // source.path must be used to get the correct filename when the source + // is a symlink + destPath = dest.root.Join(destPath, source.root.Base(source.path)) + destEndpoint = ©Endpoint{driver: dest.root, path: destPath} + } + return copyFile(archiver, srcEndpoint, destEndpoint, options.identity) +} + +func isArchivePath(driver containerfs.ContainerFS, path string) bool { + file, err := driver.Open(path) + if err != nil { + return false + } + defer file.Close() + rdr, err := archive.DecompressStream(file) + if err != nil { + return false + } + r := tar.NewReader(rdr) + _, err = r.Next() + return err == nil +} + +func copyDirectory(archiver Archiver, source, dest *copyEndpoint, identity idtools.Identity) error { + destExists, err := isExistingDirectory(dest) + if err != nil { + return errors.Wrapf(err, "failed to query destination path") + } + + if err := archiver.CopyWithTar(source.path, dest.path); err != nil { + return errors.Wrapf(err, "failed to copy directory") + } + // TODO: @gupta-ak. Investigate how LCOW permission mappings will work. 
+ return fixPermissions(source.path, dest.path, identity, !destExists) +} + +func copyFile(archiver Archiver, source, dest *copyEndpoint, identity idtools.Identity) error { + if runtime.GOOS == "windows" && dest.driver.OS() == "linux" { + // LCOW + if err := dest.driver.MkdirAll(dest.driver.Dir(dest.path), 0755); err != nil { + return errors.Wrapf(err, "failed to create new directory") + } + } else { + if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest.path), 0755, identity); err != nil { + // Normal containers + return errors.Wrapf(err, "failed to create new directory") + } + } + + if err := archiver.CopyFileWithTar(source.path, dest.path); err != nil { + return errors.Wrapf(err, "failed to copy file") + } + // TODO: @gupta-ak. Investigate how LCOW permission mappings will work. + return fixPermissions(source.path, dest.path, identity, false) +} + +func endsInSlash(driver containerfs.Driver, path string) bool { + return strings.HasSuffix(path, string(driver.Separator())) +} + +// isExistingDirectory returns true if the path exists and is a directory +func isExistingDirectory(point *copyEndpoint) (bool, error) { + destStat, err := point.driver.Stat(point.path) + switch { + case os.IsNotExist(err): + return false, nil + case err != nil: + return false, err + } + return destStat.IsDir(), nil +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/dockerfile/copy_unix.go b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/copy_unix.go new file mode 100644 index 000000000..770226444 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/copy_unix.go @@ -0,0 +1,48 @@ +// +build !windows + +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/pkg/containerfs" + "github.com/docker/docker/pkg/idtools" +) + +func fixPermissions(source, destination string, identity idtools.Identity, overrideSkip bool) error { + var ( + skipChownRoot bool + err error + ) + if !overrideSkip { + destEndpoint := ©Endpoint{driver: containerfs.NewLocalDriver(), path: destination} + skipChownRoot, err = isExistingDirectory(destEndpoint) + if err != nil { + return err + } + } + + // We Walk on the source rather than on the destination because we don't + // want to change permissions on things we haven't created or modified. + return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error { + // Do not alter the walk root iff. it existed before, as it doesn't fall under + // the domain of "things we should chown". + if skipChownRoot && source == fullpath { + return nil + } + + // Path is prefixed by source: substitute with destination instead. 
+ cleaned, err := filepath.Rel(source, fullpath) + if err != nil { + return err + } + + fullpath = filepath.Join(destination, cleaned) + return os.Lchown(fullpath, identity.UID, identity.GID) + }) +} + +func validateCopySourcePath(imageSource *imageMount, origPath, platform string) error { + return nil +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/dockerfile/copy_windows.go b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/copy_windows.go new file mode 100644 index 000000000..59c1a4b7c --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/copy_windows.go @@ -0,0 +1,109 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/Microsoft/go-winio" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" + "golang.org/x/sys/windows" +) + +var pathBlacklist = map[string]bool{ + "c:\\": true, + "c:\\windows": true, +} + +func init() { + reexec.Register("windows-fix-permissions", fixPermissionsReexec) +} + +func fixPermissions(source, destination string, identity idtools.Identity, _ bool) error { + if identity.SID == "" { + return nil + } + + cmd := reexec.Command("windows-fix-permissions", source, destination, identity.SID) + output, err := cmd.CombinedOutput() + + return errors.Wrapf(err, "failed to exec windows-fix-permissions: %s", output) +} + +func fixPermissionsReexec() { + err := fixPermissionsWindows(os.Args[1], os.Args[2], os.Args[3]) + if err != nil { + fmt.Fprint(os.Stderr, err) + os.Exit(1) + } +} + +func fixPermissionsWindows(source, destination, SID string) error { + + privileges := []string{winio.SeRestorePrivilege, system.SeTakeOwnershipPrivilege} + + err := winio.EnableProcessPrivileges(privileges) + if err != nil { + return err + } + + defer winio.DisableProcessPrivileges(privileges) + + sid, err := windows.StringToSid(SID) + if err != nil { + return err + } + + // Owners on *nix have read/write/delete/read control and write DAC. + // Add an ACE that grants this to the user/group specified with the + // chown option. Currently Windows is not honoring the owner change, + // however, they are aware of this and it should be fixed at some + // point. + + sddlString := system.SddlAdministratorsLocalSystem + sddlString += "(A;OICI;GRGWGXRCWDSD;;;" + SID + ")" + + securityDescriptor, err := winio.SddlToSecurityDescriptor(sddlString) + if err != nil { + return err + } + + var daclPresent uint32 + var daclDefaulted uint32 + var dacl *byte + + err = system.GetSecurityDescriptorDacl(&securityDescriptor[0], &daclPresent, &dacl, &daclDefaulted) + if err != nil { + return err + } + + return system.SetNamedSecurityInfo(windows.StringToUTF16Ptr(destination), system.SE_FILE_OBJECT, system.OWNER_SECURITY_INFORMATION|system.DACL_SECURITY_INFORMATION, sid, nil, dacl, nil) +} + +func validateCopySourcePath(imageSource *imageMount, origPath, platform string) error { + // validate windows paths from other images + LCOW + if imageSource == nil || platform != "windows" { + return nil + } + + origPath = filepath.FromSlash(origPath) + p := strings.ToLower(filepath.Clean(origPath)) + if !filepath.IsAbs(p) { + if filepath.VolumeName(p) != "" { + if p[len(p)-2:] == ":." { // case where clean returns weird c:. 
paths + p = p[:len(p)-1] + } + p += "\\" + } else { + p = filepath.Join("c:\\", p) + } + } + if _, blacklisted := pathBlacklist[p]; blacklisted { + return errors.New("copy from c:\\ or c:\\windows is not allowed on windows") + } + return nil +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go new file mode 100644 index 000000000..f2da08ed4 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go @@ -0,0 +1,585 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +// This file contains the dispatchers for each command. Note that +// `nullDispatch` is not actually a command, but support for commands we parse +// but do nothing with. +// +// See evaluator.go for a higher level discussion of the whole evaluator +// package. + +import ( + "bytes" + "fmt" + "runtime" + "sort" + "strings" + + "github.com/containerd/containerd/platforms" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/builder" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" + "github.com/docker/go-connections/nat" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/moby/buildkit/frontend/dockerfile/parser" + "github.com/moby/buildkit/frontend/dockerfile/shell" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// ENV foo bar +// +// Sets the environment variable foo to bar, also makes interpolation +// in the dockerfile available from the next statement on via ${foo}. +// +func dispatchEnv(d dispatchRequest, c *instructions.EnvCommand) error { + runConfig := d.state.runConfig + commitMessage := bytes.NewBufferString("ENV") + for _, e := range c.Env { + name := e.Key + newVar := e.String() + + commitMessage.WriteString(" " + newVar) + gotOne := false + for i, envVar := range runConfig.Env { + envParts := strings.SplitN(envVar, "=", 2) + compareFrom := envParts[0] + if shell.EqualEnvKeys(compareFrom, name) { + runConfig.Env[i] = newVar + gotOne = true + break + } + } + if !gotOne { + runConfig.Env = append(runConfig.Env, newVar) + } + } + return d.builder.commit(d.state, commitMessage.String()) +} + +// MAINTAINER some text +// +// Sets the maintainer metadata. +func dispatchMaintainer(d dispatchRequest, c *instructions.MaintainerCommand) error { + + d.state.maintainer = c.Maintainer + return d.builder.commit(d.state, "MAINTAINER "+c.Maintainer) +} + +// LABEL some json data describing the image +// +// Sets the Label variable foo to bar, +// +func dispatchLabel(d dispatchRequest, c *instructions.LabelCommand) error { + if d.state.runConfig.Labels == nil { + d.state.runConfig.Labels = make(map[string]string) + } + commitStr := "LABEL" + for _, v := range c.Labels { + d.state.runConfig.Labels[v.Key] = v.Value + commitStr += " " + v.String() + } + return d.builder.commit(d.state, commitStr) +} + +// ADD foo /path +// +// Add the file 'foo' to '/path'. Tarball and Remote URL (http, https) handling +// exist here. If you do not wish to have this automatic handling, use COPY. 
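+// Example (editor's illustration): ADD https://example.com/app.tgz /opt/ +// stores the downloaded archive as a plain file, while ADD app.tgz /opt/ +// also auto-extracts it; only local tarballs are decompressed.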
+// +func dispatchAdd(d dispatchRequest, c *instructions.AddCommand) error { + downloader := newRemoteSourceDownloader(d.builder.Output, d.builder.Stdout) + copier := copierFromDispatchRequest(d, downloader, nil) + defer copier.Cleanup() + + copyInstruction, err := copier.createCopyInstruction(c.SourcesAndDest, "ADD") + if err != nil { + return err + } + copyInstruction.chownStr = c.Chown + copyInstruction.allowLocalDecompression = true + + return d.builder.performCopy(d, copyInstruction) +} + +// COPY foo /path +// +// Same as 'ADD' but without the tar and remote url handling. +// +func dispatchCopy(d dispatchRequest, c *instructions.CopyCommand) error { + var im *imageMount + var err error + if c.From != "" { + im, err = d.getImageMount(c.From) + if err != nil { + return errors.Wrapf(err, "invalid from flag value %s", c.From) + } + } + copier := copierFromDispatchRequest(d, errOnSourceDownload, im) + defer copier.Cleanup() + copyInstruction, err := copier.createCopyInstruction(c.SourcesAndDest, "COPY") + if err != nil { + return err + } + copyInstruction.chownStr = c.Chown + + return d.builder.performCopy(d, copyInstruction) +} + +func (d *dispatchRequest) getImageMount(imageRefOrID string) (*imageMount, error) { + if imageRefOrID == "" { + // TODO: this could return the source in the default case as well? + return nil, nil + } + + var localOnly bool + stage, err := d.stages.get(imageRefOrID) + if err != nil { + return nil, err + } + if stage != nil { + imageRefOrID = stage.Image + localOnly = true + } + return d.builder.imageSources.Get(imageRefOrID, localOnly, d.builder.platform) +} + +// FROM [--platform=platform] imagename[:tag | @digest] [AS build-stage-name] +// +func initializeStage(d dispatchRequest, cmd *instructions.Stage) error { + d.builder.imageProber.Reset() + + var platform *specs.Platform + if v := cmd.Platform; v != "" { + v, err := d.getExpandedString(d.shlex, v) + if err != nil { + return errors.Wrapf(err, "failed to process arguments for platform %s", v) + } + + p, err := platforms.Parse(v) + if err != nil { + return errors.Wrapf(err, "failed to parse platform %s", v) + } + if err := system.ValidatePlatform(p); err != nil { + return err + } + platform = &p + } + + image, err := d.getFromImage(d.shlex, cmd.BaseName, platform) + if err != nil { + return err + } + state := d.state + if err := state.beginStage(cmd.Name, image); err != nil { + return err + } + if len(state.runConfig.OnBuild) > 0 { + triggers := state.runConfig.OnBuild + state.runConfig.OnBuild = nil + return dispatchTriggeredOnBuild(d, triggers) + } + return nil +} + +func dispatchTriggeredOnBuild(d dispatchRequest, triggers []string) error { + fmt.Fprintf(d.builder.Stdout, "# Executing %d build trigger", len(triggers)) + if len(triggers) > 1 { + fmt.Fprint(d.builder.Stdout, "s") + } + fmt.Fprintln(d.builder.Stdout) + for _, trigger := range triggers { + d.state.updateRunConfig() + ast, err := parser.Parse(strings.NewReader(trigger)) + if err != nil { + return err + } + if len(ast.AST.Children) != 1 { + return errors.New("onbuild trigger should be a single expression") + } + cmd, err := instructions.ParseCommand(ast.AST.Children[0]) + if err != nil { + if instructions.IsUnknownInstruction(err) { + buildsFailed.WithValues(metricsUnknownInstructionError).Inc() + } + return err + } + err = dispatch(d, cmd) + if err != nil { + return err + } + } + return nil +} + +func (d *dispatchRequest) getExpandedString(shlex *shell.Lex, str string) (string, error) { + substitutionArgs := []string{} + for key, value := 
range d.state.buildArgs.GetAllMeta() { + substitutionArgs = append(substitutionArgs, key+"="+value) + } + + name, err := shlex.ProcessWord(str, substitutionArgs) + if err != nil { + return "", err + } + return name, nil +} + +func (d *dispatchRequest) getImageOrStage(name string, platform *specs.Platform) (builder.Image, error) { + var localOnly bool + if im, ok := d.stages.getByName(name); ok { + name = im.Image + localOnly = true + } + + if platform == nil { + platform = d.builder.platform + } + + // Windows cannot support a container with no base image unless it is LCOW. + if name == api.NoBaseImageSpecifier { + p := platforms.DefaultSpec() + if platform != nil { + p = *platform + } + imageImage := &image.Image{} + imageImage.OS = p.OS + + // old windows scratch handling + // TODO: scratch should not have an os. It should be nil image. + // Windows supports scratch. What is not supported is running containers + // from it. + if runtime.GOOS == "windows" { + if platform == nil || platform.OS == "linux" { + if !system.LCOWSupported() { + return nil, errors.New("Linux containers are not supported on this system") + } + imageImage.OS = "linux" + } else if platform.OS == "windows" { + return nil, errors.New("Windows does not support FROM scratch") + } else { + return nil, errors.Errorf("platform %s is not supported", platforms.Format(p)) + } + } + return builder.Image(imageImage), nil + } + imageMount, err := d.builder.imageSources.Get(name, localOnly, platform) + if err != nil { + return nil, err + } + return imageMount.Image(), nil +} +func (d *dispatchRequest) getFromImage(shlex *shell.Lex, basename string, platform *specs.Platform) (builder.Image, error) { + name, err := d.getExpandedString(shlex, basename) + if err != nil { + return nil, err + } + // Empty string is interpreted to FROM scratch by images.GetImageAndReleasableLayer, + // so validate expanded result is not empty. + if name == "" { + return nil, errors.Errorf("base name (%s) should not be blank", basename) + } + + return d.getImageOrStage(name, platform) +} + +func dispatchOnbuild(d dispatchRequest, c *instructions.OnbuildCommand) error { + d.state.runConfig.OnBuild = append(d.state.runConfig.OnBuild, c.Expression) + return d.builder.commit(d.state, "ONBUILD "+c.Expression) +} + +// WORKDIR /tmp +// +// Set the working directory for future RUN/CMD/etc statements. +// +func dispatchWorkdir(d dispatchRequest, c *instructions.WorkdirCommand) error { + runConfig := d.state.runConfig + var err error + runConfig.WorkingDir, err = normalizeWorkdir(d.state.operatingSystem, runConfig.WorkingDir, c.Path) + if err != nil { + return err + } + + // For performance reasons, we explicitly do a create/mkdir now + // This avoids having an unnecessary expensive mount/unmount calls + // (on Windows in particular) during each container create. + // Prior to 1.13, the mkdir was deferred and not executed at this step. + if d.builder.disableCommit { + // Don't call back into the daemon if we're going through docker commit --change "WORKDIR /foo". + // We've already updated the runConfig and that's enough. 
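+ // Editor's note: e.g. docker commit --change "WORKDIR /app" takes this + // early return, updating the config without creating a build container.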
+ return nil + } + + comment := "WORKDIR " + runConfig.WorkingDir + runConfigWithCommentCmd := copyRunConfig(runConfig, withCmdCommentString(comment, d.state.operatingSystem)) + + containerID, err := d.builder.probeAndCreate(d.state, runConfigWithCommentCmd) + if err != nil || containerID == "" { + return err + } + + if err := d.builder.docker.ContainerCreateWorkdir(containerID); err != nil { + return err + } + + return d.builder.commitContainer(d.state, containerID, runConfigWithCommentCmd) +} + +func resolveCmdLine(cmd instructions.ShellDependantCmdLine, runConfig *container.Config, os string) []string { + result := cmd.CmdLine + if cmd.PrependShell && result != nil { + result = append(getShell(runConfig, os), result...) + } + return result +} + +// RUN some command yo +// +// run a command and commit the image. Args are automatically prepended with +// the current SHELL which defaults to 'sh -c' under linux or 'cmd /S /C' under +// Windows, in the event there is only one argument The difference in processing: +// +// RUN echo hi # sh -c echo hi (Linux and LCOW) +// RUN echo hi # cmd /S /C echo hi (Windows) +// RUN [ "echo", "hi" ] # echo hi +// +func dispatchRun(d dispatchRequest, c *instructions.RunCommand) error { + if !system.IsOSSupported(d.state.operatingSystem) { + return system.ErrNotSupportedOperatingSystem + } + stateRunConfig := d.state.runConfig + cmdFromArgs := resolveCmdLine(c.ShellDependantCmdLine, stateRunConfig, d.state.operatingSystem) + buildArgs := d.state.buildArgs.FilterAllowed(stateRunConfig.Env) + + saveCmd := cmdFromArgs + if len(buildArgs) > 0 { + saveCmd = prependEnvOnCmd(d.state.buildArgs, buildArgs, cmdFromArgs) + } + + runConfigForCacheProbe := copyRunConfig(stateRunConfig, + withCmd(saveCmd), + withEntrypointOverride(saveCmd, nil)) + if hit, err := d.builder.probeCache(d.state, runConfigForCacheProbe); err != nil || hit { + return err + } + + runConfig := copyRunConfig(stateRunConfig, + withCmd(cmdFromArgs), + withEnv(append(stateRunConfig.Env, buildArgs...)), + withEntrypointOverride(saveCmd, strslice.StrSlice{""}), + withoutHealthcheck()) + + // set config as already being escaped, this prevents double escaping on windows + runConfig.ArgsEscaped = true + + cID, err := d.builder.create(runConfig) + if err != nil { + return err + } + + if err := d.builder.containerManager.Run(d.builder.clientCtx, cID, d.builder.Stdout, d.builder.Stderr); err != nil { + if err, ok := err.(*statusCodeError); ok { + // TODO: change error type, because jsonmessage.JSONError assumes HTTP + msg := fmt.Sprintf( + "The command '%s' returned a non-zero code: %d", + strings.Join(runConfig.Cmd, " "), err.StatusCode()) + if err.Error() != "" { + msg = fmt.Sprintf("%s: %s", msg, err.Error()) + } + return &jsonmessage.JSONError{ + Message: msg, + Code: err.StatusCode(), + } + } + return err + } + + return d.builder.commitContainer(d.state, cID, runConfigForCacheProbe) +} + +// Derive the command to use for probeCache() and to commit in this container. +// Note that we only do this if there are any build-time env vars. Also, we +// use the special argument "|#" at the start of the args array. This will +// avoid conflicts with any RUN command since commands can not +// start with | (vertical bar). The "#" (number of build envs) is there to +// help ensure proper cache matches. We don't want a RUN command +// that starts with "foo=abc" to be considered part of a build-time env var. +// +// remove any unreferenced built-in args from the environment variables. 
+// These args are transparent so resulting image should be the same regardless +// of the value. +func prependEnvOnCmd(buildArgs *BuildArgs, buildArgVars []string, cmd strslice.StrSlice) strslice.StrSlice { + var tmpBuildEnv []string + for _, env := range buildArgVars { + key := strings.SplitN(env, "=", 2)[0] + if buildArgs.IsReferencedOrNotBuiltin(key) { + tmpBuildEnv = append(tmpBuildEnv, env) + } + } + + sort.Strings(tmpBuildEnv) + tmpEnv := append([]string{fmt.Sprintf("|%d", len(tmpBuildEnv))}, tmpBuildEnv...) + return strslice.StrSlice(append(tmpEnv, cmd...)) +} + +// CMD foo +// +// Set the default command to run in the container (which may be empty). +// Argument handling is the same as RUN. +// +func dispatchCmd(d dispatchRequest, c *instructions.CmdCommand) error { + runConfig := d.state.runConfig + cmd := resolveCmdLine(c.ShellDependantCmdLine, runConfig, d.state.operatingSystem) + runConfig.Cmd = cmd + // set config as already being escaped, this prevents double escaping on windows + runConfig.ArgsEscaped = true + + if err := d.builder.commit(d.state, fmt.Sprintf("CMD %q", cmd)); err != nil { + return err + } + + if len(c.ShellDependantCmdLine.CmdLine) != 0 { + d.state.cmdSet = true + } + + return nil +} + +// HEALTHCHECK foo +// +// Set the default healthcheck command to run in the container (which may be empty). +// Argument handling is the same as RUN. +// +func dispatchHealthcheck(d dispatchRequest, c *instructions.HealthCheckCommand) error { + runConfig := d.state.runConfig + if runConfig.Healthcheck != nil { + oldCmd := runConfig.Healthcheck.Test + if len(oldCmd) > 0 && oldCmd[0] != "NONE" { + fmt.Fprintf(d.builder.Stdout, "Note: overriding previous HEALTHCHECK: %v\n", oldCmd) + } + } + runConfig.Healthcheck = c.Health + return d.builder.commit(d.state, fmt.Sprintf("HEALTHCHECK %q", runConfig.Healthcheck)) +} + +// ENTRYPOINT /usr/sbin/nginx +// +// Set the entrypoint to /usr/sbin/nginx. Will accept the CMD as the arguments +// to /usr/sbin/nginx. Uses the default shell if not in JSON format. +// +// Handles command processing similar to CMD and RUN, only req.runConfig.Entrypoint +// is initialized at newBuilder time instead of through argument parsing. +// +func dispatchEntrypoint(d dispatchRequest, c *instructions.EntrypointCommand) error { + runConfig := d.state.runConfig + cmd := resolveCmdLine(c.ShellDependantCmdLine, runConfig, d.state.operatingSystem) + runConfig.Entrypoint = cmd + if !d.state.cmdSet { + runConfig.Cmd = nil + } + + return d.builder.commit(d.state, fmt.Sprintf("ENTRYPOINT %q", runConfig.Entrypoint)) +} + +// EXPOSE 6667/tcp 7000/tcp +// +// Expose ports for links and port mappings. This all ends up in +// req.runConfig.ExposedPorts for runconfig. +// +func dispatchExpose(d dispatchRequest, c *instructions.ExposeCommand, envs []string) error { + // custom multi word expansion + // expose $FOO with FOO="80 443" is expanded as EXPOSE [80,443]. This is the only command supporting word to words expansion + // so the word processing has been de-generalized + ports := []string{} + for _, p := range c.Ports { + ps, err := d.shlex.ProcessWords(p, envs) + if err != nil { + return err + } + ports = append(ports, ps...) 
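+ // e.g. EXPOSE $FOO with FOO="80 443" has appended "80" and "443" here, + // which nat.ParsePortSpecs below normalizes to 80/tcp and 443/tcp (sketch).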
+ } + c.Ports = ports + + ps, _, err := nat.ParsePortSpecs(ports) + if err != nil { + return err + } + + if d.state.runConfig.ExposedPorts == nil { + d.state.runConfig.ExposedPorts = make(nat.PortSet) + } + for p := range ps { + d.state.runConfig.ExposedPorts[p] = struct{}{} + } + + return d.builder.commit(d.state, "EXPOSE "+strings.Join(c.Ports, " ")) +} + +// USER foo +// +// Set the user to 'foo' for future commands and when running the +// ENTRYPOINT/CMD at container run time. +// +func dispatchUser(d dispatchRequest, c *instructions.UserCommand) error { + d.state.runConfig.User = c.User + return d.builder.commit(d.state, fmt.Sprintf("USER %v", c.User)) +} + +// VOLUME /foo +// +// Expose the volume /foo for use. Will also accept the JSON array form. +// +func dispatchVolume(d dispatchRequest, c *instructions.VolumeCommand) error { + if d.state.runConfig.Volumes == nil { + d.state.runConfig.Volumes = map[string]struct{}{} + } + for _, v := range c.Volumes { + if v == "" { + return errors.New("VOLUME specified can not be an empty string") + } + d.state.runConfig.Volumes[v] = struct{}{} + } + return d.builder.commit(d.state, fmt.Sprintf("VOLUME %v", c.Volumes)) +} + +// STOPSIGNAL signal +// +// Set the signal that will be used to kill the container. +func dispatchStopSignal(d dispatchRequest, c *instructions.StopSignalCommand) error { + + _, err := signal.ParseSignal(c.Signal) + if err != nil { + return errdefs.InvalidParameter(err) + } + d.state.runConfig.StopSignal = c.Signal + return d.builder.commit(d.state, fmt.Sprintf("STOPSIGNAL %v", c.Signal)) +} + +// ARG name[=value] +// +// Adds the variable foo to the trusted list of variables that can be passed +// to builder using the --build-arg flag for expansion/substitution or passing to 'run'. +// Dockerfile author may optionally set a default value of this variable. +func dispatchArg(d dispatchRequest, c *instructions.ArgCommand) error { + + commitStr := "ARG " + c.Key + if c.Value != nil { + commitStr += "=" + *c.Value + } + + d.state.buildArgs.AddArg(c.Key, c.Value) + return d.builder.commit(d.state, commitStr) +} + +// SHELL powershell -command +// +// Set the non-default shell to use. +func dispatchShell(d dispatchRequest, c *instructions.ShellCommand) error { + d.state.runConfig.Shell = c.Shell + return d.builder.commit(d.state, fmt.Sprintf("SHELL %v", d.state.runConfig.Shell)) +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go new file mode 100644 index 000000000..b3ba38032 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go @@ -0,0 +1,23 @@ +// +build !windows + +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "errors" + "os" + "path/filepath" +) + +// normalizeWorkdir normalizes a user requested working directory in a +// platform semantically consistent way. 
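+// e.g. current="/app" and requested="src" yield "/app/src", while an +// absolute request such as "/srv" is returned unchanged (editor's examples).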
+func normalizeWorkdir(_ string, current string, requested string) (string, error) { + if requested == "" { + return "", errors.New("cannot normalize nothing") + } + current = filepath.FromSlash(current) + requested = filepath.FromSlash(requested) + if !filepath.IsAbs(requested) { + return filepath.Join(string(os.PathSeparator), current, requested), nil + } + return requested, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go new file mode 100644 index 000000000..7824d1169 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go @@ -0,0 +1,95 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "errors" + "fmt" + "os" + "path" + "path/filepath" + "regexp" + "strings" + + "github.com/docker/docker/pkg/system" +) + +var pattern = regexp.MustCompile(`^[a-zA-Z]:\.$`) + +// normalizeWorkdir normalizes a user requested working directory in a +// platform semantically consistent way. +func normalizeWorkdir(platform string, current string, requested string) (string, error) { + if platform == "" { + platform = "windows" + } + if platform == "windows" { + return normalizeWorkdirWindows(current, requested) + } + return normalizeWorkdirUnix(current, requested) +} + +// normalizeWorkdirUnix normalizes a user requested working directory in a +// platform semantically consistent way. +func normalizeWorkdirUnix(current string, requested string) (string, error) { + if requested == "" { + return "", errors.New("cannot normalize nothing") + } + current = strings.Replace(current, string(os.PathSeparator), "/", -1) + requested = strings.Replace(requested, string(os.PathSeparator), "/", -1) + if !path.IsAbs(requested) { + return path.Join(`/`, current, requested), nil + } + return requested, nil +} + +// normalizeWorkdirWindows normalizes a user requested working directory in a +// platform semantically consistent way. +func normalizeWorkdirWindows(current string, requested string) (string, error) { + if requested == "" { + return "", errors.New("cannot normalize nothing") + } + + // `filepath.Clean` will replace "" with "." so skip in that case + if current != "" { + current = filepath.Clean(current) + } + if requested != "" { + requested = filepath.Clean(requested) + } + + // If either current or requested in Windows is: + // C: + // C:. + // then an error will be thrown as the definition for the above + // refers to `current directory on drive C:` + // Since filepath.Clean() will automatically normalize the above + // to `C:.`, we only need to check the last format + if pattern.MatchString(current) { + return "", fmt.Errorf("%s is not a directory. If you are specifying a drive letter, please add a trailing '\\'", current) + } + if pattern.MatchString(requested) { + return "", fmt.Errorf("%s is not a directory. If you are specifying a drive letter, please add a trailing '\\'", requested) + } + + // Target semantics is C:\somefolder, specifically in the format: + // UPPERCASEDriveLetter-Colon-Backslash-FolderName. We are already + // guaranteed that `current`, if set, is consistent. 
This allows us to + // cope correctly with any of the following in a Dockerfile: + // WORKDIR a --> C:\a + // WORKDIR c:\\foo --> C:\foo + // WORKDIR \\foo --> C:\foo + // WORKDIR /foo --> C:\foo + // WORKDIR c:\\foo \ WORKDIR bar --> C:\foo --> C:\foo\bar + // WORKDIR C:/foo \ WORKDIR bar --> C:\foo --> C:\foo\bar + // WORKDIR C:/foo \ WORKDIR \\bar --> C:\foo --> C:\bar + // WORKDIR /foo \ WORKDIR c:/bar --> C:\foo --> C:\bar + if len(current) == 0 || system.IsAbs(requested) { + if (requested[0] == os.PathSeparator) || + (len(requested) > 1 && string(requested[1]) != ":") || + (len(requested) == 1) { + requested = filepath.Join(`C:\`, requested) + } + } else { + requested = filepath.Join(current, requested) + } + // Upper-case drive letter + return (strings.ToUpper(string(requested[0])) + requested[1:]), nil +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go new file mode 100644 index 000000000..02e147752 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go @@ -0,0 +1,250 @@ +// Package dockerfile is the evaluation step in the Dockerfile parse/evaluate pipeline. +// +// It incorporates a dispatch table based on the parser.Node values (see the +// parser package for more information) that are yielded from the parser itself. +// Calling newBuilder with the BuildOpts struct can be used to customize the +// experience for execution purposes only. Parsing is controlled in the parser +// package, and this division of responsibility should be respected. +// +// Please see the jump table targets for the actual invocations, most of which +// will call out to the functions in internals.go to deal with their tasks. +// +// ONBUILD is a special case, which is covered in the onbuild() func in +// dispatchers.go. +// +// The evaluator uses the concept of "steps", which are usually each processable +// line in the Dockerfile. Each step is numbered and certain actions are taken +// before and after each step, such as creating an image ID and removing temporary +// containers and images. Note that ONBUILD creates a kinda-sorta "sub run" which +// includes its own set of steps (usually only one of them). +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "reflect" + "runtime" + "strconv" + "strings" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/runconfig/opts" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/moby/buildkit/frontend/dockerfile/shell" + "github.com/pkg/errors" +) + +func dispatch(d dispatchRequest, cmd instructions.Command) (err error) { + if c, ok := cmd.(instructions.PlatformSpecific); ok { + err := c.CheckPlatform(d.state.operatingSystem) + if err != nil { + return errdefs.InvalidParameter(err) + } + } + runConfigEnv := d.state.runConfig.Env + envs := append(runConfigEnv, d.state.buildArgs.FilterAllowed(runConfigEnv)...) 
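+ // envs is the merged view used for expansion below: runConfig.Env plus the + // allowed build args, e.g. "PATH=..." and "HTTP_PROXY=..." (illustrative).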
+ + if ex, ok := cmd.(instructions.SupportsSingleWordExpansion); ok { + err := ex.Expand(func(word string) (string, error) { + return d.shlex.ProcessWord(word, envs) + }) + if err != nil { + return errdefs.InvalidParameter(err) + } + } + + defer func() { + if d.builder.options.ForceRemove { + d.builder.containerManager.RemoveAll(d.builder.Stdout) + return + } + if d.builder.options.Remove && err == nil { + d.builder.containerManager.RemoveAll(d.builder.Stdout) + return + } + }() + switch c := cmd.(type) { + case *instructions.EnvCommand: + return dispatchEnv(d, c) + case *instructions.MaintainerCommand: + return dispatchMaintainer(d, c) + case *instructions.LabelCommand: + return dispatchLabel(d, c) + case *instructions.AddCommand: + return dispatchAdd(d, c) + case *instructions.CopyCommand: + return dispatchCopy(d, c) + case *instructions.OnbuildCommand: + return dispatchOnbuild(d, c) + case *instructions.WorkdirCommand: + return dispatchWorkdir(d, c) + case *instructions.RunCommand: + return dispatchRun(d, c) + case *instructions.CmdCommand: + return dispatchCmd(d, c) + case *instructions.HealthCheckCommand: + return dispatchHealthcheck(d, c) + case *instructions.EntrypointCommand: + return dispatchEntrypoint(d, c) + case *instructions.ExposeCommand: + return dispatchExpose(d, c, envs) + case *instructions.UserCommand: + return dispatchUser(d, c) + case *instructions.VolumeCommand: + return dispatchVolume(d, c) + case *instructions.StopSignalCommand: + return dispatchStopSignal(d, c) + case *instructions.ArgCommand: + return dispatchArg(d, c) + case *instructions.ShellCommand: + return dispatchShell(d, c) + } + return errors.Errorf("unsupported command type: %v", reflect.TypeOf(cmd)) +} + +// dispatchState is a data object which is modified by dispatchers +type dispatchState struct { + runConfig *container.Config + maintainer string + cmdSet bool + imageID string + baseImage builder.Image + stageName string + buildArgs *BuildArgs + operatingSystem string +} + +func newDispatchState(baseArgs *BuildArgs) *dispatchState { + args := baseArgs.Clone() + args.ResetAllowed() + return &dispatchState{runConfig: &container.Config{}, buildArgs: args} +} + +type stagesBuildResults struct { + flat []*container.Config + indexed map[string]*container.Config +} + +func newStagesBuildResults() *stagesBuildResults { + return &stagesBuildResults{ + indexed: make(map[string]*container.Config), + } +} + +func (r *stagesBuildResults) getByName(name string) (*container.Config, bool) { + c, ok := r.indexed[strings.ToLower(name)] + return c, ok +} + +func (r *stagesBuildResults) validateIndex(i int) error { + if i == len(r.flat) { + return errors.New("refers to current build stage") + } + if i < 0 || i > len(r.flat) { + return errors.New("index out of bounds") + } + return nil +} + +func (r *stagesBuildResults) get(nameOrIndex string) (*container.Config, error) { + if c, ok := r.getByName(nameOrIndex); ok { + return c, nil + } + ix, err := strconv.ParseInt(nameOrIndex, 10, 0) + if err != nil { + return nil, nil + } + if err := r.validateIndex(int(ix)); err != nil { + return nil, err + } + return r.flat[ix], nil +} + +func (r *stagesBuildResults) checkStageNameAvailable(name string) error { + if name != "" { + if _, ok := r.getByName(name); ok { + return errors.Errorf("%s stage name already used", name) + } + } + return nil +} + +func (r *stagesBuildResults) commitStage(name string, config *container.Config) error { + if name != "" { + if _, ok := r.getByName(name); ok { + return errors.Errorf("%s stage name 
already used", name) + } + r.indexed[strings.ToLower(name)] = config + } + r.flat = append(r.flat, config) + return nil +} + +func commitStage(state *dispatchState, stages *stagesBuildResults) error { + return stages.commitStage(state.stageName, state.runConfig) +} + +type dispatchRequest struct { + state *dispatchState + shlex *shell.Lex + builder *Builder + source builder.Source + stages *stagesBuildResults +} + +func newDispatchRequest(builder *Builder, escapeToken rune, source builder.Source, buildArgs *BuildArgs, stages *stagesBuildResults) dispatchRequest { + return dispatchRequest{ + state: newDispatchState(buildArgs), + shlex: shell.NewLex(escapeToken), + builder: builder, + source: source, + stages: stages, + } +} + +func (s *dispatchState) updateRunConfig() { + s.runConfig.Image = s.imageID +} + +// hasFromImage returns true if the builder has processed a `FROM ` line +func (s *dispatchState) hasFromImage() bool { + return s.imageID != "" || (s.baseImage != nil && s.baseImage.ImageID() == "") +} + +func (s *dispatchState) beginStage(stageName string, image builder.Image) error { + s.stageName = stageName + s.imageID = image.ImageID() + s.operatingSystem = image.OperatingSystem() + if s.operatingSystem == "" { // In case it isn't set + s.operatingSystem = runtime.GOOS + } + if !system.IsOSSupported(s.operatingSystem) { + return system.ErrNotSupportedOperatingSystem + } + + if image.RunConfig() != nil { + // copy avoids referencing the same instance when 2 stages have the same base + s.runConfig = copyRunConfig(image.RunConfig()) + } else { + s.runConfig = &container.Config{} + } + s.baseImage = image + s.setDefaultPath() + s.runConfig.OpenStdin = false + s.runConfig.StdinOnce = false + return nil +} + +// Add the default PATH to runConfig.ENV if one exists for the operating system and there +// is no PATH set. Note that Windows containers on Windows won't have one as it's set by HCS +func (s *dispatchState) setDefaultPath() { + defaultPath := system.DefaultPathEnv(s.operatingSystem) + if defaultPath == "" { + return + } + envMap := opts.ConvertKVStringsToMap(s.runConfig.Env) + if _, ok := envMap["PATH"]; !ok { + s.runConfig.Env = append(s.runConfig.Env, "PATH="+defaultPath) + } +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go new file mode 100644 index 000000000..08cb396a2 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go @@ -0,0 +1,122 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "context" + "runtime" + + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/builder" + dockerimage "github.com/docker/docker/image" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type getAndMountFunc func(string, bool, *specs.Platform) (builder.Image, builder.ROLayer, error) + +// imageSources mounts images and provides a cache for mounted images. It tracks +// all images so they can be unmounted at the end of the build. 
+type imageSources struct { + byImageID map[string]*imageMount + mounts []*imageMount + getImage getAndMountFunc +} + +func newImageSources(ctx context.Context, options builderOptions) *imageSources { + getAndMount := func(idOrRef string, localOnly bool, platform *specs.Platform) (builder.Image, builder.ROLayer, error) { + pullOption := backend.PullOptionNoPull + if !localOnly { + if options.Options.PullParent { + pullOption = backend.PullOptionForcePull + } else { + pullOption = backend.PullOptionPreferLocal + } + } + return options.Backend.GetImageAndReleasableLayer(ctx, idOrRef, backend.GetImageAndLayerOptions{ + PullOption: pullOption, + AuthConfig: options.Options.AuthConfigs, + Output: options.ProgressWriter.Output, + Platform: platform, + }) + } + + return &imageSources{ + byImageID: make(map[string]*imageMount), + getImage: getAndMount, + } +} + +func (m *imageSources) Get(idOrRef string, localOnly bool, platform *specs.Platform) (*imageMount, error) { + if im, ok := m.byImageID[idOrRef]; ok { + return im, nil + } + + image, layer, err := m.getImage(idOrRef, localOnly, platform) + if err != nil { + return nil, err + } + im := newImageMount(image, layer) + m.Add(im) + return im, nil +} + +func (m *imageSources) Unmount() (retErr error) { + for _, im := range m.mounts { + if err := im.unmount(); err != nil { + logrus.Error(err) + retErr = err + } + } + return +} + +func (m *imageSources) Add(im *imageMount) { + switch im.image { + case nil: + // set the OS for scratch images + os := runtime.GOOS + // Windows does not support scratch except for LCOW + if runtime.GOOS == "windows" { + os = "linux" + } + im.image = &dockerimage.Image{V1Image: dockerimage.V1Image{OS: os}} + default: + m.byImageID[im.image.ImageID()] = im + } + m.mounts = append(m.mounts, im) +} + +// imageMount is a reference to an image that can be used as a builder.Source +type imageMount struct { + image builder.Image + source builder.Source + layer builder.ROLayer +} + +func newImageMount(image builder.Image, layer builder.ROLayer) *imageMount { + im := &imageMount{image: image, layer: layer} + return im +} + +func (im *imageMount) unmount() error { + if im.layer == nil { + return nil + } + if err := im.layer.Release(); err != nil { + return errors.Wrapf(err, "failed to unmount previous build image %s", im.image.ImageID()) + } + im.layer = nil + return nil +} + +func (im *imageMount) Image() builder.Image { + return im.image +} + +func (im *imageMount) NewRWLayer() (builder.RWLayer, error) { + return im.layer.NewRWLayer() +} + +func (im *imageMount) ImageID() string { + return im.image.ImageID() +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/dockerfile/imageprobe.go b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/imageprobe.go new file mode 100644 index 000000000..6960bf889 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/imageprobe.go @@ -0,0 +1,63 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/sirupsen/logrus" +) + +// ImageProber exposes an Image cache to the Builder. It supports resetting a +// cache. 
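+// Typical flow (editor's sketch): each step calls Probe(parentID, cfg); a hit +// returns the cached image ID, while the first miss sets cacheBusted so later +// steps skip lookups until Reset, e.g. at the next FROM.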
+type ImageProber interface { + Reset() + Probe(parentID string, runConfig *container.Config) (string, error) +} + +type imageProber struct { + cache builder.ImageCache + reset func() builder.ImageCache + cacheBusted bool +} + +func newImageProber(cacheBuilder builder.ImageCacheBuilder, cacheFrom []string, noCache bool) ImageProber { + if noCache { + return &nopProber{} + } + + reset := func() builder.ImageCache { + return cacheBuilder.MakeImageCache(cacheFrom) + } + return &imageProber{cache: reset(), reset: reset} +} + +func (c *imageProber) Reset() { + c.cache = c.reset() + c.cacheBusted = false +} + +// Probe checks if cache match can be found for current build instruction. +// It returns the cachedID if there is a hit, and the empty string on miss +func (c *imageProber) Probe(parentID string, runConfig *container.Config) (string, error) { + if c.cacheBusted { + return "", nil + } + cacheID, err := c.cache.GetCache(parentID, runConfig) + if err != nil { + return "", err + } + if len(cacheID) == 0 { + logrus.Debugf("[BUILDER] Cache miss: %s", runConfig.Cmd) + c.cacheBusted = true + return "", nil + } + logrus.Debugf("[BUILDER] Use cached version: %s", runConfig.Cmd) + return cacheID, nil +} + +type nopProber struct{} + +func (c *nopProber) Reset() {} + +func (c *nopProber) Probe(_ string, _ *container.Config) (string, error) { + return "", nil +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/dockerfile/internals.go b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/internals.go new file mode 100644 index 000000000..1635981f1 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/internals.go @@ -0,0 +1,500 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +// internals for handling commands. Covers many areas and a lot of +// non-contiguous functionality. Please read the comments. + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "os" + "path" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/containerfs" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + "github.com/docker/go-connections/nat" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Archiver defines an interface for copying files from one destination to +// another using Tar/Untar. +type Archiver interface { + TarUntar(src, dst string) error + UntarPath(src, dst string) error + CopyWithTar(src, dst string) error + CopyFileWithTar(src, dst string) error + IdentityMapping() *idtools.IdentityMapping +} + +// The builder will use the following interfaces if the container fs implements +// these for optimized copies to and from the container. 
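+// For example, a container fs backed by a remote VM (LCOW) might implement +// ExtractArchive/ArchivePath; otherwise the chrootarchive.Untar and +// archive.TarWithOptions fallbacks below are used (editor's note).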
+type extractor interface { + ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error +} + +type archiver interface { + ArchivePath(src string, opts *archive.TarOptions) (io.ReadCloser, error) +} + +// helper functions to get tar/untar func +func untarFunc(i interface{}) containerfs.UntarFunc { + if ea, ok := i.(extractor); ok { + return ea.ExtractArchive + } + return chrootarchive.Untar +} + +func tarFunc(i interface{}) containerfs.TarFunc { + if ap, ok := i.(archiver); ok { + return ap.ArchivePath + } + return archive.TarWithOptions +} + +func (b *Builder) getArchiver(src, dst containerfs.Driver) Archiver { + t, u := tarFunc(src), untarFunc(dst) + return &containerfs.Archiver{ + SrcDriver: src, + DstDriver: dst, + Tar: t, + Untar: u, + IDMapping: b.idMapping, + } +} + +func (b *Builder) commit(dispatchState *dispatchState, comment string) error { + if b.disableCommit { + return nil + } + if !dispatchState.hasFromImage() { + return errors.New("Please provide a source image with `from` prior to commit") + } + + runConfigWithCommentCmd := copyRunConfig(dispatchState.runConfig, withCmdComment(comment, dispatchState.operatingSystem)) + id, err := b.probeAndCreate(dispatchState, runConfigWithCommentCmd) + if err != nil || id == "" { + return err + } + + return b.commitContainer(dispatchState, id, runConfigWithCommentCmd) +} + +func (b *Builder) commitContainer(dispatchState *dispatchState, id string, containerConfig *container.Config) error { + if b.disableCommit { + return nil + } + + commitCfg := backend.CommitConfig{ + Author: dispatchState.maintainer, + // TODO: this copy should be done by Commit() + Config: copyRunConfig(dispatchState.runConfig), + ContainerConfig: containerConfig, + ContainerID: id, + } + + imageID, err := b.docker.CommitBuildStep(commitCfg) + dispatchState.imageID = string(imageID) + return err +} + +func (b *Builder) exportImage(state *dispatchState, layer builder.RWLayer, parent builder.Image, runConfig *container.Config) error { + newLayer, err := layer.Commit() + if err != nil { + return err + } + + // add an image mount without an image so the layer is properly unmounted + // if there is an error before we can add the full mount with image + b.imageSources.Add(newImageMount(nil, newLayer)) + + parentImage, ok := parent.(*image.Image) + if !ok { + return errors.Errorf("unexpected image type") + } + + newImage := image.NewChildImage(parentImage, image.ChildConfig{ + Author: state.maintainer, + ContainerConfig: runConfig, + DiffID: newLayer.DiffID(), + Config: copyRunConfig(state.runConfig), + }, parentImage.OS) + + // TODO: it seems strange to marshal this here instead of just passing in the + // image struct + config, err := newImage.MarshalJSON() + if err != nil { + return errors.Wrap(err, "failed to encode image config") + } + + exportedImage, err := b.docker.CreateImage(config, state.imageID) + if err != nil { + return errors.Wrapf(err, "failed to export image") + } + + state.imageID = exportedImage.ImageID() + b.imageSources.Add(newImageMount(exportedImage, newLayer)) + return nil +} + +func (b *Builder) performCopy(req dispatchRequest, inst copyInstruction) error { + state := req.state + srcHash := getSourceHashFromInfos(inst.infos) + + var chownComment string + if inst.chownStr != "" { + chownComment = fmt.Sprintf("--chown=%s", inst.chownStr) + } + commentStr := fmt.Sprintf("%s %s%s in %s ", inst.cmdName, chownComment, srcHash, inst.dest) + + // TODO: should this have been using origPaths instead of srcHash in the comment? 
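+ // Cache-key sketch (editor's note): withCmdCommentString below produces a + // no-op Cmd such as /bin/sh -c #(nop) COPY <srcHash> in /dest, so identical + // sources and destination probe to the same cached layer.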
+ runConfigWithCommentCmd := copyRunConfig( + state.runConfig, + withCmdCommentString(commentStr, state.operatingSystem)) + hit, err := b.probeCache(state, runConfigWithCommentCmd) + if err != nil || hit { + return err + } + + imageMount, err := b.imageSources.Get(state.imageID, true, req.builder.platform) + if err != nil { + return errors.Wrapf(err, "failed to get destination image %q", state.imageID) + } + + rwLayer, err := imageMount.NewRWLayer() + if err != nil { + return err + } + defer rwLayer.Release() + + destInfo, err := createDestInfo(state.runConfig.WorkingDir, inst, rwLayer, state.operatingSystem) + if err != nil { + return err + } + + identity := b.idMapping.RootPair() + // if a chown was requested, perform the steps to get the uid, gid + // translated (if necessary because of user namespaces), and replace + // the root pair with the chown pair for copy operations + if inst.chownStr != "" { + identity, err = parseChownFlag(b, state, inst.chownStr, destInfo.root.Path(), b.idMapping) + if err != nil { + if b.options.Platform != "windows" { + return errors.Wrapf(err, "unable to convert uid/gid chown string to host mapping") + } + + return errors.Wrapf(err, "unable to map container user account name to SID") + } + } + + for _, info := range inst.infos { + opts := copyFileOptions{ + decompress: inst.allowLocalDecompression, + archiver: b.getArchiver(info.root, destInfo.root), + identity: identity, + } + if err := performCopyForInfo(destInfo, info, opts); err != nil { + return errors.Wrapf(err, "failed to copy files") + } + } + return b.exportImage(state, rwLayer, imageMount.Image(), runConfigWithCommentCmd) +} + +func createDestInfo(workingDir string, inst copyInstruction, rwLayer builder.RWLayer, platform string) (copyInfo, error) { + // Twiddle the destination when it's a relative path - meaning, make it + // relative to the WORKINGDIR + dest, err := normalizeDest(workingDir, inst.dest, platform) + if err != nil { + return copyInfo{}, errors.Wrapf(err, "invalid %s", inst.cmdName) + } + + return copyInfo{root: rwLayer.Root(), path: dest}, nil +} + +// normalizeDest normalises the destination of a COPY/ADD command in a +// platform semantically consistent way. +func normalizeDest(workingDir, requested string, platform string) (string, error) { + dest := fromSlash(requested, platform) + endsInSlash := strings.HasSuffix(dest, string(separator(platform))) + + if platform != "windows" { + if !path.IsAbs(requested) { + dest = path.Join("/", filepath.ToSlash(workingDir), dest) + // Make sure we preserve any trailing slash + if endsInSlash { + dest += "/" + } + } + return dest, nil + } + + // We are guaranteed that the working directory is already consistent, + // However, Windows also has, for now, the limitation that ADD/COPY can + // only be done to the system drive, not any drives that might be present + // as a result of a bind mount. + // + // So... if the path requested is Linux-style absolute (/foo or \\foo), + // we assume it is the system drive. If it is a Windows-style absolute + // (DRIVE:\\foo), error if DRIVE is not C. And finally, ensure we + // strip any configured working directories drive letter so that it + // can be subsequently legitimately converted to a Windows volume-style + // pathname. + + // Not a typo - filepath.IsAbs, not system.IsAbs on this next check as + // we only want to validate where the DriveColon part has been supplied. 
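+ // e.g. COPY foo D:\data is rejected below as not being on the system drive, + // while a Linux-style /data is treated as living on C: (editor's examples).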
+ if filepath.IsAbs(dest) { + if strings.ToUpper(string(dest[0])) != "C" { + return "", fmt.Errorf("Windows does not support destinations not on the system drive (C:)") + } + dest = dest[2:] // Strip the drive letter + } + + // Cannot handle relative where WorkingDir is not the system drive. + if len(workingDir) > 0 { + if ((len(workingDir) > 1) && !system.IsAbs(workingDir[2:])) || (len(workingDir) == 1) { + return "", fmt.Errorf("Current WorkingDir %s is not platform consistent", workingDir) + } + if !system.IsAbs(dest) { + if string(workingDir[0]) != "C" { + return "", fmt.Errorf("Windows does not support relative paths when WORKDIR is not the system drive") + } + dest = filepath.Join(string(os.PathSeparator), workingDir[2:], dest) + // Make sure we preserve any trailing slash + if endsInSlash { + dest += string(os.PathSeparator) + } + } + } + return dest, nil +} + +// For backwards compat, if there's just one info then use it as the +// cache look-up string, otherwise hash 'em all into one +func getSourceHashFromInfos(infos []copyInfo) string { + if len(infos) == 1 { + return infos[0].hash + } + var hashs []string + for _, info := range infos { + hashs = append(hashs, info.hash) + } + return hashStringSlice("multi", hashs) +} + +func hashStringSlice(prefix string, slice []string) string { + hasher := sha256.New() + hasher.Write([]byte(strings.Join(slice, ","))) + return prefix + ":" + hex.EncodeToString(hasher.Sum(nil)) +} + +type runConfigModifier func(*container.Config) + +func withCmd(cmd []string) runConfigModifier { + return func(runConfig *container.Config) { + runConfig.Cmd = cmd + } +} + +// withCmdComment sets Cmd to a nop comment string. See withCmdCommentString for +// why there are two almost identical versions of this. +func withCmdComment(comment string, platform string) runConfigModifier { + return func(runConfig *container.Config) { + runConfig.Cmd = append(getShell(runConfig, platform), "#(nop) ", comment) + } +} + +// withCmdCommentString exists to maintain compatibility with older versions. +// A few instructions (workdir, copy, add) used a nop comment that is a single arg +// where as all the other instructions used a two arg comment string. This +// function implements the single arg version. +func withCmdCommentString(comment string, platform string) runConfigModifier { + return func(runConfig *container.Config) { + runConfig.Cmd = append(getShell(runConfig, platform), "#(nop) "+comment) + } +} + +func withEnv(env []string) runConfigModifier { + return func(runConfig *container.Config) { + runConfig.Env = env + } +} + +// withEntrypointOverride sets an entrypoint on runConfig if the command is +// not empty. The entrypoint is left unmodified if command is empty. +// +// The dockerfile RUN instruction expect to run without an entrypoint +// so the runConfig entrypoint needs to be modified accordingly. ContainerCreate +// will change a []string{""} entrypoint to nil, so we probe the cache with the +// nil entrypoint. +func withEntrypointOverride(cmd []string, entrypoint []string) runConfigModifier { + return func(runConfig *container.Config) { + if len(cmd) > 0 { + runConfig.Entrypoint = entrypoint + } + } +} + +// withoutHealthcheck disables healthcheck. +// +// The dockerfile RUN instruction expect to run without healthcheck +// so the runConfig Healthcheck needs to be disabled. 
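+// The injected value is equivalent to a Dockerfile HEALTHCHECK NONE: a Test +// of ["NONE"] marks the check as explicitly disabled (editor's note).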
+func withoutHealthcheck() runConfigModifier { + return func(runConfig *container.Config) { + runConfig.Healthcheck = &container.HealthConfig{ + Test: []string{"NONE"}, + } + } +} + +func copyRunConfig(runConfig *container.Config, modifiers ...runConfigModifier) *container.Config { + copy := *runConfig + copy.Cmd = copyStringSlice(runConfig.Cmd) + copy.Env = copyStringSlice(runConfig.Env) + copy.Entrypoint = copyStringSlice(runConfig.Entrypoint) + copy.OnBuild = copyStringSlice(runConfig.OnBuild) + copy.Shell = copyStringSlice(runConfig.Shell) + + if copy.Volumes != nil { + copy.Volumes = make(map[string]struct{}, len(runConfig.Volumes)) + for k, v := range runConfig.Volumes { + copy.Volumes[k] = v + } + } + + if copy.ExposedPorts != nil { + copy.ExposedPorts = make(nat.PortSet, len(runConfig.ExposedPorts)) + for k, v := range runConfig.ExposedPorts { + copy.ExposedPorts[k] = v + } + } + + if copy.Labels != nil { + copy.Labels = make(map[string]string, len(runConfig.Labels)) + for k, v := range runConfig.Labels { + copy.Labels[k] = v + } + } + + for _, modifier := range modifiers { + modifier(©) + } + return © +} + +func copyStringSlice(orig []string) []string { + if orig == nil { + return nil + } + return append([]string{}, orig...) +} + +// getShell is a helper function which gets the right shell for prefixing the +// shell-form of RUN, ENTRYPOINT and CMD instructions +func getShell(c *container.Config, os string) []string { + if 0 == len(c.Shell) { + return append([]string{}, defaultShellForOS(os)[:]...) + } + return append([]string{}, c.Shell[:]...) +} + +func (b *Builder) probeCache(dispatchState *dispatchState, runConfig *container.Config) (bool, error) { + cachedID, err := b.imageProber.Probe(dispatchState.imageID, runConfig) + if cachedID == "" || err != nil { + return false, err + } + fmt.Fprint(b.Stdout, " ---> Using cache\n") + + dispatchState.imageID = cachedID + return true, nil +} + +var defaultLogConfig = container.LogConfig{Type: "none"} + +func (b *Builder) probeAndCreate(dispatchState *dispatchState, runConfig *container.Config) (string, error) { + if hit, err := b.probeCache(dispatchState, runConfig); err != nil || hit { + return "", err + } + return b.create(runConfig) +} + +func (b *Builder) create(runConfig *container.Config) (string, error) { + logrus.Debugf("[BUILDER] Command to be executed: %v", runConfig.Cmd) + + isWCOW := runtime.GOOS == "windows" && b.platform != nil && b.platform.OS == "windows" + hostConfig := hostConfigFromOptions(b.options, isWCOW) + container, err := b.containerManager.Create(runConfig, hostConfig) + if err != nil { + return "", err + } + // TODO: could this be moved into containerManager.Create() ? 
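+ // e.g. daemon-side create warnings, such as discarded resource settings on + // an unsupported kernel, surface in the build output as the + // " ---> [Warning] ..." lines below (editor's note).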
+ for _, warning := range container.Warnings { + fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning) + } + fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(container.ID)) + return container.ID, nil +} + +func hostConfigFromOptions(options *types.ImageBuildOptions, isWCOW bool) *container.HostConfig { + resources := container.Resources{ + CgroupParent: options.CgroupParent, + CPUShares: options.CPUShares, + CPUPeriod: options.CPUPeriod, + CPUQuota: options.CPUQuota, + CpusetCpus: options.CPUSetCPUs, + CpusetMems: options.CPUSetMems, + Memory: options.Memory, + MemorySwap: options.MemorySwap, + Ulimits: options.Ulimits, + } + + hc := &container.HostConfig{ + SecurityOpt: options.SecurityOpt, + Isolation: options.Isolation, + ShmSize: options.ShmSize, + Resources: resources, + NetworkMode: container.NetworkMode(options.NetworkMode), + // Set a log config to override any default value set on the daemon + LogConfig: defaultLogConfig, + ExtraHosts: options.ExtraHosts, + } + + // For WCOW, the default of 20GB hard-coded in the platform + // is too small for builder scenarios where many users are + // using RUN statements to install large amounts of data. + // Use 127GB as that's the default size of a VHD in Hyper-V. + if isWCOW { + hc.StorageOpt = make(map[string]string) + hc.StorageOpt["size"] = "127GB" + } + + return hc +} + +// fromSlash works like filepath.FromSlash but with a given OS platform field +func fromSlash(path, platform string) string { + if platform == "windows" { + return strings.Replace(path, "/", "\\", -1) + } + return path +} + +// separator returns a OS path separator for the given OS platform +func separator(platform string) byte { + if platform == "windows" { + return '\\' + } + return '/' +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/dockerfile/internals_linux.go b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/internals_linux.go new file mode 100644 index 000000000..5dbd86f14 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/internals_linux.go @@ -0,0 +1,88 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "path/filepath" + "strconv" + "strings" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/symlink" + lcUser "github.com/opencontainers/runc/libcontainer/user" + "github.com/pkg/errors" +) + +func parseChownFlag(builder *Builder, state *dispatchState, chown, ctrRootPath string, identityMapping *idtools.IdentityMapping) (idtools.Identity, error) { + var userStr, grpStr string + parts := strings.Split(chown, ":") + if len(parts) > 2 { + return idtools.Identity{}, errors.New("invalid chown string format: " + chown) + } + if len(parts) == 1 { + // if no group specified, use the user spec as group as well + userStr, grpStr = parts[0], parts[0] + } else { + userStr, grpStr = parts[0], parts[1] + } + + passwdPath, err := symlink.FollowSymlinkInScope(filepath.Join(ctrRootPath, "etc", "passwd"), ctrRootPath) + if err != nil { + return idtools.Identity{}, errors.Wrapf(err, "can't resolve /etc/passwd path in container rootfs") + } + groupPath, err := symlink.FollowSymlinkInScope(filepath.Join(ctrRootPath, "etc", "group"), ctrRootPath) + if err != nil { + return idtools.Identity{}, errors.Wrapf(err, "can't resolve /etc/group path in container rootfs") + } + uid, err := lookupUser(userStr, passwdPath) + if err != nil { + return idtools.Identity{}, errors.Wrapf(err, "can't find uid for user "+userStr) + } + gid, err := 
lookupGroup(grpStr, groupPath) + if err != nil { + return idtools.Identity{}, errors.Wrapf(err, "can't find gid for group "+grpStr) + } + + // convert as necessary because of user namespaces + chownPair, err := identityMapping.ToHost(idtools.Identity{UID: uid, GID: gid}) + if err != nil { + return idtools.Identity{}, errors.Wrapf(err, "unable to convert uid/gid to host mapping") + } + return chownPair, nil +} + +func lookupUser(userStr, filepath string) (int, error) { + // if the string is actually a uid integer, parse to int and return + // as we don't need to translate with the help of files + uid, err := strconv.Atoi(userStr) + if err == nil { + return uid, nil + } + users, err := lcUser.ParsePasswdFileFilter(filepath, func(u lcUser.User) bool { + return u.Name == userStr + }) + if err != nil { + return 0, err + } + if len(users) == 0 { + return 0, errors.New("no such user: " + userStr) + } + return users[0].Uid, nil +} + +func lookupGroup(groupStr, filepath string) (int, error) { + // if the string is actually a gid integer, parse to int and return + // as we don't need to translate with the help of files + gid, err := strconv.Atoi(groupStr) + if err == nil { + return gid, nil + } + groups, err := lcUser.ParseGroupFileFilter(filepath, func(g lcUser.Group) bool { + return g.Name == groupStr + }) + if err != nil { + return 0, err + } + if len(groups) == 0 { + return 0, errors.New("no such group: " + groupStr) + } + return groups[0].Gid, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go new file mode 100644 index 000000000..82135a12a --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go @@ -0,0 +1,120 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "bytes" + "os" + "path/filepath" + "strings" + + "github.com/containerd/containerd/platforms" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" + "golang.org/x/sys/windows" +) + +func parseChownFlag(builder *Builder, state *dispatchState, chown, ctrRootPath string, identityMapping *idtools.IdentityMapping) (idtools.Identity, error) { + if builder.options.Platform == "windows" { + return getAccountIdentity(builder, chown, ctrRootPath, state) + } + + return identityMapping.RootPair(), nil +} + +func getAccountIdentity(builder *Builder, accountName string, ctrRootPath string, state *dispatchState) (idtools.Identity, error) { + // If this is potentially a string SID then attempt to convert it to verify + // this, otherwise continue looking for the account. + if strings.HasPrefix(accountName, "S-") || strings.HasPrefix(accountName, "s-") { + sid, err := windows.StringToSid(accountName) + + if err == nil { + accountSid, err := sid.String() + + if err != nil { + return idtools.Identity{SID: ""}, errors.Wrapf(err, "error converting SID to string") + } + + return idtools.Identity{SID: accountSid}, nil + } + } + + // Attempt to obtain the SID using the name. + sid, _, accType, err := windows.LookupSID("", accountName) + + // If this is a SID that is built-in and hence the same across all systems then use that. 
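+ // For example, a built-in alias such as "BUILTIN\Administrators" resolves to the same well-known SID (S-1-5-32-544) on every Windows installation (account name shown for illustration).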
+ if err == nil && (accType == windows.SidTypeAlias || accType == windows.SidTypeWellKnownGroup) { + accountSid, err := sid.String() + + if err != nil { + return idtools.Identity{SID: ""}, errors.Wrapf(err, "error converting SID to string") + } + + return idtools.Identity{SID: accountSid}, nil + } + + // Check if the account name is one unique to containers. + if strings.EqualFold(accountName, "ContainerAdministrator") { + return idtools.Identity{SID: system.ContainerAdministratorSidString}, nil + + } else if strings.EqualFold(accountName, "ContainerUser") { + return idtools.Identity{SID: system.ContainerUserSidString}, nil + } + + // All other lookups failed, so therefore determine if the account in + // question exists in the container and if so, obtain its SID. + return lookupNTAccount(builder, accountName, state) +} + +func lookupNTAccount(builder *Builder, accountName string, state *dispatchState) (idtools.Identity, error) { + + source, _ := filepath.Split(os.Args[0]) + + target := "C:\\Docker" + targetExecutable := target + "\\containerutility.exe" + + optionsPlatform, err := platforms.Parse(builder.options.Platform) + if err != nil { + return idtools.Identity{}, err + } + + runConfig := copyRunConfig(state.runConfig, + withCmdCommentString("internal run to obtain NT account information.", optionsPlatform.OS)) + + runConfig.ArgsEscaped = true + runConfig.Cmd = []string{targetExecutable, "getaccountsid", accountName} + + hostConfig := &container.HostConfig{Mounts: []mount.Mount{ + { + Type: mount.TypeBind, + Source: source, + Target: target, + ReadOnly: true, + }, + }, + } + + container, err := builder.containerManager.Create(runConfig, hostConfig) + if err != nil { + return idtools.Identity{}, err + } + + stdout := new(bytes.Buffer) + stderr := new(bytes.Buffer) + + if err := builder.containerManager.Run(builder.clientCtx, container.ID, stdout, stderr); err != nil { + if err, ok := err.(*statusCodeError); ok { + return idtools.Identity{}, &jsonmessage.JSONError{ + Message: stderr.String(), + Code: err.StatusCode(), + } + } + return idtools.Identity{}, err + } + + accountSid := stdout.String() + + return idtools.Identity{SID: accountSid}, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/dockerfile/metrics.go b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/metrics.go new file mode 100644 index 000000000..ceafa7ad6 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/dockerfile/metrics.go @@ -0,0 +1,44 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "github.com/docker/go-metrics" +) + +var ( + buildsTriggered metrics.Counter + buildsFailed metrics.LabeledCounter +) + +// Reason labels for the Prometheus build metrics; these values must be initialized +// before they are used. See the example below in the "builds_failed" metric definition.
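+// A failed build is then counted by selecting the pre-initialized reason label, for instance (an illustrative call using the names defined below): +// +// buildsFailed.WithValues(metricsDockerfileSyntaxError).Inc() +//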
+const ( + metricsDockerfileSyntaxError = "dockerfile_syntax_error" + metricsDockerfileEmptyError = "dockerfile_empty_error" + metricsCommandNotSupportedError = "command_not_supported_error" + metricsErrorProcessingCommandsError = "error_processing_commands_error" + metricsBuildTargetNotReachableError = "build_target_not_reachable_error" + metricsMissingOnbuildArgumentsError = "missing_onbuild_arguments_error" + metricsUnknownInstructionError = "unknown_instruction_error" + metricsBuildCanceled = "build_canceled" +) + +func init() { + buildMetrics := metrics.NewNamespace("builder", "", nil) + + buildsTriggered = buildMetrics.NewCounter("builds_triggered", "Number of triggered image builds") + buildsFailed = buildMetrics.NewLabeledCounter("builds_failed", "Number of failed image builds", "reason") + for _, r := range []string{ + metricsDockerfileSyntaxError, + metricsDockerfileEmptyError, + metricsCommandNotSupportedError, + metricsErrorProcessingCommandsError, + metricsBuildTargetNotReachableError, + metricsMissingOnbuildArgumentsError, + metricsUnknownInstructionError, + metricsBuildCanceled, + } { + buildsFailed.WithValues(r) + } + + metrics.Register(buildMetrics) +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go b/extender/code/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go new file mode 100644 index 000000000..57f224afc --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go @@ -0,0 +1,64 @@ +package dockerignore // import "github.com/docker/docker/builder/dockerignore" + +import ( + "bufio" + "bytes" + "fmt" + "io" + "path/filepath" + "strings" +) + +// ReadAll reads a .dockerignore file and returns the list of file patterns +// to ignore. Note this will trim whitespace from each line as well +// as use Go's "clean" func to get the shortest/cleanest path for each. +func ReadAll(reader io.Reader) ([]string, error) { + if reader == nil { + return nil, nil + } + + scanner := bufio.NewScanner(reader) + var excludes []string + currentLine := 0 + + utf8bom := []byte{0xEF, 0xBB, 0xBF} + for scanner.Scan() { + scannedBytes := scanner.Bytes() + // We trim UTF8 BOM + if currentLine == 0 { + scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) + } + pattern := string(scannedBytes) + currentLine++ + // Lines starting with # (comments) are ignored before processing + if strings.HasPrefix(pattern, "#") { + continue + } + pattern = strings.TrimSpace(pattern) + if pattern == "" { + continue + } + // normalize absolute paths to paths relative to the context + // (taking care of '!' prefix) + invert := pattern[0] == '!' + if invert { + pattern = strings.TrimSpace(pattern[1:]) + } + if len(pattern) > 0 { + pattern = filepath.Clean(pattern) + pattern = filepath.ToSlash(pattern) + if len(pattern) > 1 && pattern[0] == '/' { + pattern = pattern[1:] + } + } + if invert { + pattern = "!"
+ pattern + } + + excludes = append(excludes, pattern) + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("Error reading .dockerignore: %v", err) + } + return excludes, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/fscache/fscache.go b/extender/code/vendor/github.com/docker/docker/builder/fscache/fscache.go new file mode 100644 index 000000000..8897fe16d --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/fscache/fscache.go @@ -0,0 +1,653 @@ +package fscache // import "github.com/docker/docker/builder/fscache" + +import ( + "archive/tar" + "context" + "crypto/sha256" + "encoding/json" + "hash" + "os" + "path/filepath" + "sort" + "sync" + "time" + + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/remotecontext" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/tarsum" + "github.com/moby/buildkit/session/filesync" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/tonistiigi/fsutil" + fsutiltypes "github.com/tonistiigi/fsutil/types" + bolt "go.etcd.io/bbolt" + "golang.org/x/sync/singleflight" +) + +const dbFile = "fscache.db" +const cacheKey = "cache" +const metaKey = "meta" + +// Backend is a backing implementation for FSCache +type Backend interface { + Get(id string) (string, error) + Remove(id string) error +} + +// FSCache allows syncing remote resources to cached snapshots +type FSCache struct { + opt Opt + transports map[string]Transport + mu sync.Mutex + g singleflight.Group + store *fsCacheStore +} + +// Opt defines options for initializing FSCache +type Opt struct { + Backend Backend + Root string // for storing local metadata + GCPolicy GCPolicy +} + +// GCPolicy defines policy for garbage collection +type GCPolicy struct { + MaxSize uint64 + MaxKeepDuration time.Duration +} + +// NewFSCache returns a new FSCache object +func NewFSCache(opt Opt) (*FSCache, error) { + store, err := newFSCacheStore(opt) + if err != nil { + return nil, err + } + return &FSCache{ + store: store, + opt: opt, + transports: make(map[string]Transport), + }, nil +} + +// Transport defines a method for syncing remote data to FSCache +type Transport interface { + Copy(ctx context.Context, id RemoteIdentifier, dest string, cs filesync.CacheUpdater) error +} + +// RemoteIdentifier identifies a transfer request +type RemoteIdentifier interface { + Key() string + SharedKey() string + Transport() string +} + +// RegisterTransport registers a new transport method +func (fsc *FSCache) RegisterTransport(id string, transport Transport) error { + fsc.mu.Lock() + defer fsc.mu.Unlock() + if _, ok := fsc.transports[id]; ok { + return errors.Errorf("transport %v already exists", id) + } + fsc.transports[id] = transport + return nil +} + +// SyncFrom returns a source based on a remote identifier +func (fsc *FSCache) SyncFrom(ctx context.Context, id RemoteIdentifier) (builder.Source, error) { // cacheOpt + transportID := id.Transport() + fsc.mu.Lock() + transport, ok := fsc.transports[id.Transport()] + if !ok { + fsc.mu.Unlock() + return nil, errors.Errorf("invalid transport %s", transportID) + } + + logrus.Debugf("SyncFrom %s %s", id.Key(), id.SharedKey()) + fsc.mu.Unlock() + sourceRef, err, _ := fsc.g.Do(id.Key(), func() (interface{}, error) { + var sourceRef *cachedSourceRef + sourceRef, err := fsc.store.Get(id.Key()) + if err == nil { + return sourceRef, nil + } + + // check for unused shared cache
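+ // A snapshot whose SharedKey matches and that has no active references can be rebased onto the new cache key instead of being re-synced from scratch (see Rebase below). + 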
sharedKey := id.SharedKey() + if sharedKey != "" { + r, err := fsc.store.Rebase(sharedKey, id.Key()) + if err == nil { + sourceRef = r + } + } + + if sourceRef == nil { + var err error + sourceRef, err = fsc.store.New(id.Key(), sharedKey) + if err != nil { + return nil, errors.Wrap(err, "failed to create remote context") + } + } + + if err := syncFrom(ctx, sourceRef, transport, id); err != nil { + sourceRef.Release() + return nil, err + } + if err := sourceRef.resetSize(-1); err != nil { + return nil, err + } + return sourceRef, nil + }) + if err != nil { + return nil, err + } + ref := sourceRef.(*cachedSourceRef) + if ref.src == nil { // failsafe + return nil, errors.Errorf("invalid empty pull") + } + wc := &wrappedContext{Source: ref.src, closer: func() error { + ref.Release() + return nil + }} + return wc, nil +} + +// DiskUsage reports how much data is allocated by the cache +func (fsc *FSCache) DiskUsage(ctx context.Context) (int64, error) { + return fsc.store.DiskUsage(ctx) +} + +// Prune allows manually cleaning up the cache +func (fsc *FSCache) Prune(ctx context.Context) (uint64, error) { + return fsc.store.Prune(ctx) +} + +// Close stops the gc and closes the persistent db +func (fsc *FSCache) Close() error { + return fsc.store.Close() +} + +func syncFrom(ctx context.Context, cs *cachedSourceRef, transport Transport, id RemoteIdentifier) (retErr error) { + src := cs.src + if src == nil { + src = remotecontext.NewCachableSource(cs.Dir()) + } + + if !cs.cached { + if err := cs.storage.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(id.Key())) + dt := b.Get([]byte(cacheKey)) + if dt != nil { + if err := src.UnmarshalBinary(dt); err != nil { + return err + } + } else { + return errors.Wrap(src.Scan(), "failed to scan cache records") + } + return nil + }); err != nil { + return err + } + } + + dc := &detectChanges{f: src.HandleChange} + + // todo: probably send a bucket to `Copy` and let it return source + // but need to make sure that tx is safe + if err := transport.Copy(ctx, id, cs.Dir(), dc); err != nil { + return errors.Wrapf(err, "failed to copy to %s", cs.Dir()) + } + + if !dc.supported { + if err := src.Scan(); err != nil { + return errors.Wrap(err, "failed to scan cache records after transfer") + } + } + cs.cached = true + cs.src = src + return cs.storage.db.Update(func(tx *bolt.Tx) error { + dt, err := src.MarshalBinary() + if err != nil { + return err + } + b := tx.Bucket([]byte(id.Key())) + return b.Put([]byte(cacheKey), dt) + }) +} + +type fsCacheStore struct { + mu sync.Mutex + sources map[string]*cachedSource + db *bolt.DB + fs Backend + gcTimer *time.Timer + gcPolicy GCPolicy +} + +// CachePolicy defines policy for keeping a resource in cache +type CachePolicy struct { + Priority int + LastUsed time.Time +} + +func defaultCachePolicy() CachePolicy { + return CachePolicy{Priority: 10, LastUsed: time.Now()} +} + +func newFSCacheStore(opt Opt) (*fsCacheStore, error) { + if err := os.MkdirAll(opt.Root, 0700); err != nil { + return nil, err + } + p := filepath.Join(opt.Root, dbFile) + db, err := bolt.Open(p, 0600, nil) + if err != nil { + return nil, errors.Wrapf(err, "failed to open database file %s", p) + } + s := &fsCacheStore{db: db, sources: make(map[string]*cachedSource), fs: opt.Backend, gcPolicy: opt.GCPolicy} + db.View(func(tx *bolt.Tx) error { + return tx.ForEach(func(name []byte, b *bolt.Bucket) error { + dt := b.Get([]byte(metaKey)) + if dt == nil { + return nil + } + var sm sourceMeta + if err := json.Unmarshal(dt, &sm); err != nil { + return err + }
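+ // Re-resolve the snapshot directory from the backend so entries persisted in the bolt db remain usable across daemon restarts. + 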
dir, err := s.fs.Get(sm.BackendID) + if err != nil { + return err // TODO: handle gracefully + } + source := &cachedSource{ + refs: make(map[*cachedSourceRef]struct{}), + id: string(name), + dir: dir, + sourceMeta: sm, + storage: s, + } + s.sources[string(name)] = source + return nil + }) + }) + + s.gcTimer = s.startPeriodicGC(5 * time.Minute) + return s, nil +} + +func (s *fsCacheStore) startPeriodicGC(interval time.Duration) *time.Timer { + var t *time.Timer + t = time.AfterFunc(interval, func() { + if err := s.GC(); err != nil { + logrus.Errorf("build gc error: %v", err) + } + t.Reset(interval) + }) + return t +} + +func (s *fsCacheStore) Close() error { + s.gcTimer.Stop() + return s.db.Close() +} + +func (s *fsCacheStore) New(id, sharedKey string) (*cachedSourceRef, error) { + s.mu.Lock() + defer s.mu.Unlock() + var ret *cachedSource + if err := s.db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte(id)) + if err != nil { + return err + } + backendID := stringid.GenerateRandomID() + dir, err := s.fs.Get(backendID) + if err != nil { + return err + } + source := &cachedSource{ + refs: make(map[*cachedSourceRef]struct{}), + id: id, + dir: dir, + sourceMeta: sourceMeta{ + BackendID: backendID, + SharedKey: sharedKey, + CachePolicy: defaultCachePolicy(), + }, + storage: s, + } + dt, err := json.Marshal(source.sourceMeta) + if err != nil { + return err + } + if err := b.Put([]byte(metaKey), dt); err != nil { + return err + } + s.sources[id] = source + ret = source + return nil + }); err != nil { + return nil, err + } + return ret.getRef(), nil +} + +func (s *fsCacheStore) Rebase(sharedKey, newid string) (*cachedSourceRef, error) { + s.mu.Lock() + defer s.mu.Unlock() + var ret *cachedSource + for id, snap := range s.sources { + if snap.SharedKey == sharedKey && len(snap.refs) == 0 { + if err := s.db.Update(func(tx *bolt.Tx) error { + if err := tx.DeleteBucket([]byte(id)); err != nil { + return err + } + b, err := tx.CreateBucket([]byte(newid)) + if err != nil { + return err + } + snap.id = newid + snap.CachePolicy = defaultCachePolicy() + dt, err := json.Marshal(snap.sourceMeta) + if err != nil { + return err + } + if err := b.Put([]byte(metaKey), dt); err != nil { + return err + } + delete(s.sources, id) + s.sources[newid] = snap + return nil + }); err != nil { + return nil, err + } + ret = snap + break + } + } + if ret == nil { + return nil, errors.Errorf("no candidate for rebase") + } + return ret.getRef(), nil +} + +func (s *fsCacheStore) Get(id string) (*cachedSourceRef, error) { + s.mu.Lock() + defer s.mu.Unlock() + src, ok := s.sources[id] + if !ok { + return nil, errors.Errorf("not found") + } + return src.getRef(), nil +} + +// DiskUsage reports how much data is allocated by the cache +func (s *fsCacheStore) DiskUsage(ctx context.Context) (int64, error) { + s.mu.Lock() + defer s.mu.Unlock() + var size int64 + + for _, snap := range s.sources { + if len(snap.refs) == 0 { + ss, err := snap.getSize(ctx) + if err != nil { + return 0, err + } + size += ss + } + } + return size, nil +} + +// Prune allows manually cleaning up the cache +func (s *fsCacheStore) Prune(ctx context.Context) (uint64, error) { + s.mu.Lock() + defer s.mu.Unlock() + var size uint64 + + for id, snap := range s.sources { + select { + case <-ctx.Done(): + logrus.Debugf("Cache prune operation cancelled, pruned size: %d", size) + // when the context is cancelled, only return current size and nil + return size, nil + default: + } + if len(snap.refs) == 0 { + ss, err := snap.getSize(ctx) + if err != nil { 
+ return size, err + } + if err := s.delete(id); err != nil { + return size, errors.Wrapf(err, "failed to delete %s", id) + } + size += uint64(ss) + } + } + return size, nil +} + +// GC runs a garbage collector on FSCache +func (s *fsCacheStore) GC() error { + s.mu.Lock() + defer s.mu.Unlock() + var size uint64 + + ctx := context.Background() + cutoff := time.Now().Add(-s.gcPolicy.MaxKeepDuration) + var blacklist []*cachedSource + + for id, snap := range s.sources { + if len(snap.refs) == 0 { + if cutoff.After(snap.CachePolicy.LastUsed) { + if err := s.delete(id); err != nil { + return errors.Wrapf(err, "failed to delete %s", id) + } + } else { + ss, err := snap.getSize(ctx) + if err != nil { + return err + } + size += uint64(ss) + blacklist = append(blacklist, snap) + } + } + } + + sort.Sort(sortableCacheSources(blacklist)) + for _, snap := range blacklist { + if size <= s.gcPolicy.MaxSize { + break + } + ss, err := snap.getSize(ctx) + if err != nil { + return err + } + if err := s.delete(snap.id); err != nil { + return errors.Wrapf(err, "failed to delete %s", snap.id) + } + size -= uint64(ss) + } + return nil +} + +// keep mu while calling this +func (s *fsCacheStore) delete(id string) error { + src, ok := s.sources[id] + if !ok { + return nil + } + if len(src.refs) > 0 { + return errors.Errorf("can't delete %s because it has active references", id) + } + delete(s.sources, id) + if err := s.db.Update(func(tx *bolt.Tx) error { + return tx.DeleteBucket([]byte(id)) + }); err != nil { + return err + } + return s.fs.Remove(src.BackendID) +} + +type sourceMeta struct { + SharedKey string + BackendID string + CachePolicy CachePolicy + Size int64 +} + +type cachedSource struct { + sourceMeta + refs map[*cachedSourceRef]struct{} + id string + dir string + src *remotecontext.CachableSource + storage *fsCacheStore + cached bool // keep track if cache is up to date +} + +type cachedSourceRef struct { + *cachedSource +} + +func (cs *cachedSource) Dir() string { + return cs.dir +} + +// hold storage lock before calling +func (cs *cachedSource) getRef() *cachedSourceRef { + ref := &cachedSourceRef{cachedSource: cs} + cs.refs[ref] = struct{}{} + return ref +} + +// hold storage lock before calling +func (cs *cachedSource) getSize(ctx context.Context) (int64, error) { + if cs.sourceMeta.Size < 0 { + ss, err := directory.Size(ctx, cs.dir) + if err != nil { + return 0, err + } + if err := cs.resetSize(ss); err != nil { + return 0, err + } + return ss, nil + } + return cs.sourceMeta.Size, nil +} + +func (cs *cachedSource) resetSize(val int64) error { + cs.sourceMeta.Size = val + return cs.saveMeta() +} +func (cs *cachedSource) saveMeta() error { + return cs.storage.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(cs.id)) + dt, err := json.Marshal(cs.sourceMeta) + if err != nil { + return err + } + return b.Put([]byte(metaKey), dt) + }) +} + +func (csr *cachedSourceRef) Release() error { + csr.cachedSource.storage.mu.Lock() + defer csr.cachedSource.storage.mu.Unlock() + delete(csr.cachedSource.refs, csr) + if len(csr.cachedSource.refs) == 0 { + go csr.cachedSource.storage.GC() + } + return nil +} + +type detectChanges struct { + f fsutil.ChangeFunc + supported bool +} + +func (dc *detectChanges) HandleChange(kind fsutil.ChangeKind, path string, fi os.FileInfo, err error) error { + if dc == nil { + return nil + } + return dc.f(kind, path, fi, err) +} + +func (dc *detectChanges) MarkSupported(v bool) { + if dc == nil { + return + } + dc.supported = v +} + +func (dc *detectChanges) ContentHasher() 
fsutil.ContentHasher { + return newTarsumHash +} + +type wrappedContext struct { + builder.Source + closer func() error +} + +func (wc *wrappedContext) Close() error { + if err := wc.Source.Close(); err != nil { + return err + } + return wc.closer() +} + +type sortableCacheSources []*cachedSource + +// Len is the number of elements in the collection. +func (s sortableCacheSources) Len() int { + return len(s) +} + +// Less reports whether the element with +// index i should sort before the element with index j. +func (s sortableCacheSources) Less(i, j int) bool { + return s[i].CachePolicy.LastUsed.Before(s[j].CachePolicy.LastUsed) +} + +// Swap swaps the elements with indexes i and j. +func (s sortableCacheSources) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func newTarsumHash(stat *fsutiltypes.Stat) (hash.Hash, error) { + fi := &fsutil.StatInfo{Stat: stat} + p := stat.Path + if fi.IsDir() { + p += string(os.PathSeparator) + } + h, err := archive.FileInfoHeader(p, fi, stat.Linkname) + if err != nil { + return nil, err + } + h.Name = p + h.Uid = int(stat.Uid) + h.Gid = int(stat.Gid) + h.Linkname = stat.Linkname + if stat.Xattrs != nil { + h.Xattrs = make(map[string]string) + for k, v := range stat.Xattrs { + h.Xattrs[k] = string(v) + } + } + + tsh := &tarsumHash{h: h, Hash: sha256.New()} + tsh.Reset() + return tsh, nil +} + +// Reset resets the Hash to its initial state. +func (tsh *tarsumHash) Reset() { + tsh.Hash.Reset() + tarsum.WriteV1Header(tsh.h, tsh.Hash) +} + +type tarsumHash struct { + hash.Hash + h *tar.Header +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/fscache/naivedriver.go b/extender/code/vendor/github.com/docker/docker/builder/fscache/naivedriver.go new file mode 100644 index 000000000..053509aec --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/fscache/naivedriver.go @@ -0,0 +1,28 @@ +package fscache // import "github.com/docker/docker/builder/fscache" + +import ( + "os" + "path/filepath" + + "github.com/pkg/errors" +) + +// NewNaiveCacheBackend is a basic backend implementation for fscache +func NewNaiveCacheBackend(root string) Backend { + return &naiveCacheBackend{root: root} +} + +type naiveCacheBackend struct { + root string +} + +func (tcb *naiveCacheBackend) Get(id string) (string, error) { + d := filepath.Join(tcb.root, id) + if err := os.MkdirAll(d, 0700); err != nil { + return "", errors.Wrapf(err, "failed to create tmp dir for %s", d) + } + return d, nil +} +func (tcb *naiveCacheBackend) Remove(id string) error { + return errors.WithStack(os.RemoveAll(filepath.Join(tcb.root, id))) +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/remotecontext/archive.go b/extender/code/vendor/github.com/docker/docker/builder/remotecontext/archive.go new file mode 100644 index 000000000..6d247f945 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/remotecontext/archive.go @@ -0,0 +1,125 @@ +package remotecontext // import "github.com/docker/docker/builder/remotecontext" + +import ( + "io" + "os" + "path/filepath" + + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/containerfs" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/tarsum" + "github.com/pkg/errors" +) + +type archiveContext struct { + root containerfs.ContainerFS + sums tarsum.FileInfoSums +} + +func (c *archiveContext) Close() error { + return c.root.RemoveAll(c.root.Path()) +} + +func 
convertPathError(err error, cleanpath string) error { + if err, ok := err.(*os.PathError); ok { + err.Path = cleanpath + return err + } + return err +} + +type modifiableContext interface { + builder.Source + // Remove deletes the entry specified by `path`. + // It is usual for directory entries to delete all its subentries. + Remove(path string) error +} + +// FromArchive returns a build source from a tar stream. +// +// It extracts the tar stream to a temporary folder that is deleted as soon as +// the Context is closed. +// As the extraction happens, a tarsum is calculated for every file, and the set of +// all those sums then becomes the source of truth for all operations on this Context. +// +// Closing tarStream has to be done by the caller. +func FromArchive(tarStream io.Reader) (builder.Source, error) { + root, err := ioutils.TempDir("", "docker-builder") + if err != nil { + return nil, err + } + + // Assume local file system. Since it's coming from a tar file. + tsc := &archiveContext{root: containerfs.NewLocalContainerFS(root)} + + // Make sure we clean-up upon error. In the happy case the caller + // is expected to manage the clean-up + defer func() { + if err != nil { + tsc.Close() + } + }() + + decompressedStream, err := archive.DecompressStream(tarStream) + if err != nil { + return nil, err + } + + sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1) + if err != nil { + return nil, err + } + + err = chrootarchive.Untar(sum, root, nil) + if err != nil { + return nil, err + } + + tsc.sums = sum.GetSums() + return tsc, nil +} + +func (c *archiveContext) Root() containerfs.ContainerFS { + return c.root +} + +func (c *archiveContext) Remove(path string) error { + _, fullpath, err := normalize(path, c.root) + if err != nil { + return err + } + return c.root.RemoveAll(fullpath) +} + +func (c *archiveContext) Hash(path string) (string, error) { + cleanpath, fullpath, err := normalize(path, c.root) + if err != nil { + return "", err + } + + rel, err := c.root.Rel(c.root.Path(), fullpath) + if err != nil { + return "", convertPathError(err, cleanpath) + } + + // Use the checksum of the followed path(not the possible symlink) because + // this is the file that is actually copied. + if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil { + return tsInfo.Sum(), nil + } + // We set sum to path by default for the case where GetFile returns nil. + // The usual case is if relative path is empty. 
+ return path, nil // backwards compat TODO: see if really needed +} + +func normalize(path string, root containerfs.ContainerFS) (cleanPath, fullPath string, err error) { + cleanPath = root.Clean(string(root.Separator()) + path)[1:] + fullPath, err = root.ResolveScopedPath(path, true) + if err != nil { + return "", "", errors.Wrapf(err, "forbidden path outside the build context: %s (%s)", path, cleanPath) + } + return +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/remotecontext/detect.go b/extender/code/vendor/github.com/docker/docker/builder/remotecontext/detect.go new file mode 100644 index 000000000..1becd0fd5 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/remotecontext/detect.go @@ -0,0 +1,191 @@ +package remotecontext // import "github.com/docker/docker/builder/remotecontext" + +import ( + "bufio" + "fmt" + "io" + "os" + "runtime" + "strings" + + "github.com/containerd/continuity/driver" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerignore" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/urlutil" + "github.com/moby/buildkit/frontend/dockerfile/parser" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// ClientSessionRemote is identifier for client-session context transport +const ClientSessionRemote = "client-session" + +// Detect returns a context and dockerfile from remote location or local +// archive. +func Detect(config backend.BuildConfig) (remote builder.Source, dockerfile *parser.Result, err error) { + remoteURL := config.Options.RemoteContext + dockerfilePath := config.Options.Dockerfile + + switch { + case remoteURL == "": + remote, dockerfile, err = newArchiveRemote(config.Source, dockerfilePath) + case remoteURL == ClientSessionRemote: + res, err := parser.Parse(config.Source) + if err != nil { + return nil, nil, errdefs.InvalidParameter(err) + } + + return nil, res, nil + case urlutil.IsGitURL(remoteURL): + remote, dockerfile, err = newGitRemote(remoteURL, dockerfilePath) + case urlutil.IsURL(remoteURL): + remote, dockerfile, err = newURLRemote(remoteURL, dockerfilePath, config.ProgressWriter.ProgressReaderFunc) + default: + err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL) + } + return +} + +func newArchiveRemote(rc io.ReadCloser, dockerfilePath string) (builder.Source, *parser.Result, error) { + defer rc.Close() + c, err := FromArchive(rc) + if err != nil { + return nil, nil, err + } + + return withDockerfileFromContext(c.(modifiableContext), dockerfilePath) +} + +func withDockerfileFromContext(c modifiableContext, dockerfilePath string) (builder.Source, *parser.Result, error) { + df, err := openAt(c, dockerfilePath) + if err != nil { + if os.IsNotExist(err) { + if dockerfilePath == builder.DefaultDockerfileName { + lowercase := strings.ToLower(dockerfilePath) + if _, err := StatAt(c, lowercase); err == nil { + return withDockerfileFromContext(c, lowercase) + } + } + return nil, nil, errors.Errorf("Cannot locate specified Dockerfile: %s", dockerfilePath) // backwards compatible error + } + c.Close() + return nil, nil, err + } + + res, err := readAndParseDockerfile(dockerfilePath, df) + if err != nil { + return nil, nil, err + } + + df.Close() + + if err := removeDockerfile(c, dockerfilePath); err != nil { + c.Close() + return nil, nil, err + } + + return c, res, nil +} + +func newGitRemote(gitURL string, dockerfilePath string) 
(builder.Source, *parser.Result, error) { + c, err := MakeGitContext(gitURL) // TODO: change this to NewLazySource + if err != nil { + return nil, nil, err + } + return withDockerfileFromContext(c.(modifiableContext), dockerfilePath) +} + +func newURLRemote(url string, dockerfilePath string, progressReader func(in io.ReadCloser) io.ReadCloser) (builder.Source, *parser.Result, error) { + contentType, content, err := downloadRemote(url) + if err != nil { + return nil, nil, err + } + defer content.Close() + + switch contentType { + case mimeTypes.TextPlain: + res, err := parser.Parse(progressReader(content)) + return nil, res, errdefs.InvalidParameter(err) + default: + source, err := FromArchive(progressReader(content)) + if err != nil { + return nil, nil, err + } + return withDockerfileFromContext(source.(modifiableContext), dockerfilePath) + } +} + +func removeDockerfile(c modifiableContext, filesToRemove ...string) error { + f, err := openAt(c, ".dockerignore") + // Note that a missing .dockerignore file isn't treated as an error + switch { + case os.IsNotExist(err): + return nil + case err != nil: + return err + } + excludes, err := dockerignore.ReadAll(f) + if err != nil { + f.Close() + return err + } + f.Close() + filesToRemove = append([]string{".dockerignore"}, filesToRemove...) + for _, fileToRemove := range filesToRemove { + if rm, _ := fileutils.Matches(fileToRemove, excludes); rm { + if err := c.Remove(fileToRemove); err != nil { + logrus.Errorf("failed to remove %s: %v", fileToRemove, err) + } + } + } + return nil +} + +func readAndParseDockerfile(name string, rc io.Reader) (*parser.Result, error) { + br := bufio.NewReader(rc) + if _, err := br.Peek(1); err != nil { + if err == io.EOF { + return nil, errdefs.InvalidParameter(errors.Errorf("the Dockerfile (%s) cannot be empty", name)) + } + return nil, errors.Wrap(err, "unexpected error reading Dockerfile") + } + + dockerfile, err := parser.Parse(br) + if err != nil { + return nil, errdefs.InvalidParameter(errors.Wrapf(err, "failed to parse %s", name)) + } + + return dockerfile, nil +} + +func openAt(remote builder.Source, path string) (driver.File, error) { + fullPath, err := FullPath(remote, path) + if err != nil { + return nil, err + } + return remote.Root().Open(fullPath) +} + +// StatAt is a helper for calling Stat on a path from a source +func StatAt(remote builder.Source, path string) (os.FileInfo, error) { + fullPath, err := FullPath(remote, path) + if err != nil { + return nil, err + } + return remote.Root().Stat(fullPath) +} + +// FullPath is a helper for getting a full path for a path from a source +func FullPath(remote builder.Source, path string) (string, error) { + fullPath, err := remote.Root().ResolveScopedPath(path, true) + if err != nil { + if runtime.GOOS == "windows" { + return "", fmt.Errorf("failed to resolve scoped path %s (%s): %s. 
Possible cause is a forbidden path outside the build context", path, fullPath, err) + } + return "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullPath) // backwards compat with old error + } + return fullPath, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/remotecontext/filehash.go b/extender/code/vendor/github.com/docker/docker/builder/remotecontext/filehash.go new file mode 100644 index 000000000..3565dd827 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/remotecontext/filehash.go @@ -0,0 +1,45 @@ +package remotecontext // import "github.com/docker/docker/builder/remotecontext" + +import ( + "archive/tar" + "crypto/sha256" + "hash" + "os" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/tarsum" +) + +// NewFileHash returns new hash that is used for the builder cache keys +func NewFileHash(path, name string, fi os.FileInfo) (hash.Hash, error) { + var link string + if fi.Mode()&os.ModeSymlink != 0 { + var err error + link, err = os.Readlink(path) + if err != nil { + return nil, err + } + } + hdr, err := archive.FileInfoHeader(name, fi, link) + if err != nil { + return nil, err + } + if err := archive.ReadSecurityXattrToTarHeader(path, hdr); err != nil { + return nil, err + } + tsh := &tarsumHash{hdr: hdr, Hash: sha256.New()} + tsh.Reset() // initialize header + return tsh, nil +} + +type tarsumHash struct { + hash.Hash + hdr *tar.Header +} + +// Reset resets the Hash to its initial state. +func (tsh *tarsumHash) Reset() { + // comply with hash.Hash and reset to the state hash had before any writes + tsh.Hash.Reset() + tarsum.WriteV1Header(tsh.hdr, tsh.Hash) +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/remotecontext/generate.go b/extender/code/vendor/github.com/docker/docker/builder/remotecontext/generate.go new file mode 100644 index 000000000..84c1b3b5e --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/remotecontext/generate.go @@ -0,0 +1,3 @@ +package remotecontext // import "github.com/docker/docker/builder/remotecontext" + +//go:generate protoc --gogoslick_out=. tarsum.proto diff --git a/extender/code/vendor/github.com/docker/docker/builder/remotecontext/git.go b/extender/code/vendor/github.com/docker/docker/builder/remotecontext/git.go new file mode 100644 index 000000000..1583ca28d --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/remotecontext/git.go @@ -0,0 +1,35 @@ +package remotecontext // import "github.com/docker/docker/builder/remotecontext" + +import ( + "os" + + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/remotecontext/git" + "github.com/docker/docker/pkg/archive" + "github.com/sirupsen/logrus" +) + +// MakeGitContext returns a Context from gitURL that is cloned in a temporary directory. 
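+// The gitURL fragment may select a ref and a subdirectory, so a caller could, for +// example, build from a tag and subfolder (hypothetical repository): +// +// src, err := MakeGitContext("https://github.com/example/app.git#v1.2:docker") +//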
+func MakeGitContext(gitURL string) (builder.Source, error) { + root, err := git.Clone(gitURL) + if err != nil { + return nil, err + } + + c, err := archive.Tar(root, archive.Uncompressed) + if err != nil { + return nil, err + } + + defer func() { + err := c.Close() + if err != nil { + logrus.WithField("action", "MakeGitContext").WithField("module", "builder").WithField("url", gitURL).WithError(err).Error("error while closing git context") + } + err = os.RemoveAll(root) + if err != nil { + logrus.WithField("action", "MakeGitContext").WithField("module", "builder").WithField("url", gitURL).WithError(err).Error("error while removing path and children of root") + } + }() + return FromArchive(c) +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils.go b/extender/code/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils.go new file mode 100644 index 000000000..77a45beff --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils.go @@ -0,0 +1,204 @@ +package git // import "github.com/docker/docker/builder/remotecontext/git" + +import ( + "io/ioutil" + "net/http" + "net/url" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/urlutil" + "github.com/pkg/errors" +) + +type gitRepo struct { + remote string + ref string + subdir string +} + +// Clone clones a repository into a newly created directory which +// will be under "docker-build-git" +func Clone(remoteURL string) (string, error) { + repo, err := parseRemoteURL(remoteURL) + + if err != nil { + return "", err + } + + return cloneGitRepo(repo) +} + +func cloneGitRepo(repo gitRepo) (checkoutDir string, err error) { + fetch := fetchArgs(repo.remote, repo.ref) + + root, err := ioutil.TempDir("", "docker-build-git") + if err != nil { + return "", err + } + + defer func() { + if err != nil { + os.RemoveAll(root) + } + }() + + if out, err := gitWithinDir(root, "init"); err != nil { + return "", errors.Wrapf(err, "failed to init repo at %s: %s", root, out) + } + + // Add origin remote for compatibility with previous implementation that + // used "git clone" and also to make sure local refs are created for branches + if out, err := gitWithinDir(root, "remote", "add", "origin", repo.remote); err != nil { + return "", errors.Wrapf(err, "failed to add origin repo at %s: %s", repo.remote, out) + } + + if output, err := gitWithinDir(root, fetch...); err != nil { + return "", errors.Wrapf(err, "error fetching: %s", output) + } + + checkoutDir, err = checkoutGit(root, repo.ref, repo.subdir) + if err != nil { + return "", err + } + + cmd := exec.Command("git", "submodule", "update", "--init", "--recursive", "--depth=1") + cmd.Dir = root + output, err := cmd.CombinedOutput() + if err != nil { + return "", errors.Wrapf(err, "error initializing submodules: %s", output) + } + + return checkoutDir, nil +} + +func parseRemoteURL(remoteURL string) (gitRepo, error) { + repo := gitRepo{} + + if !isGitTransport(remoteURL) { + remoteURL = "https://" + remoteURL + } + + var fragment string + if strings.HasPrefix(remoteURL, "git@") { + // git@.. is not a URL, so it cannot be parsed as one + parts := strings.SplitN(remoteURL, "#", 2) + + repo.remote = parts[0] + if len(parts) == 2 { + fragment = parts[1] + } + repo.ref, repo.subdir = getRefAndSubdir(fragment) + } else { + u, err := url.Parse(remoteURL) + if err != nil { + return repo, err + } + + repo.ref, repo.subdir = getRefAndSubdir(u.Fragment) + u.Fragment = "" + repo.remote = u.String() + } + return repo, nil +} + +func getRefAndSubdir(fragment string) (ref string, subdir string) { + refAndDir := strings.SplitN(fragment, ":", 2) + ref = "master" + if len(refAndDir[0]) != 0 { + ref = refAndDir[0] + } + if len(refAndDir) > 1 && len(refAndDir[1]) != 0 { + subdir = refAndDir[1] + } + return +} + +func fetchArgs(remoteURL string, ref string) []string { + args := []string{"fetch"} + + if supportsShallowClone(remoteURL) { + args = append(args, "--depth", "1") + } + + return append(args, "origin", ref) +} + +// Check if a given git URL supports a shallow git clone, +// i.e. it is a non-HTTP server or a smart HTTP server. +func supportsShallowClone(remoteURL string) bool { + if urlutil.IsURL(remoteURL) { + // Check if the HTTP server is smart + + // Smart servers must correctly respond to a query for the git-upload-pack service + serviceURL := remoteURL + "/info/refs?service=git-upload-pack" + + // Try a HEAD request and fall back to a GET request on error + res, err := http.Head(serviceURL) + if err != nil || res.StatusCode != http.StatusOK { + res, err = http.Get(serviceURL) + if err == nil { + res.Body.Close() + } + if err != nil || res.StatusCode != http.StatusOK { + // request failed + return false + } + } + + if res.Header.Get("Content-Type") != "application/x-git-upload-pack-advertisement" { + // Fallback, not a smart server + return false + } + return true + } + // Non-HTTP protocols always support shallow clones + return true +} + +func checkoutGit(root, ref, subdir string) (string, error) { + // Try checking out by ref name first. This will work on branches and sets + // .git/HEAD to the current branch name + if output, err := gitWithinDir(root, "checkout", ref); err != nil { + // If checking out by branch name fails check out the last fetched ref + if _, err2 := gitWithinDir(root, "checkout", "FETCH_HEAD"); err2 != nil { + return "", errors.Wrapf(err, "error checking out %s: %s", ref, output) + } + } + + if subdir != "" { + newCtx, err := symlink.FollowSymlinkInScope(filepath.Join(root, subdir), root) + if err != nil { + return "", errors.Wrapf(err, "error setting git context, %q not within git root", subdir) + } + + fi, err := os.Stat(newCtx) + if err != nil { + return "", err + } + if !fi.IsDir() { + return "", errors.Errorf("error setting git context, not a directory: %s", newCtx) + } + root = newCtx + } + + return root, nil +} + +func gitWithinDir(dir string, args ...string) ([]byte, error) { + a := []string{"--work-tree", dir, "--git-dir", filepath.Join(dir, ".git")} + return git(append(a, args...)...) +} + +func git(args ...string) ([]byte, error) { + return exec.Command("git", args...).CombinedOutput() +} + +// isGitTransport returns true if the provided str is a git transport by inspecting +// the prefix of the string for known protocols used in git.
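+// For example, "git://example.com/repo.git" and "git@example.com:org/repo.git" are git +// transports, as is any http(s) URL accepted by urlutil.IsURL (hosts shown are illustrative).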
+func isGitTransport(str string) bool { + return urlutil.IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/remotecontext/lazycontext.go b/extender/code/vendor/github.com/docker/docker/builder/remotecontext/lazycontext.go new file mode 100644 index 000000000..96435f258 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/remotecontext/lazycontext.go @@ -0,0 +1,102 @@ +package remotecontext // import "github.com/docker/docker/builder/remotecontext" + +import ( + "encoding/hex" + "os" + "strings" + + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/containerfs" + "github.com/docker/docker/pkg/pools" + "github.com/pkg/errors" +) + +// NewLazySource creates a new LazyContext. LazyContext defines a hashed build +// context based on a root directory. Individual files are hashed the first time +// they are requested. It is not safe to call methods of LazyContext concurrently. +func NewLazySource(root containerfs.ContainerFS) (builder.Source, error) { + return &lazySource{ + root: root, + sums: make(map[string]string), + }, nil +} + +type lazySource struct { + root containerfs.ContainerFS + sums map[string]string +} + +func (c *lazySource) Root() containerfs.ContainerFS { + return c.root +} + +func (c *lazySource) Close() error { + return nil +} + +func (c *lazySource) Hash(path string) (string, error) { + cleanPath, fullPath, err := normalize(path, c.root) + if err != nil { + return "", err + } + + relPath, err := Rel(c.root, fullPath) + if err != nil { + return "", errors.WithStack(convertPathError(err, cleanPath)) + } + + fi, err := c.root.Lstat(fullPath) + if err != nil { + // Backwards compatibility: a missing file returns a path as hash. + // This is reached in the case of a broken symlink. + return relPath, nil + } + + sum, ok := c.sums[relPath] + if !ok { + sum, err = c.prepareHash(relPath, fi) + if err != nil { + return "", err + } + } + + return sum, nil +} + +func (c *lazySource) prepareHash(relPath string, fi os.FileInfo) (string, error) { + p := c.root.Join(c.root.Path(), relPath) + h, err := NewFileHash(p, relPath, fi) + if err != nil { + return "", errors.Wrapf(err, "failed to create hash for %s", relPath) + } + if fi.Mode().IsRegular() && fi.Size() > 0 { + f, err := c.root.Open(p) + if err != nil { + return "", errors.Wrapf(err, "failed to open %s", relPath) + } + defer f.Close() + if _, err := pools.Copy(h, f); err != nil { + return "", errors.Wrapf(err, "failed to copy file data for %s", relPath) + } + } + sum := hex.EncodeToString(h.Sum(nil)) + c.sums[relPath] = sum + return sum, nil +} + +// Rel makes a path relative to base path. Same as `filepath.Rel` but can also +// handle UUID paths in windows. +func Rel(basepath containerfs.ContainerFS, targpath string) (string, error) { + // filepath.Rel can't handle UUID paths in windows + if basepath.OS() == "windows" { + pfx := basepath.Path() + `\` + if strings.HasPrefix(targpath, pfx) { + p := strings.TrimPrefix(targpath, pfx) + if p == "" { + p = "." + } + return p, nil + } + } + return basepath.Rel(basepath.Path(), targpath) +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/remotecontext/mimetype.go b/extender/code/vendor/github.com/docker/docker/builder/remotecontext/mimetype.go new file mode 100644 index 000000000..e8a6210e9 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/remotecontext/mimetype.go @@ -0,0 +1,27 @@ +package remotecontext // import "github.com/docker/docker/builder/remotecontext" + +import ( + "mime" + "net/http" +) + +// mimeTypes stores the MIME content types. +var mimeTypes = struct { + TextPlain string + OctetStream string +}{"text/plain", "application/octet-stream"} + +// detectContentType returns a best guess representation of the MIME +// content type for the bytes at c. The value detected by +// http.DetectContentType is guaranteed not to be empty, defaulting to +// application/octet-stream when a better guess cannot be made. The +// result of this detection is then run through mime.ParseMediaType() +// which separates the actual MIME string from any parameters. +func detectContentType(c []byte) (string, map[string]string, error) { + ct := http.DetectContentType(c) + contentType, args, err := mime.ParseMediaType(ct) + if err != nil { + return "", nil, err + } + return contentType, args, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/remotecontext/remote.go b/extender/code/vendor/github.com/docker/docker/builder/remotecontext/remote.go new file mode 100644 index 000000000..1fb80549b --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/remotecontext/remote.go @@ -0,0 +1,127 @@ +package remotecontext // import "github.com/docker/docker/builder/remotecontext" + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "regexp" + + "github.com/docker/docker/errdefs" + "github.com/docker/docker/pkg/ioutils" + "github.com/pkg/errors" +) + +// When downloading remote contexts, limit the amount (in bytes) +// to be read from the response body in order to detect its Content-Type +const maxPreambleLength = 100 + +const acceptableRemoteMIME = `(?:application/(?:(?:x\-)?tar|octet\-stream|((?:x\-)?(?:gzip|bzip2?|xz)))|(?:text/plain))` + +var mimeRe = regexp.MustCompile(acceptableRemoteMIME) + +// downloadRemote fetches a remote context from a URL and returns it, along with the detected content type +func downloadRemote(remoteURL string) (string, io.ReadCloser, error) { + response, err := GetWithStatusError(remoteURL) + if err != nil { + return "", nil, errors.Wrapf(err, "error downloading remote context %s", remoteURL) + } + + contentType, contextReader, err := inspectResponse( + response.Header.Get("Content-Type"), + response.Body, + response.ContentLength) + if err != nil { + response.Body.Close() + return "", nil, errors.Wrapf(err, "error detecting content type for remote %s", remoteURL) + } + + return contentType, ioutils.NewReadCloserWrapper(contextReader, response.Body.Close), nil +} + +// GetWithStatusError does an http.Get() and returns an error if the +// status code is 4xx or 5xx.
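+// The returned error carries an errdefs classification, e.g. a 404 response is wrapped +// with errdefs.NotFound and a 401 with errdefs.Unauthorized, so callers can map the +// failure back to an API status code.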
+func GetWithStatusError(address string) (resp *http.Response, err error) { + if resp, err = http.Get(address); err != nil { + if uerr, ok := err.(*url.Error); ok { + if derr, ok := uerr.Err.(*net.DNSError); ok && !derr.IsTimeout { + return nil, errdefs.NotFound(err) + } + } + return nil, errdefs.System(err) + } + if resp.StatusCode < 400 { + return resp, nil + } + msg := fmt.Sprintf("failed to GET %s with status %s", address, resp.Status) + body, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, errdefs.System(errors.New(msg + ": error reading body")) + } + + msg += ": " + string(bytes.TrimSpace(body)) + switch resp.StatusCode { + case http.StatusNotFound: + return nil, errdefs.NotFound(errors.New(msg)) + case http.StatusBadRequest: + return nil, errdefs.InvalidParameter(errors.New(msg)) + case http.StatusUnauthorized: + return nil, errdefs.Unauthorized(errors.New(msg)) + case http.StatusForbidden: + return nil, errdefs.Forbidden(errors.New(msg)) + } + return nil, errdefs.Unknown(errors.New(msg)) +} + +// inspectResponse looks into the http response data at r to determine whether its +// content-type is on the list of acceptable content types for remote build contexts. +// This function returns: +// - a string representation of the detected content-type +// - an io.Reader for the response body +// - an error value which will be non-nil either when something goes wrong while +// reading bytes from r or when the detected content-type is not acceptable. +func inspectResponse(ct string, r io.Reader, clen int64) (string, io.Reader, error) { + plen := clen + if plen <= 0 || plen > maxPreambleLength { + plen = maxPreambleLength + } + + preamble := make([]byte, plen) + rlen, err := r.Read(preamble) + if rlen == 0 { + return ct, r, errors.New("empty response") + } + if err != nil && err != io.EOF { + return ct, r, err + } + + preambleR := bytes.NewReader(preamble[:rlen]) + bodyReader := io.MultiReader(preambleR, r) + // Some web servers will use application/octet-stream as the default + // content type for files without an extension (e.g. 
'Dockerfile') + // so if we receive this value we better check for text content + contentType := ct + if len(ct) == 0 || ct == mimeTypes.OctetStream { + contentType, _, err = detectContentType(preamble) + if err != nil { + return contentType, bodyReader, err + } + } + + contentType = selectAcceptableMIME(contentType) + var cterr error + if len(contentType) == 0 { + cterr = fmt.Errorf("unsupported Content-Type %q", ct) + contentType = ct + } + + return contentType, bodyReader, cterr +} + +func selectAcceptableMIME(ct string) string { + return mimeRe.FindString(ct) +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/remotecontext/tarsum.go b/extender/code/vendor/github.com/docker/docker/builder/remotecontext/tarsum.go new file mode 100644 index 000000000..b809cfb78 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/remotecontext/tarsum.go @@ -0,0 +1,157 @@ +package remotecontext // import "github.com/docker/docker/builder/remotecontext" + +import ( + "os" + "sync" + + "github.com/docker/docker/pkg/containerfs" + iradix "github.com/hashicorp/go-immutable-radix" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil" +) + +type hashed interface { + Digest() digest.Digest +} + +// CachableSource is a source that contains cache records for its contents +type CachableSource struct { + mu sync.Mutex + root containerfs.ContainerFS + tree *iradix.Tree + txn *iradix.Txn +} + +// NewCachableSource creates new CachableSource +func NewCachableSource(root string) *CachableSource { + ts := &CachableSource{ + tree: iradix.New(), + root: containerfs.NewLocalContainerFS(root), + } + return ts +} + +// MarshalBinary marshals current cache information to a byte array +func (cs *CachableSource) MarshalBinary() ([]byte, error) { + b := TarsumBackup{Hashes: make(map[string]string)} + root := cs.getRoot() + root.Walk(func(k []byte, v interface{}) bool { + b.Hashes[string(k)] = v.(*fileInfo).sum + return false + }) + return b.Marshal() +} + +// UnmarshalBinary decodes cache information for presented byte array +func (cs *CachableSource) UnmarshalBinary(data []byte) error { + var b TarsumBackup + if err := b.Unmarshal(data); err != nil { + return err + } + txn := iradix.New().Txn() + for p, v := range b.Hashes { + txn.Insert([]byte(p), &fileInfo{sum: v}) + } + cs.mu.Lock() + defer cs.mu.Unlock() + cs.tree = txn.Commit() + return nil +} + +// Scan rescans the cache information from the file system +func (cs *CachableSource) Scan() error { + lc, err := NewLazySource(cs.root) + if err != nil { + return err + } + txn := iradix.New().Txn() + err = cs.root.Walk(cs.root.Path(), func(path string, info os.FileInfo, err error) error { + if err != nil { + return errors.Wrapf(err, "failed to walk %s", path) + } + rel, err := Rel(cs.root, path) + if err != nil { + return err + } + h, err := lc.Hash(rel) + if err != nil { + return err + } + txn.Insert([]byte(rel), &fileInfo{sum: h}) + return nil + }) + if err != nil { + return err + } + cs.mu.Lock() + defer cs.mu.Unlock() + cs.tree = txn.Commit() + return nil +} + +// HandleChange notifies the source about a modification operation +func (cs *CachableSource) HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) (retErr error) { + cs.mu.Lock() + if cs.txn == nil { + cs.txn = cs.tree.Txn() + } + if kind == fsutil.ChangeKindDelete { + cs.txn.Delete([]byte(p)) + cs.mu.Unlock() + return + } + + h, ok := fi.(hashed) + if !ok { + cs.mu.Unlock() + return errors.Errorf("invalid fileinfo: 
%s", p) + } + + hfi := &fileInfo{ + sum: h.Digest().Hex(), + } + cs.txn.Insert([]byte(p), hfi) + cs.mu.Unlock() + return nil +} + +func (cs *CachableSource) getRoot() *iradix.Node { + cs.mu.Lock() + if cs.txn != nil { + cs.tree = cs.txn.Commit() + cs.txn = nil + } + t := cs.tree + cs.mu.Unlock() + return t.Root() +} + +// Close closes the source +func (cs *CachableSource) Close() error { + return nil +} + +// Hash returns a hash for a single file in the source +func (cs *CachableSource) Hash(path string) (string, error) { + n := cs.getRoot() + // TODO: check this for symlinks + v, ok := n.Get([]byte(path)) + if !ok { + return path, nil + } + return v.(*fileInfo).sum, nil +} + +// Root returns a root directory for the source +func (cs *CachableSource) Root() containerfs.ContainerFS { + return cs.root +} + +type fileInfo struct { + sum string +} + +func (fi *fileInfo) Hash() string { + return fi.sum +} diff --git a/extender/code/vendor/github.com/docker/docker/builder/remotecontext/tarsum.pb.go b/extender/code/vendor/github.com/docker/docker/builder/remotecontext/tarsum.pb.go new file mode 100644 index 000000000..1d23bbe65 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/builder/remotecontext/tarsum.pb.go @@ -0,0 +1,525 @@ +// Code generated by protoc-gen-gogo. +// source: tarsum.proto +// DO NOT EDIT! + +/* +Package remotecontext is a generated protocol buffer package. + +It is generated from these files: + tarsum.proto + +It has these top-level messages: + TarsumBackup +*/ +package remotecontext + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type TarsumBackup struct { + Hashes map[string]string `protobuf:"bytes,1,rep,name=Hashes" json:"Hashes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *TarsumBackup) Reset() { *m = TarsumBackup{} } +func (*TarsumBackup) ProtoMessage() {} +func (*TarsumBackup) Descriptor() ([]byte, []int) { return fileDescriptorTarsum, []int{0} } + +func (m *TarsumBackup) GetHashes() map[string]string { + if m != nil { + return m.Hashes + } + return nil +} + +func init() { + proto.RegisterType((*TarsumBackup)(nil), "remotecontext.TarsumBackup") +} +func (this *TarsumBackup) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*TarsumBackup) + if !ok { + that2, ok := that.(TarsumBackup) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if len(this.Hashes) != len(that1.Hashes) { + return false + } + for i := range this.Hashes { + if this.Hashes[i] != that1.Hashes[i] { + return false + } + } + return true +} +func (this *TarsumBackup) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&remotecontext.TarsumBackup{") + keysForHashes := make([]string, 0, len(this.Hashes)) + for k := range this.Hashes { + keysForHashes = append(keysForHashes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHashes) + mapStringForHashes := "map[string]string{" + for _, k := range keysForHashes { + mapStringForHashes += fmt.Sprintf("%#v: %#v,", k, this.Hashes[k]) + } + mapStringForHashes += "}" + if this.Hashes != nil { + s = append(s, "Hashes: "+mapStringForHashes+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringTarsum(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *TarsumBackup) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TarsumBackup) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Hashes) > 0 { + for k := range m.Hashes { + dAtA[i] = 0xa + i++ + v := m.Hashes[k] + mapSize := 1 + len(k) + sovTarsum(uint64(len(k))) + 1 + len(v) + sovTarsum(uint64(len(v))) + i = encodeVarintTarsum(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintTarsum(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintTarsum(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func encodeFixed64Tarsum(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Tarsum(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintTarsum(dAtA []byte, 
offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *TarsumBackup) Size() (n int) { + var l int + _ = l + if len(m.Hashes) > 0 { + for k, v := range m.Hashes { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTarsum(uint64(len(k))) + 1 + len(v) + sovTarsum(uint64(len(v))) + n += mapEntrySize + 1 + sovTarsum(uint64(mapEntrySize)) + } + } + return n +} + +func sovTarsum(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozTarsum(x uint64) (n int) { + return sovTarsum(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *TarsumBackup) String() string { + if this == nil { + return "nil" + } + keysForHashes := make([]string, 0, len(this.Hashes)) + for k := range this.Hashes { + keysForHashes = append(keysForHashes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHashes) + mapStringForHashes := "map[string]string{" + for _, k := range keysForHashes { + mapStringForHashes += fmt.Sprintf("%v: %v,", k, this.Hashes[k]) + } + mapStringForHashes += "}" + s := strings.Join([]string{`&TarsumBackup{`, + `Hashes:` + mapStringForHashes + `,`, + `}`, + }, "") + return s +} +func valueToStringTarsum(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *TarsumBackup) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TarsumBackup: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TarsumBackup: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hashes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTarsum + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTarsum + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Hashes == nil { + m.Hashes = make(map[string]string) + } 
+ if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTarsum + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Hashes[mapkey] = mapvalue + } else { + var mapvalue string + m.Hashes[mapkey] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTarsum(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTarsum + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTarsum(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTarsum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTarsum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTarsum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthTarsum + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTarsum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTarsum(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthTarsum = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTarsum = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("tarsum.proto", fileDescriptorTarsum) } + +var fileDescriptorTarsum = []byte{ + // 196 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x49, 0x2c, 0x2a, + 0x2e, 0xcd, 0xd5, 
0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x2d, 0x4a, 0xcd, 0xcd, 0x2f, 0x49,
+	0x4d, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0x51, 0xea, 0x62, 0xe4, 0xe2, 0x09, 0x01, 0xcb, 0x3b,
+	0x25, 0x26, 0x67, 0x97, 0x16, 0x08, 0xd9, 0x73, 0xb1, 0x79, 0x24, 0x16, 0x67, 0xa4, 0x16, 0x4b,
+	0x30, 0x2a, 0x30, 0x6b, 0x70, 0x1b, 0xa9, 0xeb, 0xa1, 0x68, 0xd0, 0x43, 0x56, 0xac, 0x07, 0x51,
+	0xe9, 0x9a, 0x57, 0x52, 0x54, 0x19, 0x04, 0xd5, 0x26, 0x65, 0xc9, 0xc5, 0x8d, 0x24, 0x2c, 0x24,
+	0xc0, 0xc5, 0x9c, 0x9d, 0x5a, 0x29, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0x62, 0x0a, 0x89,
+	0x70, 0xb1, 0x96, 0x25, 0xe6, 0x94, 0xa6, 0x4a, 0x30, 0x81, 0xc5, 0x20, 0x1c, 0x2b, 0x26, 0x0b,
+	0x46, 0x27, 0x9d, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0xf8, 0xf0, 0x50, 0x8e, 0xb1,
+	0xe1, 0x91, 0x1c, 0xe3, 0x8a, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7,
+	0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8b, 0x47, 0x72, 0x0c, 0x1f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c,
+	0xc7, 0x90, 0xc4, 0x06, 0xf6, 0x90, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x89, 0x57, 0x7d, 0x3f,
+	0xe0, 0x00, 0x00, 0x00,
+}
diff --git a/extender/code/vendor/github.com/docker/docker/builder/remotecontext/tarsum.proto b/extender/code/vendor/github.com/docker/docker/builder/remotecontext/tarsum.proto
new file mode 100644
index 000000000..cb94240ba
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/builder/remotecontext/tarsum.proto
@@ -0,0 +1,7 @@
+syntax = "proto3";
+
+package remotecontext; // no namespace because only used internally
+
+message TarsumBackup {
+  map<string, string> Hashes = 1;
+}
\ No newline at end of file
diff --git a/extender/code/vendor/github.com/docker/docker/container/archive.go b/extender/code/vendor/github.com/docker/docker/container/archive.go
new file mode 100644
index 000000000..ed72c4a40
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/container/archive.go
@@ -0,0 +1,86 @@
+package container // import "github.com/docker/docker/container"
+
+import (
+	"os"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/system"
+	"github.com/pkg/errors"
+)
+
+// ResolvePath resolves the given path in the container to a resource on the
+// host. Returns a resolved path (absolute path to the resource on the host),
+// the absolute path to the resource relative to the container's rootfs, and
+// an error if the path points to outside the container's rootfs.
+func (container *Container) ResolvePath(path string) (resolvedPath, absPath string, err error) {
+	if container.BaseFS == nil {
+		return "", "", errors.New("ResolvePath: BaseFS of container " + container.ID + " is unexpectedly nil")
+	}
+	// Check if a drive letter supplied, it must be the system drive. No-op except on Windows
+	path, err = system.CheckSystemDriveAndRemoveDriveLetter(path, container.BaseFS)
+	if err != nil {
+		return "", "", err
+	}
+
+	// Consider the given path as an absolute path in the container.
+	absPath = archive.PreserveTrailingDotOrSeparator(
+		container.BaseFS.Join(string(container.BaseFS.Separator()), path),
+		path,
+		container.BaseFS.Separator())
+
+	// Split the absPath into its Directory and Base components. We will
+	// resolve the dir in the scope of the container then append the base.
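+	// Splitting lets the directory be resolved within the container scope
+	// while the base element is appended unresolved, so a symlink in the
+	// final path component (see StatPath below) is not followed here.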
+ dirPath, basePath := container.BaseFS.Split(absPath) + + resolvedDirPath, err := container.GetResourcePath(dirPath) + if err != nil { + return "", "", err + } + + // resolvedDirPath will have been cleaned (no trailing path separators) so + // we can manually join it with the base path element. + resolvedPath = resolvedDirPath + string(container.BaseFS.Separator()) + basePath + return resolvedPath, absPath, nil +} + +// StatPath is the unexported version of StatPath. Locks and mounts should +// be acquired before calling this method and the given path should be fully +// resolved to a path on the host corresponding to the given absolute path +// inside the container. +func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) { + if container.BaseFS == nil { + return nil, errors.New("StatPath: BaseFS of container " + container.ID + " is unexpectedly nil") + } + driver := container.BaseFS + + lstat, err := driver.Lstat(resolvedPath) + if err != nil { + return nil, err + } + + var linkTarget string + if lstat.Mode()&os.ModeSymlink != 0 { + // Fully evaluate the symlink in the scope of the container rootfs. + hostPath, err := container.GetResourcePath(absPath) + if err != nil { + return nil, err + } + + linkTarget, err = driver.Rel(driver.Path(), hostPath) + if err != nil { + return nil, err + } + + // Make it an absolute path. + linkTarget = driver.Join(string(driver.Separator()), linkTarget) + } + + return &types.ContainerPathStat{ + Name: driver.Base(absPath), + Size: lstat.Size(), + Mode: lstat.Mode(), + Mtime: lstat.ModTime(), + LinkTarget: linkTarget, + }, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/container/container.go b/extender/code/vendor/github.com/docker/docker/container/container.go new file mode 100644 index 000000000..6a5907c34 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/container/container.go @@ -0,0 +1,736 @@ +package container // import "github.com/docker/docker/container" + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "syscall" + "time" + + "github.com/containerd/containerd/cio" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/container/stream" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/jsonfilelog" + "github.com/docker/docker/daemon/logger/local" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/containerfs" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/restartmanager" + "github.com/docker/docker/volume" + volumemounts "github.com/docker/docker/volume/mounts" + "github.com/docker/go-units" + agentexec "github.com/docker/swarmkit/agent/exec" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const configFileName = "config.v2.json" + +// ExitStatus provides exit reasons for a container. +type ExitStatus struct { + // The exit code with which the container exited. + ExitCode int + + // Whether the container encountered an OOM. 
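+	// (i.e. was killed by the kernel's out-of-memory killer).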
+ OOMKilled bool + + // Time at which the container died + ExitedAt time.Time +} + +// Container holds the structure defining a container object. +type Container struct { + StreamConfig *stream.Config + // embed for Container to support states directly. + *State `json:"State"` // Needed for Engine API version <= 1.11 + Root string `json:"-"` // Path to the "home" of the container, including metadata. + BaseFS containerfs.ContainerFS `json:"-"` // interface containing graphdriver mount + RWLayer layer.RWLayer `json:"-"` + ID string + Created time.Time + Managed bool + Path string + Args []string + Config *containertypes.Config + ImageID image.ID `json:"Image"` + NetworkSettings *network.Settings + LogPath string + Name string + Driver string + OS string + // MountLabel contains the options for the 'mount' command + MountLabel string + ProcessLabel string + RestartCount int + HasBeenStartedBefore bool + HasBeenManuallyStopped bool // used for unless-stopped restart policy + MountPoints map[string]*volumemounts.MountPoint + HostConfig *containertypes.HostConfig `json:"-"` // do not serialize the host config in the json, otherwise we'll make the container unportable + ExecCommands *exec.Store `json:"-"` + DependencyStore agentexec.DependencyGetter `json:"-"` + SecretReferences []*swarmtypes.SecretReference + ConfigReferences []*swarmtypes.ConfigReference + // logDriver for closing + LogDriver logger.Logger `json:"-"` + LogCopier *logger.Copier `json:"-"` + restartManager restartmanager.RestartManager + attachContext *attachContext + + // Fields here are specific to Unix platforms + AppArmorProfile string + HostnamePath string + HostsPath string + ShmPath string + ResolvConfPath string + SeccompProfile string + NoNewPrivileges bool + + // Fields here are specific to Windows + NetworkSharedContainerID string `json:"-"` + SharedEndpointList []string `json:"-"` +} + +// NewBaseContainer creates a new container with its +// basic configuration. +func NewBaseContainer(id, root string) *Container { + return &Container{ + ID: id, + State: NewState(), + ExecCommands: exec.NewStore(), + Root: root, + MountPoints: make(map[string]*volumemounts.MountPoint), + StreamConfig: stream.NewConfig(), + attachContext: &attachContext{}, + } +} + +// FromDisk loads the container configuration stored in the host. +func (container *Container) FromDisk() error { + pth, err := container.ConfigPath() + if err != nil { + return err + } + + jsonSource, err := os.Open(pth) + if err != nil { + return err + } + defer jsonSource.Close() + + dec := json.NewDecoder(jsonSource) + + // Load container settings + if err := dec.Decode(container); err != nil { + return err + } + + // Ensure the operating system is set if blank. Assume it is the OS of the + // host OS if not, to ensure containers created before multiple-OS + // support are migrated + if container.OS == "" { + container.OS = runtime.GOOS + } + + return container.readHostConfig() +} + +// toDisk saves the container configuration on disk and returns a deep copy. 
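+// The copy is produced by encoding the container to JSON and decoding it
+// back, so only serializable fields survive the round trip.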
+func (container *Container) toDisk() (*Container, error) { + var ( + buf bytes.Buffer + deepCopy Container + ) + pth, err := container.ConfigPath() + if err != nil { + return nil, err + } + + // Save container settings + f, err := ioutils.NewAtomicFileWriter(pth, 0600) + if err != nil { + return nil, err + } + defer f.Close() + + w := io.MultiWriter(&buf, f) + if err := json.NewEncoder(w).Encode(container); err != nil { + return nil, err + } + + if err := json.NewDecoder(&buf).Decode(&deepCopy); err != nil { + return nil, err + } + deepCopy.HostConfig, err = container.WriteHostConfig() + if err != nil { + return nil, err + } + + return &deepCopy, nil +} + +// CheckpointTo makes the Container's current state visible to queries, and persists state. +// Callers must hold a Container lock. +func (container *Container) CheckpointTo(store ViewDB) error { + deepCopy, err := container.toDisk() + if err != nil { + return err + } + return store.Save(deepCopy) +} + +// readHostConfig reads the host configuration from disk for the container. +func (container *Container) readHostConfig() error { + container.HostConfig = &containertypes.HostConfig{} + // If the hostconfig file does not exist, do not read it. + // (We still have to initialize container.HostConfig, + // but that's OK, since we just did that above.) + pth, err := container.HostConfigPath() + if err != nil { + return err + } + + f, err := os.Open(pth) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer f.Close() + + if err := json.NewDecoder(f).Decode(&container.HostConfig); err != nil { + return err + } + + container.InitDNSHostConfig() + + return nil +} + +// WriteHostConfig saves the host configuration on disk for the container, +// and returns a deep copy of the saved object. Callers must hold a Container lock. +func (container *Container) WriteHostConfig() (*containertypes.HostConfig, error) { + var ( + buf bytes.Buffer + deepCopy containertypes.HostConfig + ) + + pth, err := container.HostConfigPath() + if err != nil { + return nil, err + } + + f, err := ioutils.NewAtomicFileWriter(pth, 0644) + if err != nil { + return nil, err + } + defer f.Close() + + w := io.MultiWriter(&buf, f) + if err := json.NewEncoder(w).Encode(&container.HostConfig); err != nil { + return nil, err + } + + if err := json.NewDecoder(&buf).Decode(&deepCopy); err != nil { + return nil, err + } + return &deepCopy, nil +} + +// SetupWorkingDirectory sets up the container's working directory as set in container.Config.WorkingDir +func (container *Container) SetupWorkingDirectory(rootIdentity idtools.Identity) error { + // TODO @jhowardmsft, @gupta-ak LCOW Support. This will need revisiting. + // We will need to do remote filesystem operations here. + if container.OS != runtime.GOOS { + return nil + } + + if container.Config.WorkingDir == "" { + return nil + } + + container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir) + pth, err := container.GetResourcePath(container.Config.WorkingDir) + if err != nil { + return err + } + + if err := idtools.MkdirAllAndChownNew(pth, 0755, rootIdentity); err != nil { + pthInfo, err2 := os.Stat(pth) + if err2 == nil && pthInfo != nil && !pthInfo.IsDir() { + return errors.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir) + } + + return err + } + + return nil +} + +// GetResourcePath evaluates `path` in the scope of the container's BaseFS, with proper path +// sanitisation. 
Symlinks are all scoped to the BaseFS of the container, as +// though the container's BaseFS was `/`. +// +// The BaseFS of a container is the host-facing path which is bind-mounted as +// `/` inside the container. This method is essentially used to access a +// particular path inside the container as though you were a process in that +// container. +// +// NOTE: The returned path is *only* safely scoped inside the container's BaseFS +// if no component of the returned path changes (such as a component +// symlinking to a different path) between using this method and using the +// path. See symlink.FollowSymlinkInScope for more details. +func (container *Container) GetResourcePath(path string) (string, error) { + if container.BaseFS == nil { + return "", errors.New("GetResourcePath: BaseFS of container " + container.ID + " is unexpectedly nil") + } + // IMPORTANT - These are paths on the OS where the daemon is running, hence + // any filepath operations must be done in an OS agnostic way. + r, e := container.BaseFS.ResolveScopedPath(path, false) + + // Log this here on the daemon side as there's otherwise no indication apart + // from the error being propagated all the way back to the client. This makes + // debugging significantly easier and clearly indicates the error comes from the daemon. + if e != nil { + logrus.Errorf("Failed to ResolveScopedPath BaseFS %s path %s %s\n", container.BaseFS.Path(), path, e) + } + return r, e +} + +// GetRootResourcePath evaluates `path` in the scope of the container's root, with proper path +// sanitisation. Symlinks are all scoped to the root of the container, as +// though the container's root was `/`. +// +// The root of a container is the host-facing configuration metadata directory. +// Only use this method to safely access the container's `container.json` or +// other metadata files. If in doubt, use container.GetResourcePath. +// +// NOTE: The returned path is *only* safely scoped inside the container's root +// if no component of the returned path changes (such as a component +// symlinking to a different path) between using this method and using the +// path. See symlink.FollowSymlinkInScope for more details. +func (container *Container) GetRootResourcePath(path string) (string, error) { + // IMPORTANT - These are paths on the OS where the daemon is running, hence + // any filepath operations must be done in an OS agnostic way. + cleanPath := filepath.Join(string(os.PathSeparator), path) + return symlink.FollowSymlinkInScope(filepath.Join(container.Root, cleanPath), container.Root) +} + +// ExitOnNext signals to the monitor that it should not restart the container +// after we send the kill signal. +func (container *Container) ExitOnNext() { + container.RestartManager().Cancel() +} + +// HostConfigPath returns the path to the container's JSON hostconfig +func (container *Container) HostConfigPath() (string, error) { + return container.GetRootResourcePath("hostconfig.json") +} + +// ConfigPath returns the path to the container's JSON config +func (container *Container) ConfigPath() (string, error) { + return container.GetRootResourcePath(configFileName) +} + +// CheckpointDir returns the directory checkpoints are stored in +func (container *Container) CheckpointDir() string { + return filepath.Join(container.Root, "checkpoints") +} + +// StartLogger starts a new logger driver for the container. 
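+// The driver is chosen from HostConfig.LogConfig.Type; the json-file and
+// local drivers additionally get a log location prepared under the
+// container root before the driver is initialized.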
+func (container *Container) StartLogger() (logger.Logger, error) { + cfg := container.HostConfig.LogConfig + initDriver, err := logger.GetLogDriver(cfg.Type) + if err != nil { + return nil, errors.Wrap(err, "failed to get logging factory") + } + info := logger.Info{ + Config: cfg.Config, + ContainerID: container.ID, + ContainerName: container.Name, + ContainerEntrypoint: container.Path, + ContainerArgs: container.Args, + ContainerImageID: container.ImageID.String(), + ContainerImageName: container.Config.Image, + ContainerCreated: container.Created, + ContainerEnv: container.Config.Env, + ContainerLabels: container.Config.Labels, + DaemonName: "docker", + } + + // Set logging file for "json-logger" + // TODO(@cpuguy83): Setup here based on log driver is a little weird. + switch cfg.Type { + case jsonfilelog.Name: + info.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID)) + if err != nil { + return nil, err + } + + container.LogPath = info.LogPath + case local.Name: + // Do not set container.LogPath for the local driver + // This would expose the value to the API, which should not be done as it means + // that the log file implementation would become a stable API that cannot change. + logDir, err := container.GetRootResourcePath("local-logs") + if err != nil { + return nil, err + } + if err := os.MkdirAll(logDir, 0700); err != nil { + return nil, errdefs.System(errors.Wrap(err, "error creating local logs dir")) + } + info.LogPath = filepath.Join(logDir, "container.log") + } + + l, err := initDriver(info) + if err != nil { + return nil, err + } + + if containertypes.LogMode(cfg.Config["mode"]) == containertypes.LogModeNonBlock { + bufferSize := int64(-1) + if s, exists := cfg.Config["max-buffer-size"]; exists { + bufferSize, err = units.RAMInBytes(s) + if err != nil { + return nil, err + } + } + l = logger.NewRingLogger(l, info, bufferSize) + } + return l, nil +} + +// GetProcessLabel returns the process label for the container. +func (container *Container) GetProcessLabel() string { + // even if we have a process label return "" if we are running + // in privileged mode + if container.HostConfig.Privileged { + return "" + } + return container.ProcessLabel +} + +// GetMountLabel returns the mounting label for the container. +// This label is empty if the container is privileged. +func (container *Container) GetMountLabel() string { + return container.MountLabel +} + +// GetExecIDs returns the list of exec commands running on the container. +func (container *Container) GetExecIDs() []string { + return container.ExecCommands.List() +} + +// ShouldRestart decides whether the daemon should restart the container or not. +// This is based on the container's restart policy. +func (container *Container) ShouldRestart() bool { + shouldRestart, _, _ := container.RestartManager().ShouldRestart(uint32(container.ExitCode()), container.HasBeenManuallyStopped, container.FinishedAt.Sub(container.StartedAt)) + return shouldRestart +} + +// AddMountPointWithVolume adds a new mount point configured with a volume to the container. 
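+// The mount is keyed by its destination path and uses the platform volume
+// parser's default copy mode.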
+func (container *Container) AddMountPointWithVolume(destination string, vol volume.Volume, rw bool) { + operatingSystem := container.OS + if operatingSystem == "" { + operatingSystem = runtime.GOOS + } + volumeParser := volumemounts.NewParser(operatingSystem) + container.MountPoints[destination] = &volumemounts.MountPoint{ + Type: mounttypes.TypeVolume, + Name: vol.Name(), + Driver: vol.DriverName(), + Destination: destination, + RW: rw, + Volume: vol, + CopyData: volumeParser.DefaultCopyMode(), + } +} + +// UnmountVolumes unmounts all volumes +func (container *Container) UnmountVolumes(volumeEventLog func(name, action string, attributes map[string]string)) error { + var errors []string + for _, volumeMount := range container.MountPoints { + if volumeMount.Volume == nil { + continue + } + + if err := volumeMount.Cleanup(); err != nil { + errors = append(errors, err.Error()) + continue + } + + attributes := map[string]string{ + "driver": volumeMount.Volume.DriverName(), + "container": container.ID, + } + volumeEventLog(volumeMount.Volume.Name(), "unmount", attributes) + } + if len(errors) > 0 { + return fmt.Errorf("error while unmounting volumes for container %s: %s", container.ID, strings.Join(errors, "; ")) + } + return nil +} + +// IsDestinationMounted checks whether a path is mounted on the container or not. +func (container *Container) IsDestinationMounted(destination string) bool { + return container.MountPoints[destination] != nil +} + +// StopSignal returns the signal used to stop the container. +func (container *Container) StopSignal() int { + var stopSignal syscall.Signal + if container.Config.StopSignal != "" { + stopSignal, _ = signal.ParseSignal(container.Config.StopSignal) + } + + if int(stopSignal) == 0 { + stopSignal, _ = signal.ParseSignal(signal.DefaultStopSignal) + } + return int(stopSignal) +} + +// StopTimeout returns the timeout (in seconds) used to stop the container. +func (container *Container) StopTimeout() int { + if container.Config.StopTimeout != nil { + return *container.Config.StopTimeout + } + return DefaultStopTimeout +} + +// InitDNSHostConfig ensures that the dns fields are never nil. +// New containers don't ever have those fields nil, +// but pre created containers can still have those nil values. +// The non-recommended host configuration in the start api can +// make these fields nil again, this corrects that issue until +// we remove that behavior for good. +// See https://github.com/docker/docker/pull/17779 +// for a more detailed explanation on why we don't want that. +func (container *Container) InitDNSHostConfig() { + container.Lock() + defer container.Unlock() + if container.HostConfig.DNS == nil { + container.HostConfig.DNS = make([]string, 0) + } + + if container.HostConfig.DNSSearch == nil { + container.HostConfig.DNSSearch = make([]string, 0) + } + + if container.HostConfig.DNSOptions == nil { + container.HostConfig.DNSOptions = make([]string, 0) + } +} + +// UpdateMonitor updates monitor configure for running container +func (container *Container) UpdateMonitor(restartPolicy containertypes.RestartPolicy) { + type policySetter interface { + SetPolicy(containertypes.RestartPolicy) + } + + if rm, ok := container.RestartManager().(policySetter); ok { + rm.SetPolicy(restartPolicy) + } +} + +// FullHostname returns hostname and optional domain appended to it. 
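+// For example, hostname "web" with domainname "example.com" yields
+// "web.example.com".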
+func (container *Container) FullHostname() string { + fullHostname := container.Config.Hostname + if container.Config.Domainname != "" { + fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname) + } + return fullHostname +} + +// RestartManager returns the current restartmanager instance connected to container. +func (container *Container) RestartManager() restartmanager.RestartManager { + if container.restartManager == nil { + container.restartManager = restartmanager.New(container.HostConfig.RestartPolicy, container.RestartCount) + } + return container.restartManager +} + +// ResetRestartManager initializes new restartmanager based on container config +func (container *Container) ResetRestartManager(resetCount bool) { + if container.restartManager != nil { + container.restartManager.Cancel() + } + if resetCount { + container.RestartCount = 0 + } + container.restartManager = nil +} + +type attachContext struct { + ctx context.Context + cancel context.CancelFunc + mu sync.Mutex +} + +// InitAttachContext initializes or returns existing context for attach calls to +// track container liveness. +func (container *Container) InitAttachContext() context.Context { + container.attachContext.mu.Lock() + defer container.attachContext.mu.Unlock() + if container.attachContext.ctx == nil { + container.attachContext.ctx, container.attachContext.cancel = context.WithCancel(context.Background()) + } + return container.attachContext.ctx +} + +// CancelAttachContext cancels attach context. All attach calls should detach +// after this call. +func (container *Container) CancelAttachContext() { + container.attachContext.mu.Lock() + if container.attachContext.ctx != nil { + container.attachContext.cancel() + container.attachContext.ctx = nil + } + container.attachContext.mu.Unlock() +} + +func (container *Container) startLogging() error { + if container.HostConfig.LogConfig.Type == "none" { + return nil // do not start logging routines + } + + l, err := container.StartLogger() + if err != nil { + return fmt.Errorf("failed to initialize logging driver: %v", err) + } + + copier := logger.NewCopier(map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l) + container.LogCopier = copier + copier.Run() + container.LogDriver = l + + return nil +} + +// StdinPipe gets the stdin stream of the container +func (container *Container) StdinPipe() io.WriteCloser { + return container.StreamConfig.StdinPipe() +} + +// StdoutPipe gets the stdout stream of the container +func (container *Container) StdoutPipe() io.ReadCloser { + return container.StreamConfig.StdoutPipe() +} + +// StderrPipe gets the stderr stream of the container +func (container *Container) StderrPipe() io.ReadCloser { + return container.StreamConfig.StderrPipe() +} + +// CloseStreams closes the container's stdio streams +func (container *Container) CloseStreams() error { + return container.StreamConfig.CloseStreams() +} + +// InitializeStdio is called by libcontainerd to connect the stdio. 
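+// Logging is started first; if that fails, the container is reset and no
+// streams are attached.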
+func (container *Container) InitializeStdio(iop *cio.DirectIO) (cio.IO, error) { + if err := container.startLogging(); err != nil { + container.Reset(false) + return nil, err + } + + container.StreamConfig.CopyToPipe(iop) + + if container.StreamConfig.Stdin() == nil && !container.Config.Tty { + if iop.Stdin != nil { + if err := iop.Stdin.Close(); err != nil { + logrus.Warnf("error closing stdin: %+v", err) + } + } + } + + return &rio{IO: iop, sc: container.StreamConfig}, nil +} + +// MountsResourcePath returns the path where mounts are stored for the given mount +func (container *Container) MountsResourcePath(mount string) (string, error) { + return container.GetRootResourcePath(filepath.Join("mounts", mount)) +} + +// SecretMountPath returns the path of the secret mount for the container +func (container *Container) SecretMountPath() (string, error) { + return container.MountsResourcePath("secrets") +} + +// SecretFilePath returns the path to the location of a secret on the host. +func (container *Container) SecretFilePath(secretRef swarmtypes.SecretReference) (string, error) { + secrets, err := container.SecretMountPath() + if err != nil { + return "", err + } + return filepath.Join(secrets, secretRef.SecretID), nil +} + +func getSecretTargetPath(r *swarmtypes.SecretReference) string { + if filepath.IsAbs(r.File.Name) { + return r.File.Name + } + + return filepath.Join(containerSecretMountPath, r.File.Name) +} + +// CreateDaemonEnvironment creates a new environment variable slice for this container. +func (container *Container) CreateDaemonEnvironment(tty bool, linkedEnv []string) []string { + // Setup environment + os := container.OS + if os == "" { + os = runtime.GOOS + } + env := []string{} + if runtime.GOOS != "windows" || (runtime.GOOS == "windows" && os == "linux") { + env = []string{ + "PATH=" + system.DefaultPathEnv(os), + "HOSTNAME=" + container.Config.Hostname, + } + if tty { + env = append(env, "TERM=xterm") + } + env = append(env, linkedEnv...) + } + + // because the env on the container can override certain default values + // we need to replace the 'env' keys where they match and append anything + // else. 
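+	// Variables listed in the container config without an "=" are treated
+	// as unset requests and removed by ReplaceOrAppendEnvValues (see env.go).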
+ env = ReplaceOrAppendEnvValues(env, container.Config.Env) + return env +} + +type rio struct { + cio.IO + + sc *stream.Config +} + +func (i *rio) Close() error { + i.IO.Close() + + return i.sc.CloseStreams() +} + +func (i *rio) Wait() { + i.sc.Wait() + + i.IO.Wait() +} diff --git a/extender/code/vendor/github.com/docker/docker/container/container_unix.go b/extender/code/vendor/github.com/docker/docker/container/container_unix.go new file mode 100644 index 000000000..c9f436510 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/container/container_unix.go @@ -0,0 +1,478 @@ +// +build !windows + +package container // import "github.com/docker/docker/container" + +import ( + "io/ioutil" + "os" + "path/filepath" + "syscall" + + "github.com/containerd/continuity/fs" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/volume" + volumemounts "github.com/docker/docker/volume/mounts" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +const ( + // DefaultStopTimeout sets the default time, in seconds, to wait + // for the graceful container stop before forcefully terminating it. + DefaultStopTimeout = 10 + + containerSecretMountPath = "/run/secrets" +) + +// TrySetNetworkMount attempts to set the network mounts given a provided destination and +// the path to use for it; return true if the given destination was a network mount file +func (container *Container) TrySetNetworkMount(destination string, path string) bool { + if destination == "/etc/resolv.conf" { + container.ResolvConfPath = path + return true + } + if destination == "/etc/hostname" { + container.HostnamePath = path + return true + } + if destination == "/etc/hosts" { + container.HostsPath = path + return true + } + + return false +} + +// BuildHostnameFile writes the container's hostname file. +func (container *Container) BuildHostnameFile() error { + hostnamePath, err := container.GetRootResourcePath("hostname") + if err != nil { + return err + } + container.HostnamePath = hostnamePath + return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) +} + +// NetworkMounts returns the list of network mounts. 
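+// Each of /etc/resolv.conf, /etc/hostname and /etc/hosts is included only
+// if its backing file on the host can still be stat'd; missing files are
+// logged and skipped.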
+func (container *Container) NetworkMounts() []Mount { + var mounts []Mount + shared := container.HostConfig.NetworkMode.IsContainer() + parser := volumemounts.NewParser(container.OS) + if container.ResolvConfPath != "" { + if _, err := os.Stat(container.ResolvConfPath); err != nil { + logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err) + } else { + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/resolv.conf"]; exists { + writable = m.RW + } else { + label.Relabel(container.ResolvConfPath, container.MountLabel, shared) + } + mounts = append(mounts, Mount{ + Source: container.ResolvConfPath, + Destination: "/etc/resolv.conf", + Writable: writable, + Propagation: string(parser.DefaultPropagationMode()), + }) + } + } + if container.HostnamePath != "" { + if _, err := os.Stat(container.HostnamePath); err != nil { + logrus.Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err) + } else { + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/hostname"]; exists { + writable = m.RW + } else { + label.Relabel(container.HostnamePath, container.MountLabel, shared) + } + mounts = append(mounts, Mount{ + Source: container.HostnamePath, + Destination: "/etc/hostname", + Writable: writable, + Propagation: string(parser.DefaultPropagationMode()), + }) + } + } + if container.HostsPath != "" { + if _, err := os.Stat(container.HostsPath); err != nil { + logrus.Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err) + } else { + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/hosts"]; exists { + writable = m.RW + } else { + label.Relabel(container.HostsPath, container.MountLabel, shared) + } + mounts = append(mounts, Mount{ + Source: container.HostsPath, + Destination: "/etc/hosts", + Writable: writable, + Propagation: string(parser.DefaultPropagationMode()), + }) + } + } + return mounts +} + +// CopyImagePathContent copies files in destination to the volume. 
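+// It is a no-op if the path does not exist in the container or if the
+// volume already has content.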
+func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error { + rootfs, err := container.GetResourcePath(destination) + if err != nil { + return err + } + + if _, err := os.Stat(rootfs); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + id := stringid.GenerateNonCryptoID() + path, err := v.Mount(id) + if err != nil { + return err + } + + defer func() { + if err := v.Unmount(id); err != nil { + logrus.Warnf("error while unmounting volume %s: %v", v.Name(), err) + } + }() + if err := label.Relabel(path, container.MountLabel, true); err != nil && err != unix.ENOTSUP { + return err + } + return copyExistingContents(rootfs, path) +} + +// ShmResourcePath returns path to shm +func (container *Container) ShmResourcePath() (string, error) { + return container.MountsResourcePath("shm") +} + +// HasMountFor checks if path is a mountpoint +func (container *Container) HasMountFor(path string) bool { + _, exists := container.MountPoints[path] + if exists { + return true + } + + // Also search among the tmpfs mounts + for dest := range container.HostConfig.Tmpfs { + if dest == path { + return true + } + } + + return false +} + +// UnmountIpcMount unmounts shm if it was mounted +func (container *Container) UnmountIpcMount() error { + if container.HasMountFor("/dev/shm") { + return nil + } + + // container.ShmPath should not be used here as it may point + // to the host's or other container's /dev/shm + shmPath, err := container.ShmResourcePath() + if err != nil { + return err + } + if shmPath == "" { + return nil + } + if err = mount.Unmount(shmPath); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// IpcMounts returns the list of IPC mounts +func (container *Container) IpcMounts() []Mount { + var mounts []Mount + parser := volumemounts.NewParser(container.OS) + + if container.HasMountFor("/dev/shm") { + return mounts + } + if container.ShmPath == "" { + return mounts + } + + label.SetFileLabel(container.ShmPath, container.MountLabel) + mounts = append(mounts, Mount{ + Source: container.ShmPath, + Destination: "/dev/shm", + Writable: true, + Propagation: string(parser.DefaultPropagationMode()), + }) + + return mounts +} + +// SecretMounts returns the mounts for the secret path. +func (container *Container) SecretMounts() ([]Mount, error) { + var mounts []Mount + for _, r := range container.SecretReferences { + if r.File == nil { + continue + } + src, err := container.SecretFilePath(*r) + if err != nil { + return nil, err + } + mounts = append(mounts, Mount{ + Source: src, + Destination: getSecretTargetPath(r), + Writable: false, + }) + } + for _, r := range container.ConfigReferences { + fPath, err := container.ConfigFilePath(*r) + if err != nil { + return nil, err + } + mounts = append(mounts, Mount{ + Source: fPath, + Destination: r.File.Name, + Writable: false, + }) + } + + return mounts, nil +} + +// UnmountSecrets unmounts the local tmpfs for secrets +func (container *Container) UnmountSecrets() error { + p, err := container.SecretMountPath() + if err != nil { + return err + } + if _, err := os.Stat(p); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + return mount.RecursiveUnmount(p) +} + +type conflictingUpdateOptions string + +func (e conflictingUpdateOptions) Error() string { + return string(e) +} + +func (e conflictingUpdateOptions) Conflict() {} + +// UpdateContainer updates configuration of a container. Callers must hold a Lock on the Container. 
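+// Resource fields left at their zero value are not changed; NanoCPUs may
+// not be combined with CPUPeriod/CPUQuota updates, and such conflicts are
+// rejected.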
+func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error { + // update resources of container + resources := hostConfig.Resources + cResources := &container.HostConfig.Resources + + // validate NanoCPUs, CPUPeriod, and CPUQuota + // Because NanoCPU effectively updates CPUPeriod/CPUQuota, + // once NanoCPU is already set, updating CPUPeriod/CPUQuota will be blocked, and vice versa. + // In the following we make sure the intended update (resources) does not conflict with the existing (cResource). + if resources.NanoCPUs > 0 && cResources.CPUPeriod > 0 { + return conflictingUpdateOptions("Conflicting options: Nano CPUs cannot be updated as CPU Period has already been set") + } + if resources.NanoCPUs > 0 && cResources.CPUQuota > 0 { + return conflictingUpdateOptions("Conflicting options: Nano CPUs cannot be updated as CPU Quota has already been set") + } + if resources.CPUPeriod > 0 && cResources.NanoCPUs > 0 { + return conflictingUpdateOptions("Conflicting options: CPU Period cannot be updated as NanoCPUs has already been set") + } + if resources.CPUQuota > 0 && cResources.NanoCPUs > 0 { + return conflictingUpdateOptions("Conflicting options: CPU Quota cannot be updated as NanoCPUs has already been set") + } + + if resources.BlkioWeight != 0 { + cResources.BlkioWeight = resources.BlkioWeight + } + if resources.CPUShares != 0 { + cResources.CPUShares = resources.CPUShares + } + if resources.NanoCPUs != 0 { + cResources.NanoCPUs = resources.NanoCPUs + } + if resources.CPUPeriod != 0 { + cResources.CPUPeriod = resources.CPUPeriod + } + if resources.CPUQuota != 0 { + cResources.CPUQuota = resources.CPUQuota + } + if resources.CpusetCpus != "" { + cResources.CpusetCpus = resources.CpusetCpus + } + if resources.CpusetMems != "" { + cResources.CpusetMems = resources.CpusetMems + } + if resources.Memory != 0 { + // if memory limit smaller than already set memoryswap limit and doesn't + // update the memoryswap limit, then error out. + if resources.Memory > cResources.MemorySwap && resources.MemorySwap == 0 { + return conflictingUpdateOptions("Memory limit should be smaller than already set memoryswap limit, update the memoryswap at the same time") + } + cResources.Memory = resources.Memory + } + if resources.MemorySwap != 0 { + cResources.MemorySwap = resources.MemorySwap + } + if resources.MemoryReservation != 0 { + cResources.MemoryReservation = resources.MemoryReservation + } + if resources.KernelMemory != 0 { + cResources.KernelMemory = resources.KernelMemory + } + if resources.CPURealtimePeriod != 0 { + cResources.CPURealtimePeriod = resources.CPURealtimePeriod + } + if resources.CPURealtimeRuntime != 0 { + cResources.CPURealtimeRuntime = resources.CPURealtimeRuntime + } + if resources.PidsLimit != nil { + cResources.PidsLimit = resources.PidsLimit + } + + // update HostConfig of container + if hostConfig.RestartPolicy.Name != "" { + if container.HostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() { + return conflictingUpdateOptions("Restart policy cannot be updated because AutoRemove is enabled for the container") + } + container.HostConfig.RestartPolicy = hostConfig.RestartPolicy + } + + return nil +} + +// DetachAndUnmount uses a detached mount on all mount destinations, then +// unmounts each volume normally. 
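+// Failures to unmount an individual path are logged but do not stop the
+// remaining unmounts.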
+// This is used from daemon/archive for `docker cp` +func (container *Container) DetachAndUnmount(volumeEventLog func(name, action string, attributes map[string]string)) error { + networkMounts := container.NetworkMounts() + mountPaths := make([]string, 0, len(container.MountPoints)+len(networkMounts)) + + for _, mntPoint := range container.MountPoints { + dest, err := container.GetResourcePath(mntPoint.Destination) + if err != nil { + logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, mntPoint.Destination, err) + continue + } + mountPaths = append(mountPaths, dest) + } + + for _, m := range networkMounts { + dest, err := container.GetResourcePath(m.Destination) + if err != nil { + logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, m.Destination, err) + continue + } + mountPaths = append(mountPaths, dest) + } + + for _, mountPath := range mountPaths { + if err := mount.Unmount(mountPath); err != nil { + logrus.WithError(err).WithField("container", container.ID). + Warn("Unable to unmount") + } + } + return container.UnmountVolumes(volumeEventLog) +} + +// ignoreUnsupportedXAttrs ignores errors when extended attributes +// are not supported +func ignoreUnsupportedXAttrs() fs.CopyDirOpt { + xeh := func(dst, src, xattrKey string, err error) error { + if errors.Cause(err) != syscall.ENOTSUP { + return err + } + return nil + } + return fs.WithXAttrErrorHandler(xeh) +} + +// copyExistingContents copies from the source to the destination and +// ensures the ownership is appropriately set. +func copyExistingContents(source, destination string) error { + dstList, err := ioutil.ReadDir(destination) + if err != nil { + return err + } + if len(dstList) != 0 { + // destination is not empty, do not copy + return nil + } + return fs.CopyDir(destination, source, ignoreUnsupportedXAttrs()) +} + +// TmpfsMounts returns the list of tmpfs mounts +func (container *Container) TmpfsMounts() ([]Mount, error) { + parser := volumemounts.NewParser(container.OS) + var mounts []Mount + for dest, data := range container.HostConfig.Tmpfs { + mounts = append(mounts, Mount{ + Source: "tmpfs", + Destination: dest, + Data: data, + }) + } + for dest, mnt := range container.MountPoints { + if mnt.Type == mounttypes.TypeTmpfs { + data, err := parser.ConvertTmpfsOptions(mnt.Spec.TmpfsOptions, mnt.Spec.ReadOnly) + if err != nil { + return nil, err + } + mounts = append(mounts, Mount{ + Source: "tmpfs", + Destination: dest, + Data: data, + }) + } + } + return mounts, nil +} + +// EnableServiceDiscoveryOnDefaultNetwork Enable service discovery on default network +func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool { + return false +} + +// GetMountPoints gives a platform specific transformation to types.MountPoint. Callers must hold a Container lock. +func (container *Container) GetMountPoints() []types.MountPoint { + mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) + for _, m := range container.MountPoints { + mountPoints = append(mountPoints, types.MountPoint{ + Type: m.Type, + Name: m.Name, + Source: m.Path(), + Destination: m.Destination, + Driver: m.Driver, + Mode: m.Mode, + RW: m.RW, + Propagation: m.Propagation, + }) + } + return mountPoints +} + +// ConfigFilePath returns the path to the on-disk location of a config. 
+// On unix, configs are always considered secret +func (container *Container) ConfigFilePath(configRef swarmtypes.ConfigReference) (string, error) { + mounts, err := container.SecretMountPath() + if err != nil { + return "", err + } + return filepath.Join(mounts, configRef.ConfigID), nil +} diff --git a/extender/code/vendor/github.com/docker/docker/container/container_windows.go b/extender/code/vendor/github.com/docker/docker/container/container_windows.go new file mode 100644 index 000000000..9d7b94928 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/container/container_windows.go @@ -0,0 +1,213 @@ +package container // import "github.com/docker/docker/container" + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/system" +) + +const ( + containerSecretMountPath = `C:\ProgramData\Docker\secrets` + containerInternalSecretMountPath = `C:\ProgramData\Docker\internal\secrets` + containerInternalConfigsDirPath = `C:\ProgramData\Docker\internal\configs` + + // DefaultStopTimeout is the timeout (in seconds) for the shutdown call on a container + DefaultStopTimeout = 30 +) + +// UnmountIpcMount unmounts Ipc related mounts. +// This is a NOOP on windows. +func (container *Container) UnmountIpcMount() error { + return nil +} + +// IpcMounts returns the list of Ipc related mounts. +func (container *Container) IpcMounts() []Mount { + return nil +} + +// CreateSecretSymlinks creates symlinks to files in the secret mount. +func (container *Container) CreateSecretSymlinks() error { + for _, r := range container.SecretReferences { + if r.File == nil { + continue + } + resolvedPath, _, err := container.ResolvePath(getSecretTargetPath(r)) + if err != nil { + return err + } + if err := system.MkdirAll(filepath.Dir(resolvedPath), 0, ""); err != nil { + return err + } + if err := os.Symlink(filepath.Join(containerInternalSecretMountPath, r.SecretID), resolvedPath); err != nil { + return err + } + } + + return nil +} + +// SecretMounts returns the mount for the secret path. +// All secrets are stored in a single mount on Windows. Target symlinks are +// created for each secret, pointing to the files in this mount. +func (container *Container) SecretMounts() ([]Mount, error) { + var mounts []Mount + if len(container.SecretReferences) > 0 { + src, err := container.SecretMountPath() + if err != nil { + return nil, err + } + mounts = append(mounts, Mount{ + Source: src, + Destination: containerInternalSecretMountPath, + Writable: false, + }) + } + + return mounts, nil +} + +// UnmountSecrets unmounts the fs for secrets +func (container *Container) UnmountSecrets() error { + p, err := container.SecretMountPath() + if err != nil { + return err + } + return os.RemoveAll(p) +} + +// CreateConfigSymlinks creates symlinks to files in the config mount. +func (container *Container) CreateConfigSymlinks() error { + for _, configRef := range container.ConfigReferences { + if configRef.File == nil { + continue + } + resolvedPath, _, err := container.ResolvePath(configRef.File.Name) + if err != nil { + return err + } + if err := system.MkdirAll(filepath.Dir(resolvedPath), 0, ""); err != nil { + return err + } + if err := os.Symlink(filepath.Join(containerInternalConfigsDirPath, configRef.ConfigID), resolvedPath); err != nil { + return err + } + } + + return nil +} + +// ConfigMounts returns the mount for configs. 
+// TODO: Right now Windows doesn't really have a "secure" storage for secrets, +// however some configs may contain secrets. Once secure storage is worked out, +// configs and secret handling should be merged. +func (container *Container) ConfigMounts() []Mount { + var mounts []Mount + if len(container.ConfigReferences) > 0 { + mounts = append(mounts, Mount{ + Source: container.ConfigsDirPath(), + Destination: containerInternalConfigsDirPath, + Writable: false, + }) + } + + return mounts +} + +// DetachAndUnmount unmounts all volumes. +// On Windows it only delegates to `UnmountVolumes` since there is nothing to +// force unmount. +func (container *Container) DetachAndUnmount(volumeEventLog func(name, action string, attributes map[string]string)) error { + return container.UnmountVolumes(volumeEventLog) +} + +// TmpfsMounts returns the list of tmpfs mounts +func (container *Container) TmpfsMounts() ([]Mount, error) { + var mounts []Mount + return mounts, nil +} + +// UpdateContainer updates configuration of a container. Callers must hold a Lock on the Container. +func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error { + resources := hostConfig.Resources + if resources.CPUShares != 0 || + resources.Memory != 0 || + resources.NanoCPUs != 0 || + resources.CgroupParent != "" || + resources.BlkioWeight != 0 || + len(resources.BlkioWeightDevice) != 0 || + len(resources.BlkioDeviceReadBps) != 0 || + len(resources.BlkioDeviceWriteBps) != 0 || + len(resources.BlkioDeviceReadIOps) != 0 || + len(resources.BlkioDeviceWriteIOps) != 0 || + resources.CPUPeriod != 0 || + resources.CPUQuota != 0 || + resources.CPURealtimePeriod != 0 || + resources.CPURealtimeRuntime != 0 || + resources.CpusetCpus != "" || + resources.CpusetMems != "" || + len(resources.Devices) != 0 || + len(resources.DeviceCgroupRules) != 0 || + resources.DiskQuota != 0 || + resources.KernelMemory != 0 || + resources.MemoryReservation != 0 || + resources.MemorySwap != 0 || + resources.MemorySwappiness != nil || + resources.OomKillDisable != nil || + (resources.PidsLimit != nil && *resources.PidsLimit != 0) || + len(resources.Ulimits) != 0 || + resources.CPUCount != 0 || + resources.CPUPercent != 0 || + resources.IOMaximumIOps != 0 || + resources.IOMaximumBandwidth != 0 { + return fmt.Errorf("resource updating isn't supported on Windows") + } + // update HostConfig of container + if hostConfig.RestartPolicy.Name != "" { + if container.HostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() { + return fmt.Errorf("Restart policy cannot be updated because AutoRemove is enabled for the container") + } + container.HostConfig.RestartPolicy = hostConfig.RestartPolicy + } + return nil +} + +// BuildHostnameFile writes the container's hostname file. +func (container *Container) BuildHostnameFile() error { + return nil +} + +// EnableServiceDiscoveryOnDefaultNetwork Enable service discovery on default network +func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool { + return true +} + +// GetMountPoints gives a platform specific transformation to types.MountPoint. Callers must hold a Container lock. 
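+// Unlike the unix variant of GetMountPoints earlier in this diff, the Windows
+// version below leaves Mode and Propagation unset, since mount modes and
+// mount propagation have no meaning for Windows mounts.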
+func (container *Container) GetMountPoints() []types.MountPoint { + mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) + for _, m := range container.MountPoints { + mountPoints = append(mountPoints, types.MountPoint{ + Type: m.Type, + Name: m.Name, + Source: m.Path(), + Destination: m.Destination, + Driver: m.Driver, + RW: m.RW, + }) + } + return mountPoints +} + +func (container *Container) ConfigsDirPath() string { + return filepath.Join(container.Root, "configs") +} + +// ConfigFilePath returns the path to the on-disk location of a config. +func (container *Container) ConfigFilePath(configRef swarmtypes.ConfigReference) string { + return filepath.Join(container.ConfigsDirPath(), configRef.ConfigID) +} diff --git a/extender/code/vendor/github.com/docker/docker/container/env.go b/extender/code/vendor/github.com/docker/docker/container/env.go new file mode 100644 index 000000000..d225fd147 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/container/env.go @@ -0,0 +1,43 @@ +package container // import "github.com/docker/docker/container" + +import ( + "strings" +) + +// ReplaceOrAppendEnvValues returns the defaults with the overrides either +// replaced by env key or appended to the list +func ReplaceOrAppendEnvValues(defaults, overrides []string) []string { + cache := make(map[string]int, len(defaults)) + for i, e := range defaults { + parts := strings.SplitN(e, "=", 2) + cache[parts[0]] = i + } + + for _, value := range overrides { + // Values w/o = means they want this env to be removed/unset. + if !strings.Contains(value, "=") { + if i, exists := cache[value]; exists { + defaults[i] = "" // Used to indicate it should be removed + } + continue + } + + // Just do a normal set/update + parts := strings.SplitN(value, "=", 2) + if i, exists := cache[parts[0]]; exists { + defaults[i] = value + } else { + defaults = append(defaults, value) + } + } + + // Now remove all entries that we want to "unset" + for i := 0; i < len(defaults); i++ { + if defaults[i] == "" { + defaults = append(defaults[:i], defaults[i+1:]...) + i-- + } + } + + return defaults +} diff --git a/extender/code/vendor/github.com/docker/docker/container/health.go b/extender/code/vendor/github.com/docker/docker/container/health.go new file mode 100644 index 000000000..167ee9b47 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/container/health.go @@ -0,0 +1,82 @@ +package container // import "github.com/docker/docker/container" + +import ( + "sync" + + "github.com/docker/docker/api/types" + "github.com/sirupsen/logrus" +) + +// Health holds the current container health-check state +type Health struct { + types.Health + stop chan struct{} // Write struct{} to stop the monitor + mu sync.Mutex +} + +// String returns a human-readable description of the health-check state +func (s *Health) String() string { + status := s.Status() + + switch status { + case types.Starting: + return "health: starting" + default: // Healthy and Unhealthy are clear on their own + return s.Health.Status + } +} + +// Status returns the current health status. +// +// Note that this takes a lock and the value may change after being read. +func (s *Health) Status() string { + s.mu.Lock() + defer s.mu.Unlock() + + // This happens when the monitor has yet to be setup. + if s.Health.Status == "" { + return types.Unhealthy + } + + return s.Health.Status +} + +// SetStatus writes the current status to the underlying health structure, +// obeying the locking semantics. 
+// +// Status may be set directly if another lock is used. +func (s *Health) SetStatus(new string) { + s.mu.Lock() + defer s.mu.Unlock() + + s.Health.Status = new +} + +// OpenMonitorChannel creates and returns a new monitor channel. If there +// already is one, it returns nil. +func (s *Health) OpenMonitorChannel() chan struct{} { + s.mu.Lock() + defer s.mu.Unlock() + + if s.stop == nil { + logrus.Debug("OpenMonitorChannel") + s.stop = make(chan struct{}) + return s.stop + } + return nil +} + +// CloseMonitorChannel closes any existing monitor channel. +func (s *Health) CloseMonitorChannel() { + s.mu.Lock() + defer s.mu.Unlock() + + if s.stop != nil { + logrus.Debug("CloseMonitorChannel: waiting for probe to stop") + close(s.stop) + s.stop = nil + // unhealthy when the monitor has stopped for compatibility reasons + s.Health.Status = types.Unhealthy + logrus.Debug("CloseMonitorChannel done") + } +} diff --git a/extender/code/vendor/github.com/docker/docker/container/history.go b/extender/code/vendor/github.com/docker/docker/container/history.go new file mode 100644 index 000000000..7117d9a43 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/container/history.go @@ -0,0 +1,30 @@ +package container // import "github.com/docker/docker/container" + +import "sort" + +// History is a convenience type for storing a list of containers, +// sorted by creation date in descendant order. +type History []*Container + +// Len returns the number of containers in the history. +func (history *History) Len() int { + return len(*history) +} + +// Less compares two containers and returns true if the second one +// was created before the first one. +func (history *History) Less(i, j int) bool { + containers := *history + return containers[j].Created.Before(containers[i].Created) +} + +// Swap switches containers i and j positions in the history. +func (history *History) Swap(i, j int) { + containers := *history + containers[i], containers[j] = containers[j], containers[i] +} + +// sort orders the history by creation date in descendant order. +func (history *History) sort() { + sort.Sort(history) +} diff --git a/extender/code/vendor/github.com/docker/docker/container/memory_store.go b/extender/code/vendor/github.com/docker/docker/container/memory_store.go new file mode 100644 index 000000000..ad4c9e20f --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/container/memory_store.go @@ -0,0 +1,95 @@ +package container // import "github.com/docker/docker/container" + +import ( + "sync" +) + +// memoryStore implements a Store in memory. +type memoryStore struct { + s map[string]*Container + sync.RWMutex +} + +// NewMemoryStore initializes a new memory store. +func NewMemoryStore() Store { + return &memoryStore{ + s: make(map[string]*Container), + } +} + +// Add appends a new container to the memory store. +// It overrides the id if it existed before. +func (c *memoryStore) Add(id string, cont *Container) { + c.Lock() + c.s[id] = cont + c.Unlock() +} + +// Get returns a container from the store by id. +func (c *memoryStore) Get(id string) *Container { + var res *Container + c.RLock() + res = c.s[id] + c.RUnlock() + return res +} + +// Delete removes a container from the store by id. +func (c *memoryStore) Delete(id string) { + c.Lock() + delete(c.s, id) + c.Unlock() +} + +// List returns a sorted list of containers from the store. +// The containers are ordered by creation date. 
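+//
+// Editor's sketch of typical use (c is an assumed *Container value):
+//
+//	store := NewMemoryStore()
+//	store.Add(c.ID, c)          // register by ID; replaces any previous entry
+//	got := store.Get(c.ID)      // nil if absent; reads take only the read lock
+//	newestFirst := store.List() // sorted by creation date, descending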
+func (c *memoryStore) List() []*Container { + containers := History(c.all()) + containers.sort() + return containers +} + +// Size returns the number of containers in the store. +func (c *memoryStore) Size() int { + c.RLock() + defer c.RUnlock() + return len(c.s) +} + +// First returns the first container found in the store by a given filter. +func (c *memoryStore) First(filter StoreFilter) *Container { + for _, cont := range c.all() { + if filter(cont) { + return cont + } + } + return nil +} + +// ApplyAll calls the reducer function with every container in the store. +// This operation is asynchronous in the memory store. +// NOTE: Modifications to the store MUST NOT be done by the StoreReducer. +func (c *memoryStore) ApplyAll(apply StoreReducer) { + wg := new(sync.WaitGroup) + for _, cont := range c.all() { + wg.Add(1) + go func(container *Container) { + apply(container) + wg.Done() + }(cont) + } + + wg.Wait() +} + +func (c *memoryStore) all() []*Container { + c.RLock() + containers := make([]*Container, 0, len(c.s)) + for _, cont := range c.s { + containers = append(containers, cont) + } + c.RUnlock() + return containers +} + +var _ Store = &memoryStore{} diff --git a/extender/code/vendor/github.com/docker/docker/container/monitor.go b/extender/code/vendor/github.com/docker/docker/container/monitor.go new file mode 100644 index 000000000..1735e3487 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/container/monitor.go @@ -0,0 +1,46 @@ +package container // import "github.com/docker/docker/container" + +import ( + "time" + + "github.com/sirupsen/logrus" +) + +const ( + loggerCloseTimeout = 10 * time.Second +) + +// Reset puts a container into a state where it can be restarted again. +func (container *Container) Reset(lock bool) { + if lock { + container.Lock() + defer container.Unlock() + } + + if err := container.CloseStreams(); err != nil { + logrus.Errorf("%s: %s", container.ID, err) + } + + // Re-create a brand new stdin pipe once the container exited + if container.Config.OpenStdin { + container.StreamConfig.NewInputPipes() + } + + if container.LogDriver != nil { + if container.LogCopier != nil { + exit := make(chan struct{}) + go func() { + container.LogCopier.Wait() + close(exit) + }() + select { + case <-time.After(loggerCloseTimeout): + logrus.Warn("Logger didn't exit in time: logs may be truncated") + case <-exit: + } + } + container.LogDriver.Close() + container.LogCopier = nil + container.LogDriver = nil + } +} diff --git a/extender/code/vendor/github.com/docker/docker/container/mounts_unix.go b/extender/code/vendor/github.com/docker/docker/container/mounts_unix.go new file mode 100644 index 000000000..2c1160464 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/container/mounts_unix.go @@ -0,0 +1,13 @@ +// +build !windows + +package container // import "github.com/docker/docker/container" + +// Mount contains information for a mount operation. 
+type Mount struct {
+	Source       string `json:"source"`
+	Destination  string `json:"destination"`
+	Writable     bool   `json:"writable"`
+	Data         string `json:"data"`
+	Propagation  string `json:"mountpropagation"`
+	NonRecursive bool   `json:"nonrecursive"`
+}
diff --git a/extender/code/vendor/github.com/docker/docker/container/mounts_windows.go b/extender/code/vendor/github.com/docker/docker/container/mounts_windows.go
new file mode 100644
index 000000000..8f27e8806
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/container/mounts_windows.go
@@ -0,0 +1,8 @@
+package container // import "github.com/docker/docker/container"
+
+// Mount contains information for a mount operation.
+type Mount struct {
+	Source      string `json:"source"`
+	Destination string `json:"destination"`
+	Writable    bool   `json:"writable"`
+}
diff --git a/extender/code/vendor/github.com/docker/docker/container/state.go b/extender/code/vendor/github.com/docker/docker/container/state.go
new file mode 100644
index 000000000..7c2a1ec81
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/container/state.go
@@ -0,0 +1,409 @@
+package container // import "github.com/docker/docker/container"
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/go-units"
+)
+
+// State holds the current container state, and has methods to get and
+// set the state. Container has an embed, which allows all of the
+// functions defined against State to run against Container.
+type State struct {
+	sync.Mutex
+	// Note that `Running` and `Paused` are not mutually exclusive:
+	// When pausing a container (on Linux), the cgroups freezer is used to suspend
+	// all processes in the container. Freezing the process requires the process to
+	// be running. As a result, paused containers are both `Running` _and_ `Paused`.
+	Running           bool
+	Paused            bool
+	Restarting        bool
+	OOMKilled         bool
+	RemovalInProgress bool // No need for this to be persistent on disk.
+	Dead              bool
+	Pid               int
+	ExitCodeValue     int    `json:"ExitCode"`
+	ErrorMsg          string `json:"Error"` // contains last known error during container start, stop, or remove
+	StartedAt         time.Time
+	FinishedAt        time.Time
+	Health            *Health
+
+	waitStop   chan struct{}
+	waitRemove chan struct{}
+}
+
+// StateStatus is used to return container wait results.
+// Implements exec.ExitCode interface.
+// This type is needed as State includes a sync.Mutex field, which makes
+// copying it unsafe.
+type StateStatus struct {
+	exitCode int
+	err      error
+}
+
+// ExitCode returns the current exit code for the state.
+func (s StateStatus) ExitCode() int {
+	return s.exitCode
+}
+
+// Err returns the current error for the state. Returns nil if the container
+// had exited on its own.
+func (s StateStatus) Err() error {
+	return s.err
+}
+
+// NewState creates a default state object with a fresh channel for state changes.
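+// A zero-value State is not usable here: SetStopped and SetRemovalError close
+// and recreate these channels, and closing a nil channel panics, so State
+// values should always come from NewState.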
+func NewState() *State { + return &State{ + waitStop: make(chan struct{}), + waitRemove: make(chan struct{}), + } +} + +// String returns a human-readable description of the state +func (s *State) String() string { + if s.Running { + if s.Paused { + return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) + } + if s.Restarting { + return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCodeValue, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) + } + + if h := s.Health; h != nil { + return fmt.Sprintf("Up %s (%s)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)), h.String()) + } + + return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) + } + + if s.RemovalInProgress { + return "Removal In Progress" + } + + if s.Dead { + return "Dead" + } + + if s.StartedAt.IsZero() { + return "Created" + } + + if s.FinishedAt.IsZero() { + return "" + } + + return fmt.Sprintf("Exited (%d) %s ago", s.ExitCodeValue, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) +} + +// IsValidHealthString checks if the provided string is a valid container health status or not. +func IsValidHealthString(s string) bool { + return s == types.Starting || + s == types.Healthy || + s == types.Unhealthy || + s == types.NoHealthcheck +} + +// StateString returns a single string to describe state +func (s *State) StateString() string { + if s.Running { + if s.Paused { + return "paused" + } + if s.Restarting { + return "restarting" + } + return "running" + } + + if s.RemovalInProgress { + return "removing" + } + + if s.Dead { + return "dead" + } + + if s.StartedAt.IsZero() { + return "created" + } + + return "exited" +} + +// IsValidStateString checks if the provided string is a valid container state or not. +func IsValidStateString(s string) bool { + if s != "paused" && + s != "restarting" && + s != "removing" && + s != "running" && + s != "dead" && + s != "created" && + s != "exited" { + return false + } + return true +} + +// WaitCondition is an enum type for different states to wait for. +type WaitCondition int + +// Possible WaitCondition Values. +// +// WaitConditionNotRunning (default) is used to wait for any of the non-running +// states: "created", "exited", "dead", "removing", or "removed". +// +// WaitConditionNextExit is used to wait for the next time the state changes +// to a non-running state. If the state is currently "created" or "exited", +// this would cause Wait() to block until either the container runs and exits +// or is removed. +// +// WaitConditionRemoved is used to wait for the container to be removed. +const ( + WaitConditionNotRunning WaitCondition = iota + WaitConditionNextExit + WaitConditionRemoved +) + +// Wait waits until the container is in a certain state indicated by the given +// condition. A context must be used for cancelling the request, controlling +// timeouts, and avoiding goroutine leaks. Wait must be called without holding +// the state lock. Returns a channel from which the caller will receive the +// result. If the container exited on its own, the result's Err() method will +// be nil and its ExitCode() method will return the container's exit code, +// otherwise, the results Err() method will return an error indicating why the +// wait operation failed. +func (s *State) Wait(ctx context.Context, condition WaitCondition) <-chan StateStatus { + s.Lock() + defer s.Unlock() + + if condition == WaitConditionNotRunning && !s.Running { + // Buffer so we can put it in the channel now. 
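+		// A buffered channel lets the send below complete immediately,
+		// so this fast path returns an already-resolved channel without
+		// spawning a goroutine.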
+ resultC := make(chan StateStatus, 1) + + // Send the current status. + resultC <- StateStatus{ + exitCode: s.ExitCode(), + err: s.Err(), + } + + return resultC + } + + // If we are waiting only for removal, the waitStop channel should + // remain nil and block forever. + var waitStop chan struct{} + if condition < WaitConditionRemoved { + waitStop = s.waitStop + } + + // Always wait for removal, just in case the container gets removed + // while it is still in a "created" state, in which case it is never + // actually stopped. + waitRemove := s.waitRemove + + resultC := make(chan StateStatus) + + go func() { + select { + case <-ctx.Done(): + // Context timeout or cancellation. + resultC <- StateStatus{ + exitCode: -1, + err: ctx.Err(), + } + return + case <-waitStop: + case <-waitRemove: + } + + s.Lock() + result := StateStatus{ + exitCode: s.ExitCode(), + err: s.Err(), + } + s.Unlock() + + resultC <- result + }() + + return resultC +} + +// IsRunning returns whether the running flag is set. Used by Container to check whether a container is running. +func (s *State) IsRunning() bool { + s.Lock() + res := s.Running + s.Unlock() + return res +} + +// GetPID holds the process id of a container. +func (s *State) GetPID() int { + s.Lock() + res := s.Pid + s.Unlock() + return res +} + +// ExitCode returns current exitcode for the state. Take lock before if state +// may be shared. +func (s *State) ExitCode() int { + return s.ExitCodeValue +} + +// SetExitCode sets current exitcode for the state. Take lock before if state +// may be shared. +func (s *State) SetExitCode(ec int) { + s.ExitCodeValue = ec +} + +// SetRunning sets the state of the container to "running". +func (s *State) SetRunning(pid int, initial bool) { + s.ErrorMsg = "" + s.Paused = false + s.Running = true + s.Restarting = false + if initial { + s.Paused = false + } + s.ExitCodeValue = 0 + s.Pid = pid + if initial { + s.StartedAt = time.Now().UTC() + } +} + +// SetStopped sets the container state to "stopped" without locking. +func (s *State) SetStopped(exitStatus *ExitStatus) { + s.Running = false + s.Paused = false + s.Restarting = false + s.Pid = 0 + if exitStatus.ExitedAt.IsZero() { + s.FinishedAt = time.Now().UTC() + } else { + s.FinishedAt = exitStatus.ExitedAt + } + s.ExitCodeValue = exitStatus.ExitCode + s.OOMKilled = exitStatus.OOMKilled + close(s.waitStop) // fire waiters for stop + s.waitStop = make(chan struct{}) +} + +// SetRestarting sets the container state to "restarting" without locking. +// It also sets the container PID to 0. +func (s *State) SetRestarting(exitStatus *ExitStatus) { + // we should consider the container running when it is restarting because of + // all the checks in docker around rm/stop/etc + s.Running = true + s.Restarting = true + s.Paused = false + s.Pid = 0 + s.FinishedAt = time.Now().UTC() + s.ExitCodeValue = exitStatus.ExitCode + s.OOMKilled = exitStatus.OOMKilled + close(s.waitStop) // fire waiters for stop + s.waitStop = make(chan struct{}) +} + +// SetError sets the container's error state. This is useful when we want to +// know the error that occurred when container transits to another state +// when inspecting it +func (s *State) SetError(err error) { + s.ErrorMsg = "" + if err != nil { + s.ErrorMsg = err.Error() + } +} + +// IsPaused returns whether the container is paused or not. +func (s *State) IsPaused() bool { + s.Lock() + res := s.Paused + s.Unlock() + return res +} + +// IsRestarting returns whether the container is restarting or not. 
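+// (Editor's sketch of the Wait API defined above, assuming a *State s:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	status := <-s.Wait(ctx, WaitConditionNotRunning)
+//	if status.Err() != nil {
+//		// timed out, cancelled, or the container errored
+//	}
+//	code := status.ExitCode()
+//
+// The returned channel always yields exactly one StateStatus.)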
+func (s *State) IsRestarting() bool {
+	s.Lock()
+	res := s.Restarting
+	s.Unlock()
+	return res
+}
+
+// SetRemovalInProgress sets the container state as being removed.
+// It returns true if the container was already in that state.
+func (s *State) SetRemovalInProgress() bool {
+	s.Lock()
+	defer s.Unlock()
+	if s.RemovalInProgress {
+		return true
+	}
+	s.RemovalInProgress = true
+	return false
+}
+
+// ResetRemovalInProgress sets the RemovalInProgress flag back to false.
+func (s *State) ResetRemovalInProgress() {
+	s.Lock()
+	s.RemovalInProgress = false
+	s.Unlock()
+}
+
+// IsRemovalInProgress returns whether the RemovalInProgress flag is set.
+// Used by Container to check whether a container is being removed.
+func (s *State) IsRemovalInProgress() bool {
+	s.Lock()
+	res := s.RemovalInProgress
+	s.Unlock()
+	return res
+}
+
+// SetDead sets the container state to "dead".
+func (s *State) SetDead() {
+	s.Lock()
+	s.Dead = true
+	s.Unlock()
+}
+
+// IsDead returns whether the Dead flag is set. Used by Container to check whether a container is dead.
+func (s *State) IsDead() bool {
+	s.Lock()
+	res := s.Dead
+	s.Unlock()
+	return res
+}
+
+// SetRemoved assumes this container is already in the "dead" state and
+// closes the internal waitRemove channel to unblock callers waiting for a
+// container to be removed.
+func (s *State) SetRemoved() {
+	s.SetRemovalError(nil)
+}
+
+// SetRemovalError is to be called in case a container remove failed.
+// It sets an error and closes the internal waitRemove channel to unblock
+// callers waiting for the container to be removed.
+func (s *State) SetRemovalError(err error) {
+	s.SetError(err)
+	s.Lock()
+	close(s.waitRemove) // Unblock those waiting on remove.
+	// Recreate the channel so next ContainerWait will work
+	s.waitRemove = make(chan struct{})
+	s.Unlock()
+}
+
+// Err returns an error if there is one.
+func (s *State) Err() error {
+	if s.ErrorMsg != "" {
+		return errors.New(s.ErrorMsg)
+	}
+	return nil
+}
diff --git a/extender/code/vendor/github.com/docker/docker/container/store.go b/extender/code/vendor/github.com/docker/docker/container/store.go
new file mode 100644
index 000000000..3af038985
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/container/store.go
@@ -0,0 +1,28 @@
+package container // import "github.com/docker/docker/container"
+
+// StoreFilter defines a function to filter
+// containers in the store.
+type StoreFilter func(*Container) bool
+
+// StoreReducer defines a function to
+// manipulate containers in the store.
+type StoreReducer func(*Container)
+
+// Store defines an interface that
+// any container store must implement.
+type Store interface {
+	// Add appends a new container to the store.
+	Add(string, *Container)
+	// Get returns a container from the store by the identifier it was stored with.
+	Get(string) *Container
+	// Delete removes a container from the store by the identifier it was stored with.
+	Delete(string)
+	// List returns a list of containers from the store.
+	List() []*Container
+	// Size returns the number of containers in the store.
+	Size() int
+	// First returns the first container found in the store by a given filter.
+	First(StoreFilter) *Container
+	// ApplyAll calls the reducer function with every container in the store.
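+	// In the in-memory implementation above, the reducer is invoked
+	// concurrently (one goroutine per container), so it must be safe for
+	// concurrent use and must not modify the store.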
+ ApplyAll(StoreReducer) +} diff --git a/extender/code/vendor/github.com/docker/docker/container/stream/attach.go b/extender/code/vendor/github.com/docker/docker/container/stream/attach.go new file mode 100644 index 000000000..1366dcb49 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/container/stream/attach.go @@ -0,0 +1,175 @@ +package stream // import "github.com/docker/docker/container/stream" + +import ( + "context" + "io" + + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/term" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sync/errgroup" +) + +var defaultEscapeSequence = []byte{16, 17} // ctrl-p, ctrl-q + +// AttachConfig is the config struct used to attach a client to a stream's stdio +type AttachConfig struct { + // Tells the attach copier that the stream's stdin is a TTY and to look for + // escape sequences in stdin to detach from the stream. + // When true the escape sequence is not passed to the underlying stream + TTY bool + // Specifies the detach keys the client will be using + // Only useful when `TTY` is true + DetachKeys []byte + + // CloseStdin signals that once done, stdin for the attached stream should be closed + // For example, this would close the attached container's stdin. + CloseStdin bool + + // UseStd* indicate whether the client has requested to be connected to the + // given stream or not. These flags are used instead of checking Std* != nil + // at points before the client streams Std* are wired up. + UseStdin, UseStdout, UseStderr bool + + // CStd* are the streams directly connected to the container + CStdin io.WriteCloser + CStdout, CStderr io.ReadCloser + + // Provide client streams to wire up to + Stdin io.ReadCloser + Stdout, Stderr io.Writer +} + +// AttachStreams attaches the container's streams to the AttachConfig +func (c *Config) AttachStreams(cfg *AttachConfig) { + if cfg.UseStdin { + cfg.CStdin = c.StdinPipe() + } + + if cfg.UseStdout { + cfg.CStdout = c.StdoutPipe() + } + + if cfg.UseStderr { + cfg.CStderr = c.StderrPipe() + } +} + +// CopyStreams starts goroutines to copy data in and out to/from the container +func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) <-chan error { + var group errgroup.Group + + // Connect stdin of container to the attach stdin stream. 
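+	// (Each attached stream below is copied on its own errgroup goroutine;
+	// the returned errs channel yields the first copy error, or ctx.Err()
+	// once the context is cancelled and the pipes have been closed.)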
+ if cfg.Stdin != nil { + group.Go(func() error { + logrus.Debug("attach: stdin: begin") + defer logrus.Debug("attach: stdin: end") + + defer func() { + if cfg.CloseStdin && !cfg.TTY { + cfg.CStdin.Close() + } else { + // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr + if cfg.CStdout != nil { + cfg.CStdout.Close() + } + if cfg.CStderr != nil { + cfg.CStderr.Close() + } + } + }() + + var err error + if cfg.TTY { + _, err = copyEscapable(cfg.CStdin, cfg.Stdin, cfg.DetachKeys) + } else { + _, err = pools.Copy(cfg.CStdin, cfg.Stdin) + } + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + logrus.WithError(err).Debug("error on attach stdin") + return errors.Wrap(err, "error on attach stdin") + } + return nil + }) + } + + attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) error { + logrus.Debugf("attach: %s: begin", name) + defer logrus.Debugf("attach: %s: end", name) + defer func() { + // Make sure stdin gets closed + if cfg.Stdin != nil { + cfg.Stdin.Close() + } + streamPipe.Close() + }() + + _, err := pools.Copy(stream, streamPipe) + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + logrus.WithError(err).Debugf("attach: %s", name) + return errors.Wrapf(err, "error attaching %s stream", name) + } + return nil + } + + if cfg.Stdout != nil { + group.Go(func() error { + return attachStream("stdout", cfg.Stdout, cfg.CStdout) + }) + } + if cfg.Stderr != nil { + group.Go(func() error { + return attachStream("stderr", cfg.Stderr, cfg.CStderr) + }) + } + + errs := make(chan error, 1) + go func() { + defer logrus.Debug("attach done") + groupErr := make(chan error, 1) + go func() { + groupErr <- group.Wait() + }() + select { + case <-ctx.Done(): + // close all pipes + if cfg.CStdin != nil { + cfg.CStdin.Close() + } + if cfg.CStdout != nil { + cfg.CStdout.Close() + } + if cfg.CStderr != nil { + cfg.CStderr.Close() + } + + // Now with these closed, wait should return. + if err := group.Wait(); err != nil { + errs <- err + return + } + errs <- ctx.Err() + case err := <-groupErr: + errs <- err + } + }() + + return errs +} + +func copyEscapable(dst io.Writer, src io.ReadCloser, keys []byte) (written int64, err error) { + if len(keys) == 0 { + keys = defaultEscapeSequence + } + pr := term.NewEscapeProxy(src, keys) + defer src.Close() + + return pools.Copy(dst, pr) +} diff --git a/extender/code/vendor/github.com/docker/docker/container/stream/streams.go b/extender/code/vendor/github.com/docker/docker/container/stream/streams.go new file mode 100644 index 000000000..d81867c1d --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/container/stream/streams.go @@ -0,0 +1,146 @@ +package stream // import "github.com/docker/docker/container/stream" + +import ( + "fmt" + "io" + "io/ioutil" + "strings" + "sync" + + "github.com/containerd/containerd/cio" + "github.com/docker/docker/pkg/broadcaster" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/pools" + "github.com/sirupsen/logrus" +) + +// Config holds information about I/O streams managed together. +// +// config.StdinPipe returns a WriteCloser which can be used to feed data +// to the standard input of the streamConfig's active process. +// config.StdoutPipe and streamConfig.StderrPipe each return a ReadCloser +// which can be used to retrieve the standard output (and error) generated +// by the container's active process. 
The output (and error) are actually +// copied and delivered to all StdoutPipe and StderrPipe consumers, using +// a kind of "broadcaster". +type Config struct { + sync.WaitGroup + stdout *broadcaster.Unbuffered + stderr *broadcaster.Unbuffered + stdin io.ReadCloser + stdinPipe io.WriteCloser +} + +// NewConfig creates a stream config and initializes +// the standard err and standard out to new unbuffered broadcasters. +func NewConfig() *Config { + return &Config{ + stderr: new(broadcaster.Unbuffered), + stdout: new(broadcaster.Unbuffered), + } +} + +// Stdout returns the standard output in the configuration. +func (c *Config) Stdout() *broadcaster.Unbuffered { + return c.stdout +} + +// Stderr returns the standard error in the configuration. +func (c *Config) Stderr() *broadcaster.Unbuffered { + return c.stderr +} + +// Stdin returns the standard input in the configuration. +func (c *Config) Stdin() io.ReadCloser { + return c.stdin +} + +// StdinPipe returns an input writer pipe as an io.WriteCloser. +func (c *Config) StdinPipe() io.WriteCloser { + return c.stdinPipe +} + +// StdoutPipe creates a new io.ReadCloser with an empty bytes pipe. +// It adds this new out pipe to the Stdout broadcaster. +// This will block stdout if unconsumed. +func (c *Config) StdoutPipe() io.ReadCloser { + bytesPipe := ioutils.NewBytesPipe() + c.stdout.Add(bytesPipe) + return bytesPipe +} + +// StderrPipe creates a new io.ReadCloser with an empty bytes pipe. +// It adds this new err pipe to the Stderr broadcaster. +// This will block stderr if unconsumed. +func (c *Config) StderrPipe() io.ReadCloser { + bytesPipe := ioutils.NewBytesPipe() + c.stderr.Add(bytesPipe) + return bytesPipe +} + +// NewInputPipes creates new pipes for both standard inputs, Stdin and StdinPipe. +func (c *Config) NewInputPipes() { + c.stdin, c.stdinPipe = io.Pipe() +} + +// NewNopInputPipe creates a new input pipe that will silently drop all messages in the input. +func (c *Config) NewNopInputPipe() { + c.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) +} + +// CloseStreams ensures that the configured streams are properly closed. 
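+// (Editor's note: stdout and stderr are broadcasters, so every StdoutPipe or
+// StderrPipe call above registers an independent reader that receives its own
+// copy of the output, e.g.:
+//
+//	cfg := stream.NewConfig()
+//	a := cfg.StdoutPipe()
+//	b := cfg.StdoutPipe()
+//	// bytes written to cfg.Stdout() now reach both a and b; unconsumed
+//	// pipes block the stream, so always drain or close them.)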
+func (c *Config) CloseStreams() error { + var errors []string + + if c.stdin != nil { + if err := c.stdin.Close(); err != nil { + errors = append(errors, fmt.Sprintf("error close stdin: %s", err)) + } + } + + if err := c.stdout.Clean(); err != nil { + errors = append(errors, fmt.Sprintf("error close stdout: %s", err)) + } + + if err := c.stderr.Clean(); err != nil { + errors = append(errors, fmt.Sprintf("error close stderr: %s", err)) + } + + if len(errors) > 0 { + return fmt.Errorf(strings.Join(errors, "\n")) + } + + return nil +} + +// CopyToPipe connects streamconfig with a libcontainerd.IOPipe +func (c *Config) CopyToPipe(iop *cio.DirectIO) { + copyFunc := func(w io.Writer, r io.ReadCloser) { + c.Add(1) + go func() { + if _, err := pools.Copy(w, r); err != nil { + logrus.Errorf("stream copy error: %v", err) + } + r.Close() + c.Done() + }() + } + + if iop.Stdout != nil { + copyFunc(c.Stdout(), iop.Stdout) + } + if iop.Stderr != nil { + copyFunc(c.Stderr(), iop.Stderr) + } + + if stdin := c.Stdin(); stdin != nil { + if iop.Stdin != nil { + go func() { + pools.Copy(iop.Stdin, stdin) + if err := iop.Stdin.Close(); err != nil { + logrus.Warnf("failed to close stdin: %v", err) + } + }() + } + } +} diff --git a/extender/code/vendor/github.com/docker/docker/container/view.go b/extender/code/vendor/github.com/docker/docker/container/view.go new file mode 100644 index 000000000..aabb01923 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/container/view.go @@ -0,0 +1,494 @@ +package container // import "github.com/docker/docker/container" + +import ( + "errors" + "fmt" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + "github.com/docker/go-connections/nat" + "github.com/hashicorp/go-memdb" + "github.com/sirupsen/logrus" +) + +const ( + memdbContainersTable = "containers" + memdbNamesTable = "names" + memdbIDIndex = "id" + memdbContainerIDIndex = "containerid" +) + +var ( + // ErrNameReserved is an error which is returned when a name is requested to be reserved that already is reserved + ErrNameReserved = errors.New("name is reserved") + // ErrNameNotReserved is an error which is returned when trying to find a name that is not reserved + ErrNameNotReserved = errors.New("name is not reserved") +) + +// Snapshot is a read only view for Containers. It holds all information necessary to serve container queries in a +// versioned ACID in-memory store. +type Snapshot struct { + types.Container + + // additional info queries need to filter on + // preserve nanosec resolution for queries + CreatedAt time.Time + StartedAt time.Time + Name string + Pid int + ExitCode int + Running bool + Paused bool + Managed bool + ExposedPorts nat.PortSet + PortBindings nat.PortSet + Health string + HostConfig struct { + Isolation string + } +} + +// nameAssociation associates a container id with a name. +type nameAssociation struct { + // name is the name to associate. Note that name is the primary key + // ("id" in memdb). 
+ name string + containerID string +} + +// ViewDB provides an in-memory transactional (ACID) container Store +type ViewDB interface { + Snapshot() View + Save(*Container) error + Delete(*Container) error + + ReserveName(name, containerID string) error + ReleaseName(name string) error +} + +// View can be used by readers to avoid locking +type View interface { + All() ([]Snapshot, error) + Get(id string) (*Snapshot, error) + + GetID(name string) (string, error) + GetAllNames() map[string][]string +} + +var schema = &memdb.DBSchema{ + Tables: map[string]*memdb.TableSchema{ + memdbContainersTable: { + Name: memdbContainersTable, + Indexes: map[string]*memdb.IndexSchema{ + memdbIDIndex: { + Name: memdbIDIndex, + Unique: true, + Indexer: &containerByIDIndexer{}, + }, + }, + }, + memdbNamesTable: { + Name: memdbNamesTable, + Indexes: map[string]*memdb.IndexSchema{ + // Used for names, because "id" is the primary key in memdb. + memdbIDIndex: { + Name: memdbIDIndex, + Unique: true, + Indexer: &namesByNameIndexer{}, + }, + memdbContainerIDIndex: { + Name: memdbContainerIDIndex, + Indexer: &namesByContainerIDIndexer{}, + }, + }, + }, + }, +} + +type memDB struct { + store *memdb.MemDB +} + +// NoSuchContainerError indicates that the container wasn't found in the +// database. +type NoSuchContainerError struct { + id string +} + +// Error satisfies the error interface. +func (e NoSuchContainerError) Error() string { + return "no such container " + e.id +} + +// NewViewDB provides the default implementation, with the default schema +func NewViewDB() (ViewDB, error) { + store, err := memdb.NewMemDB(schema) + if err != nil { + return nil, err + } + return &memDB{store: store}, nil +} + +// Snapshot provides a consistent read-only View of the database +func (db *memDB) Snapshot() View { + return &memdbView{ + txn: db.store.Txn(false), + } +} + +func (db *memDB) withTxn(cb func(*memdb.Txn) error) error { + txn := db.store.Txn(true) + err := cb(txn) + if err != nil { + txn.Abort() + return err + } + txn.Commit() + return nil +} + +// Save atomically updates the in-memory store state for a Container. +// Only read only (deep) copies of containers may be passed in. +func (db *memDB) Save(c *Container) error { + return db.withTxn(func(txn *memdb.Txn) error { + return txn.Insert(memdbContainersTable, c) + }) +} + +// Delete removes an item by ID +func (db *memDB) Delete(c *Container) error { + return db.withTxn(func(txn *memdb.Txn) error { + view := &memdbView{txn: txn} + names := view.getNames(c.ID) + + for _, name := range names { + txn.Delete(memdbNamesTable, nameAssociation{name: name}) + } + + // Ignore error - the container may not actually exist in the + // db, but we still need to clean up associated names. 
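+		// go-memdb addresses rows through their indexed fields, so a
+		// minimal container carrying only the ID (via NewBaseContainer,
+		// defined elsewhere in this package) is enough to identify the
+		// row to delete.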
+ txn.Delete(memdbContainersTable, NewBaseContainer(c.ID, c.Root)) + return nil + }) +} + +// ReserveName registers a container ID to a name +// ReserveName is idempotent +// Attempting to reserve a container ID to a name that already exists results in an `ErrNameReserved` +// A name reservation is globally unique +func (db *memDB) ReserveName(name, containerID string) error { + return db.withTxn(func(txn *memdb.Txn) error { + s, err := txn.First(memdbNamesTable, memdbIDIndex, name) + if err != nil { + return err + } + if s != nil { + if s.(nameAssociation).containerID != containerID { + return ErrNameReserved + } + return nil + } + return txn.Insert(memdbNamesTable, nameAssociation{name: name, containerID: containerID}) + }) +} + +// ReleaseName releases the reserved name +// Once released, a name can be reserved again +func (db *memDB) ReleaseName(name string) error { + return db.withTxn(func(txn *memdb.Txn) error { + return txn.Delete(memdbNamesTable, nameAssociation{name: name}) + }) +} + +type memdbView struct { + txn *memdb.Txn +} + +// All returns a all items in this snapshot. Returned objects must never be modified. +func (v *memdbView) All() ([]Snapshot, error) { + var all []Snapshot + iter, err := v.txn.Get(memdbContainersTable, memdbIDIndex) + if err != nil { + return nil, err + } + for { + item := iter.Next() + if item == nil { + break + } + snapshot := v.transform(item.(*Container)) + all = append(all, *snapshot) + } + return all, nil +} + +// Get returns an item by id. Returned objects must never be modified. +func (v *memdbView) Get(id string) (*Snapshot, error) { + s, err := v.txn.First(memdbContainersTable, memdbIDIndex, id) + if err != nil { + return nil, err + } + if s == nil { + return nil, NoSuchContainerError{id: id} + } + return v.transform(s.(*Container)), nil +} + +// getNames lists all the reserved names for the given container ID. +func (v *memdbView) getNames(containerID string) []string { + iter, err := v.txn.Get(memdbNamesTable, memdbContainerIDIndex, containerID) + if err != nil { + return nil + } + + var names []string + for { + item := iter.Next() + if item == nil { + break + } + names = append(names, item.(nameAssociation).name) + } + + return names +} + +// GetID returns the container ID that the passed in name is reserved to. +func (v *memdbView) GetID(name string) (string, error) { + s, err := v.txn.First(memdbNamesTable, memdbIDIndex, name) + if err != nil { + return "", err + } + if s == nil { + return "", ErrNameNotReserved + } + return s.(nameAssociation).containerID, nil +} + +// GetAllNames returns all registered names. +func (v *memdbView) GetAllNames() map[string][]string { + iter, err := v.txn.Get(memdbNamesTable, memdbContainerIDIndex) + if err != nil { + return nil + } + + out := make(map[string][]string) + for { + item := iter.Next() + if item == nil { + break + } + assoc := item.(nameAssociation) + out[assoc.containerID] = append(out[assoc.containerID], assoc.name) + } + + return out +} + +// transform maps a (deep) copied Container object to what queries need. +// A lock on the Container is not held because these are immutable deep copies. 
+func (v *memdbView) transform(container *Container) *Snapshot { + health := types.NoHealthcheck + if container.Health != nil { + health = container.Health.Status() + } + snapshot := &Snapshot{ + Container: types.Container{ + ID: container.ID, + Names: v.getNames(container.ID), + ImageID: container.ImageID.String(), + Ports: []types.Port{}, + Mounts: container.GetMountPoints(), + State: container.State.StateString(), + Status: container.State.String(), + Created: container.Created.Unix(), + }, + CreatedAt: container.Created, + StartedAt: container.StartedAt, + Name: container.Name, + Pid: container.Pid, + Managed: container.Managed, + ExposedPorts: make(nat.PortSet), + PortBindings: make(nat.PortSet), + Health: health, + Running: container.Running, + Paused: container.Paused, + ExitCode: container.ExitCode(), + } + + if snapshot.Names == nil { + // Dead containers will often have no name, so make sure the response isn't null + snapshot.Names = []string{} + } + + if container.HostConfig != nil { + snapshot.Container.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode) + snapshot.HostConfig.Isolation = string(container.HostConfig.Isolation) + for binding := range container.HostConfig.PortBindings { + snapshot.PortBindings[binding] = struct{}{} + } + } + + if container.Config != nil { + snapshot.Image = container.Config.Image + snapshot.Labels = container.Config.Labels + for exposed := range container.Config.ExposedPorts { + snapshot.ExposedPorts[exposed] = struct{}{} + } + } + + if len(container.Args) > 0 { + var args []string + for _, arg := range container.Args { + if strings.Contains(arg, " ") { + args = append(args, fmt.Sprintf("'%s'", arg)) + } else { + args = append(args, arg) + } + } + argsAsString := strings.Join(args, " ") + snapshot.Command = fmt.Sprintf("%s %s", container.Path, argsAsString) + } else { + snapshot.Command = container.Path + } + + snapshot.Ports = []types.Port{} + networks := make(map[string]*network.EndpointSettings) + if container.NetworkSettings != nil { + for name, netw := range container.NetworkSettings.Networks { + if netw == nil || netw.EndpointSettings == nil { + continue + } + networks[name] = &network.EndpointSettings{ + EndpointID: netw.EndpointID, + Gateway: netw.Gateway, + IPAddress: netw.IPAddress, + IPPrefixLen: netw.IPPrefixLen, + IPv6Gateway: netw.IPv6Gateway, + GlobalIPv6Address: netw.GlobalIPv6Address, + GlobalIPv6PrefixLen: netw.GlobalIPv6PrefixLen, + MacAddress: netw.MacAddress, + NetworkID: netw.NetworkID, + } + if netw.IPAMConfig != nil { + networks[name].IPAMConfig = &network.EndpointIPAMConfig{ + IPv4Address: netw.IPAMConfig.IPv4Address, + IPv6Address: netw.IPAMConfig.IPv6Address, + } + } + } + for port, bindings := range container.NetworkSettings.Ports { + p, err := nat.ParsePort(port.Port()) + if err != nil { + logrus.Warnf("invalid port map %+v", err) + continue + } + if len(bindings) == 0 { + snapshot.Ports = append(snapshot.Ports, types.Port{ + PrivatePort: uint16(p), + Type: port.Proto(), + }) + continue + } + for _, binding := range bindings { + h, err := nat.ParsePort(binding.HostPort) + if err != nil { + logrus.Warnf("invalid host port map %+v", err) + continue + } + snapshot.Ports = append(snapshot.Ports, types.Port{ + PrivatePort: uint16(p), + PublicPort: uint16(h), + Type: port.Proto(), + IP: binding.HostIP, + }) + } + } + } + snapshot.NetworkSettings = &types.SummaryNetworkSettings{Networks: networks} + + return snapshot +} + +// containerByIDIndexer is used to extract the ID field from Container types. 
+// memdb.StringFieldIndex can not be used since ID is a field from an embedded struct. +type containerByIDIndexer struct{} + +// FromObject implements the memdb.SingleIndexer interface for Container objects +func (e *containerByIDIndexer) FromObject(obj interface{}) (bool, []byte, error) { + c, ok := obj.(*Container) + if !ok { + return false, nil, fmt.Errorf("%T is not a Container", obj) + } + // Add the null character as a terminator + v := c.ID + "\x00" + return true, []byte(v), nil +} + +// FromArgs implements the memdb.Indexer interface +func (e *containerByIDIndexer) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + arg, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + // Add the null character as a terminator + arg += "\x00" + return []byte(arg), nil +} + +// namesByNameIndexer is used to index container name associations by name. +type namesByNameIndexer struct{} + +func (e *namesByNameIndexer) FromObject(obj interface{}) (bool, []byte, error) { + n, ok := obj.(nameAssociation) + if !ok { + return false, nil, fmt.Errorf(`%T does not have type "nameAssociation"`, obj) + } + + // Add the null character as a terminator + return true, []byte(n.name + "\x00"), nil +} + +func (e *namesByNameIndexer) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + arg, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + // Add the null character as a terminator + arg += "\x00" + return []byte(arg), nil +} + +// namesByContainerIDIndexer is used to index container names by container ID. +type namesByContainerIDIndexer struct{} + +func (e *namesByContainerIDIndexer) FromObject(obj interface{}) (bool, []byte, error) { + n, ok := obj.(nameAssociation) + if !ok { + return false, nil, fmt.Errorf(`%T does not have type "nameAssociation"`, obj) + } + + // Add the null character as a terminator + return true, []byte(n.containerID + "\x00"), nil +} + +func (e *namesByContainerIDIndexer) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + arg, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + // Add the null character as a terminator + arg += "\x00" + return []byte(arg), nil +} diff --git a/extender/code/vendor/github.com/docker/docker/daemon/cluster/provider/network.go b/extender/code/vendor/github.com/docker/docker/daemon/cluster/provider/network.go new file mode 100644 index 000000000..533baa0e1 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/daemon/cluster/provider/network.go @@ -0,0 +1,37 @@ +package provider // import "github.com/docker/docker/daemon/cluster/provider" + +import "github.com/docker/docker/api/types" + +// NetworkCreateRequest is a request when creating a network. +type NetworkCreateRequest struct { + ID string + types.NetworkCreateRequest +} + +// NetworkCreateResponse is a response when creating a network. +type NetworkCreateResponse struct { + ID string `json:"Id"` +} + +// VirtualAddress represents a virtual address. +type VirtualAddress struct { + IPv4 string + IPv6 string +} + +// PortConfig represents a port configuration. 
+type PortConfig struct { + Name string + Protocol int32 + TargetPort uint32 + PublishedPort uint32 +} + +// ServiceConfig represents a service configuration. +type ServiceConfig struct { + ID string + Name string + Aliases map[string][]string + VirtualAddresses map[string]*VirtualAddress + ExposedPorts []*PortConfig +} diff --git a/extender/code/vendor/github.com/docker/docker/daemon/exec/exec.go b/extender/code/vendor/github.com/docker/docker/daemon/exec/exec.go new file mode 100644 index 000000000..c036c46a0 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/daemon/exec/exec.go @@ -0,0 +1,146 @@ +package exec // import "github.com/docker/docker/daemon/exec" + +import ( + "runtime" + "sync" + + "github.com/containerd/containerd/cio" + "github.com/docker/docker/container/stream" + "github.com/docker/docker/pkg/stringid" + "github.com/sirupsen/logrus" +) + +// Config holds the configurations for execs. The Daemon keeps +// track of both running and finished execs so that they can be +// examined both during and after completion. +type Config struct { + sync.Mutex + Started chan struct{} + StreamConfig *stream.Config + ID string + Running bool + ExitCode *int + OpenStdin bool + OpenStderr bool + OpenStdout bool + CanRemove bool + ContainerID string + DetachKeys []byte + Entrypoint string + Args []string + Tty bool + Privileged bool + User string + WorkingDir string + Env []string + Pid int +} + +// NewConfig initializes the a new exec configuration +func NewConfig() *Config { + return &Config{ + ID: stringid.GenerateNonCryptoID(), + StreamConfig: stream.NewConfig(), + Started: make(chan struct{}), + } +} + +type rio struct { + cio.IO + + sc *stream.Config +} + +func (i *rio) Close() error { + i.IO.Close() + + return i.sc.CloseStreams() +} + +func (i *rio) Wait() { + i.sc.Wait() + + i.IO.Wait() +} + +// InitializeStdio is called by libcontainerd to connect the stdio. +func (c *Config) InitializeStdio(iop *cio.DirectIO) (cio.IO, error) { + c.StreamConfig.CopyToPipe(iop) + + if c.StreamConfig.Stdin() == nil && !c.Tty && runtime.GOOS == "windows" { + if iop.Stdin != nil { + if err := iop.Stdin.Close(); err != nil { + logrus.Errorf("error closing exec stdin: %+v", err) + } + } + } + + return &rio{IO: iop, sc: c.StreamConfig}, nil +} + +// CloseStreams closes the stdio streams for the exec +func (c *Config) CloseStreams() error { + return c.StreamConfig.CloseStreams() +} + +// SetExitCode sets the exec config's exit code +func (c *Config) SetExitCode(code int) { + c.ExitCode = &code +} + +// Store keeps track of the exec configurations. +type Store struct { + byID map[string]*Config + sync.RWMutex +} + +// NewStore initializes a new exec store. +func NewStore() *Store { + return &Store{ + byID: make(map[string]*Config), + } +} + +// Commands returns the exec configurations in the store. +func (e *Store) Commands() map[string]*Config { + e.RLock() + byID := make(map[string]*Config, len(e.byID)) + for id, config := range e.byID { + byID[id] = config + } + e.RUnlock() + return byID +} + +// Add adds a new exec configuration to the store. +func (e *Store) Add(id string, Config *Config) { + e.Lock() + e.byID[id] = Config + e.Unlock() +} + +// Get returns an exec configuration by its id. +func (e *Store) Get(id string) *Config { + e.RLock() + res := e.byID[id] + e.RUnlock() + return res +} + +// Delete removes an exec configuration from the store. 
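+// (The pid argument is unused in this implementation.)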
+func (e *Store) Delete(id string, pid int) { + e.Lock() + delete(e.byID, id) + e.Unlock() +} + +// List returns the list of exec ids in the store. +func (e *Store) List() []string { + var IDs []string + e.RLock() + for id := range e.byID { + IDs = append(IDs, id) + } + e.RUnlock() + return IDs +} diff --git a/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/counter.go b/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/counter.go new file mode 100644 index 000000000..2772bd247 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/counter.go @@ -0,0 +1,62 @@ +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" + +import "sync" + +type minfo struct { + check bool + count int +} + +// RefCounter is a generic counter for use by graphdriver Get/Put calls +type RefCounter struct { + counts map[string]*minfo + mu sync.Mutex + checker Checker +} + +// NewRefCounter returns a new RefCounter +func NewRefCounter(c Checker) *RefCounter { + return &RefCounter{ + checker: c, + counts: make(map[string]*minfo), + } +} + +// Increment increases the ref count for the given id and returns the current count +func (c *RefCounter) Increment(path string) int { + return c.incdec(path, func(minfo *minfo) { + minfo.count++ + }) +} + +// Decrement decreases the ref count for the given id and returns the current count +func (c *RefCounter) Decrement(path string) int { + return c.incdec(path, func(minfo *minfo) { + minfo.count-- + }) +} + +func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int { + c.mu.Lock() + m := c.counts[path] + if m == nil { + m = &minfo{} + c.counts[path] = m + } + // if we are checking this path for the first time check to make sure + // if it was already mounted on the system and make sure we have a correct ref + // count if it is mounted as it is in use. + if !m.check { + m.check = true + if c.checker.IsMounted(path) { + m.count++ + } + } + infoOp(m) + count := m.count + if count <= 0 { + delete(c.counts, path) + } + c.mu.Unlock() + return count +} diff --git a/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/driver.go b/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/driver.go new file mode 100644 index 000000000..44434f7dc --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/driver.go @@ -0,0 +1,333 @@ +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" +) + +// FsMagic unsigned id of the filesystem in use. +type FsMagic uint32 + +const ( + // FsMagicUnsupported is a predefined constant value other than a valid filesystem id. + FsMagicUnsupported = FsMagic(0x00000000) +) + +var ( + // All registered drivers + drivers map[string]InitFunc +) + +//CreateOpts contains optional arguments for Create() and CreateReadWrite() +// methods. +type CreateOpts struct { + MountLabel string + StorageOpt map[string]string +} + +// InitFunc initializes the storage driver. +type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) + +// ProtoDriver defines the basic capabilities of a driver. 
+// This interface exists solely to be a minimum set of methods +// for client code which choose not to implement the entire Driver +// interface and use the NaiveDiffDriver wrapper constructor. +// +// Use of ProtoDriver directly by client code is not recommended. +type ProtoDriver interface { + // String returns a string representation of this driver. + String() string + // CreateReadWrite creates a new, empty filesystem layer that is ready + // to be used as the storage for a container. Additional options can + // be passed in opts. parent may be "" and opts may be nil. + CreateReadWrite(id, parent string, opts *CreateOpts) error + // Create creates a new, empty, filesystem layer with the + // specified id and parent and options passed in opts. Parent + // may be "" and opts may be nil. + Create(id, parent string, opts *CreateOpts) error + // Remove attempts to remove the filesystem layer with this id. + Remove(id string) error + // Get returns the mountpoint for the layered filesystem referred + // to by this id. You can optionally specify a mountLabel or "". + // Returns the absolute path to the mounted layered filesystem. + Get(id, mountLabel string) (fs containerfs.ContainerFS, err error) + // Put releases the system resources for the specified id, + // e.g, unmounting layered filesystem. + Put(id string) error + // Exists returns whether a filesystem layer with the specified + // ID exists on this driver. + Exists(id string) bool + // Status returns a set of key-value pairs which give low + // level diagnostic status about this driver. + Status() [][2]string + // Returns a set of key-value pairs which give low level information + // about the image/container driver is managing. + GetMetadata(id string) (map[string]string, error) + // Cleanup performs necessary tasks to release resources + // held by the driver, e.g., unmounting all layered filesystems + // known to this driver. + Cleanup() error +} + +// DiffDriver is the interface to use to implement graph diffs +type DiffDriver interface { + // Diff produces an archive of the changes between the specified + // layer and its parent layer which may be "". + Diff(id, parent string) (io.ReadCloser, error) + // Changes produces a list of changes between the specified layer + // and its parent layer. If parent is "", then all changes will be ADD changes. + Changes(id, parent string) ([]archive.Change, error) + // ApplyDiff extracts the changeset from the given diff into the + // layer with the specified id and parent, returning the size of the + // new layer in bytes. + // The archive.Reader must be an uncompressed stream. + ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) + // DiffSize calculates the changes between the specified id + // and its parent and returns the size in bytes of the changes + // relative to its base filesystem directory. + DiffSize(id, parent string) (size int64, err error) +} + +// Driver is the interface for layered/snapshot file system drivers. +type Driver interface { + ProtoDriver + DiffDriver +} + +// Capabilities defines a list of capabilities a driver may implement. +// These capabilities are not required; however, they do determine how a +// graphdriver can be used. +type Capabilities struct { + // Flags that this driver is capable of reproducing exactly equivalent + // diffs for read-only layers. If set, clients can rely on the driver + // for consistent tar streams, and avoid extra processing to account + // for potential differences (eg: the layer store's use of tar-split). 
+ ReproducesExactDiffs bool +} + +// CapabilityDriver is the interface for layered file system drivers that +// can report on their Capabilities. +type CapabilityDriver interface { + Capabilities() Capabilities +} + +// DiffGetterDriver is the interface for layered file system drivers that +// provide a specialized function for getting file contents for tar-split. +type DiffGetterDriver interface { + Driver + // DiffGetter returns an interface to efficiently retrieve the contents + // of files in a layer. + DiffGetter(id string) (FileGetCloser, error) +} + +// FileGetCloser extends the storage.FileGetter interface with a Close method +// for cleaning up. +type FileGetCloser interface { + storage.FileGetter + // Close cleans up any resources associated with the FileGetCloser. + Close() error +} + +// Checker makes checks on specified filesystems. +type Checker interface { + // IsMounted returns true if the provided path is mounted for the specific checker + IsMounted(path string) bool +} + +func init() { + drivers = make(map[string]InitFunc) +} + +// Register registers an InitFunc for the driver. +func Register(name string, initFunc InitFunc) error { + if _, exists := drivers[name]; exists { + return fmt.Errorf("Name already registered %s", name) + } + drivers[name] = initFunc + + return nil +} + +// GetDriver initializes and returns the registered driver +func GetDriver(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) + } + + pluginDriver, err := lookupPlugin(name, pg, config) + if err == nil { + return pluginDriver, nil + } + logrus.WithError(err).WithField("driver", name).WithField("home-dir", config.Root).Error("Failed to GetDriver graph") + return nil, ErrNotSupported +} + +// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins +func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) + } + logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home) + return nil, ErrNotSupported +} + +// Options is used to initialize a graphdriver +type Options struct { + Root string + DriverOptions []string + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + ExperimentalEnabled bool +} + +// New creates the driver and initializes it at the specified root. +func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + if name != "" { + logrus.Debugf("[graphdriver] trying provided driver: %s", name) // so the logs show specified driver + logDeprecatedWarning(name) + return GetDriver(name, pg, config) + } + + // Guess for prior driver + driversMap := scanPriorDrivers(config.Root) + list := strings.Split(priority, ",") + logrus.Debugf("[graphdriver] priority list: %v", list) + for _, name := range list { + if name == "vfs" { + // don't use vfs even if there is state present. 
+ continue + } + if _, prior := driversMap[name]; prior { + // of the state found from prior drivers, check in order of our priority + // which we would prefer + driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps) + if err != nil { + // unlike below, we will return error here, because there is prior + // state, and now it is no longer supported/prereq/compatible, so + // something changed and needs attention. Otherwise the daemon's + // images would just "disappear". + logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err) + return nil, err + } + + // abort starting when there are other prior configured drivers + // to ensure the user explicitly selects the driver to load + if len(driversMap)-1 > 0 { + var driversSlice []string + for name := range driversMap { + driversSlice = append(driversSlice, name) + } + + return nil, fmt.Errorf("%s contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s <DRIVER>)", config.Root, strings.Join(driversSlice, ", ")) + } + + logrus.Infof("[graphdriver] using prior storage driver: %s", name) + logDeprecatedWarning(name) + return driver, nil + } + } + + // Check for priority drivers first + for _, name := range list { + driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps) + if err != nil { + if IsDriverNotSupported(err) { + continue + } + return nil, err + } + logDeprecatedWarning(name) + return driver, nil + } + + // Check all registered drivers if no priority driver is found + for name, initFunc := range drivers { + if isDeprecated(name) { + // Deprecated storage-drivers are skipped in automatic selection, but + // can be selected through configuration. + continue + } + driver, err := initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) + if err != nil { + if IsDriverNotSupported(err) { + continue + } + return nil, err + } + logDeprecatedWarning(name) + return driver, nil + } + return nil, fmt.Errorf("No supported storage backend found") +} + +// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers +func scanPriorDrivers(root string) map[string]bool { + driversMap := make(map[string]bool) + + for driver := range drivers { + p := filepath.Join(root, driver) + if _, err := os.Stat(p); err == nil && driver != "vfs" { + if !isEmptyDir(p) { + driversMap[driver] = true + } + } + } + return driversMap +} + +// IsInitialized checks if the driver's home-directory exists and is non-empty. +func IsInitialized(driverHome string) bool { + _, err := os.Stat(driverHome) + if os.IsNotExist(err) { + return false + } + if err != nil { + logrus.Warnf("graphdriver.IsInitialized: stat failed: %v", err) + } + return !isEmptyDir(driverHome) +} + +// isEmptyDir checks if a directory is empty. It is used to check if prior +// storage-driver directories exist.
If an error occurs, it also assumes the +// directory is not empty (which preserves the behavior _before_ this check +// was added) +func isEmptyDir(name string) bool { + f, err := os.Open(name) + if err != nil { + return false + } + defer f.Close() + + if _, err = f.Readdirnames(1); err == io.EOF { + return true + } + return false +} + +// isDeprecated checks if a storage-driver is marked "deprecated" +func isDeprecated(name string) bool { + switch name { + // NOTE: when deprecating a driver, update daemon.fillDriverInfo() accordingly + case "aufs", "devicemapper", "overlay": + return true + } + return false +} + +// logDeprecatedWarning logs a warning if the given storage-driver is marked "deprecated" +func logDeprecatedWarning(name string) { + if isDeprecated(name) { + logrus.Warnf("[graphdriver] WARNING: the %s storage-driver is deprecated, and will be removed in a future release", name) + } +} diff --git a/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go b/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go new file mode 100644 index 000000000..cd83c4e21 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go @@ -0,0 +1,21 @@ +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + // List of drivers that should be used in an order + priority = "zfs" +) + +// Mounted checks if the given path is mounted as the fs type +func Mounted(fsType FsMagic, mountPath string) (bool, error) { + var buf unix.Statfs_t + if err := syscall.Statfs(mountPath, &buf); err != nil { + return false, err + } + return FsMagic(buf.Type) == fsType, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go b/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go new file mode 100644 index 000000000..61c6b24a9 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go @@ -0,0 +1,124 @@ +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" + +import ( + "github.com/docker/docker/pkg/mount" + "golang.org/x/sys/unix" +) + +const ( + // FsMagicAufs filesystem id for Aufs + FsMagicAufs = FsMagic(0x61756673) + // FsMagicBtrfs filesystem id for Btrfs + FsMagicBtrfs = FsMagic(0x9123683E) + // FsMagicCramfs filesystem id for Cramfs + FsMagicCramfs = FsMagic(0x28cd3d45) + // FsMagicEcryptfs filesystem id for eCryptfs + FsMagicEcryptfs = FsMagic(0xf15f) + // FsMagicExtfs filesystem id for Extfs + FsMagicExtfs = FsMagic(0x0000EF53) + // FsMagicF2fs filesystem id for F2fs + FsMagicF2fs = FsMagic(0xF2F52010) + // FsMagicGPFS filesystem id for GPFS + FsMagicGPFS = FsMagic(0x47504653) + // FsMagicJffs2Fs filesystem if for Jffs2Fs + FsMagicJffs2Fs = FsMagic(0x000072b6) + // FsMagicJfs filesystem id for Jfs + FsMagicJfs = FsMagic(0x3153464a) + // FsMagicNfsFs filesystem id for NfsFs + FsMagicNfsFs = FsMagic(0x00006969) + // FsMagicRAMFs filesystem id for RamFs + FsMagicRAMFs = FsMagic(0x858458f6) + // FsMagicReiserFs filesystem id for ReiserFs + FsMagicReiserFs = FsMagic(0x52654973) + // FsMagicSmbFs filesystem id for SmbFs + FsMagicSmbFs = FsMagic(0x0000517B) + // FsMagicSquashFs filesystem id for SquashFs + FsMagicSquashFs = FsMagic(0x73717368) + // FsMagicTmpFs filesystem id for TmpFs + FsMagicTmpFs = FsMagic(0x01021994) + // FsMagicVxFS filesystem id for VxFs + FsMagicVxFS = FsMagic(0xa501fcf5) + // 
FsMagicXfs filesystem id for Xfs + FsMagicXfs = FsMagic(0x58465342) + // FsMagicZfs filesystem id for Zfs + FsMagicZfs = FsMagic(0x2fc12fc1) + // FsMagicOverlay filesystem id for overlay + FsMagicOverlay = FsMagic(0x794C7630) +) + +var ( + // List of drivers that should be used in an order + priority = "btrfs,zfs,overlay2,aufs,overlay,devicemapper,vfs" + + // FsNames maps filesystem id to name of the filesystem. + FsNames = map[FsMagic]string{ + FsMagicAufs: "aufs", + FsMagicBtrfs: "btrfs", + FsMagicCramfs: "cramfs", + FsMagicEcryptfs: "ecryptfs", + FsMagicExtfs: "extfs", + FsMagicF2fs: "f2fs", + FsMagicGPFS: "gpfs", + FsMagicJffs2Fs: "jffs2", + FsMagicJfs: "jfs", + FsMagicNfsFs: "nfs", + FsMagicOverlay: "overlayfs", + FsMagicRAMFs: "ramfs", + FsMagicReiserFs: "reiserfs", + FsMagicSmbFs: "smb", + FsMagicSquashFs: "squashfs", + FsMagicTmpFs: "tmpfs", + FsMagicUnsupported: "unsupported", + FsMagicVxFS: "vxfs", + FsMagicXfs: "xfs", + FsMagicZfs: "zfs", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + var buf unix.Statfs_t + if err := unix.Statfs(rootpath, &buf); err != nil { + return 0, err + } + return FsMagic(buf.Type), nil +} + +// NewFsChecker returns a checker configured for the provided FsMagic +func NewFsChecker(t FsMagic) Checker { + return &fsChecker{ + t: t, + } +} + +type fsChecker struct { + t FsMagic +} + +func (c *fsChecker) IsMounted(path string) bool { + m, _ := Mounted(c.t, path) + return m +} + +// NewDefaultChecker returns a check that parses /proc/mountinfo to check +// if the specified path is mounted. +func NewDefaultChecker() Checker { + return &defaultChecker{} +} + +type defaultChecker struct { +} + +func (c *defaultChecker) IsMounted(path string) bool { + m, _ := mount.Mounted(path) + return m +} + +// Mounted checks if the given path is mounted as the fs type +func Mounted(fsType FsMagic, mountPath string) (bool, error) { + var buf unix.Statfs_t + if err := unix.Statfs(mountPath, &buf); err != nil { + return false, err + } + return FsMagic(buf.Type) == fsType, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go b/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go new file mode 100644 index 000000000..1f2e8f071 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux,!windows,!freebsd + +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" + +var ( + // List of drivers that should be used in an order + priority = "unsupported" +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + return FsMagicUnsupported, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go b/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go new file mode 100644 index 000000000..856b575e7 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go @@ -0,0 +1,12 @@ +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" + +var ( + // List of drivers that should be used in order + priority = "windowsfilter" +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + // Note it is OK to return FsMagicUnsupported on Windows. 
+ return FsMagicUnsupported, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/errors.go b/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/errors.go new file mode 100644 index 000000000..96d354455 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/errors.go @@ -0,0 +1,36 @@ +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" + +const ( + // ErrNotSupported returned when driver is not supported. + ErrNotSupported NotSupportedError = "driver not supported" + // ErrPrerequisites returned when driver does not meet prerequisites. + ErrPrerequisites NotSupportedError = "prerequisites for driver not satisfied (wrong filesystem?)" + // ErrIncompatibleFS returned when file system is not supported. + ErrIncompatibleFS NotSupportedError = "backing file system is unsupported for this graph driver" +) + +// ErrUnSupported signals that the graph-driver is not supported on the current configuration +type ErrUnSupported interface { + NotSupported() +} + +// NotSupportedError signals that the graph-driver is not supported on the current configuration +type NotSupportedError string + +func (e NotSupportedError) Error() string { + return string(e) +} + +// NotSupported signals that a graph-driver is not supported. +func (e NotSupportedError) NotSupported() {} + +// IsDriverNotSupported returns true if the error initializing +// the graph driver is a non-supported error. +func IsDriverNotSupported(err error) bool { + switch err.(type) { + case ErrUnSupported: + return true + default: + return false + } +} diff --git a/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go b/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go new file mode 100644 index 000000000..f06caedb9 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go @@ -0,0 +1,175 @@ +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" + +import ( + "io" + "time" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/sirupsen/logrus" +) + +var ( + // ApplyUncompressedLayer defines the unpack method used by the graph + // driver. + ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer +) + +// NaiveDiffDriver takes a ProtoDriver and adds the +// capability of the Diffing methods on the local file system, +// which it may or may not support on its own. See the comment +// on the exported NewNaiveDiffDriver function below. +// Notably, the AUFS driver doesn't need to be wrapped like this. 
+type NaiveDiffDriver struct { + ProtoDriver + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap +} + +// NewNaiveDiffDriver returns a fully functional driver that wraps the +// given ProtoDriver and adds the capability of the following methods which +// it may or may not support on its own: +// Diff(id, parent string) (archive.Archive, error) +// Changes(id, parent string) ([]archive.Change, error) +// ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) +// DiffSize(id, parent string) (size int64, err error) +func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver { + return &NaiveDiffDriver{ProtoDriver: driver, + uidMaps: uidMaps, + gidMaps: gidMaps} +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err error) { + startTime := time.Now() + driver := gdw.ProtoDriver + + layerRootFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + layerFs := layerRootFs.Path() + + defer func() { + if err != nil { + driver.Put(id) + } + }() + + if parent == "" { + archive, err := archive.Tar(layerFs, archive.Uncompressed) + if err != nil { + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + return err + }), nil + } + + parentRootFs, err := driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + + parentFs := parentRootFs.Path() + + changes, err := archive.ChangesDirs(layerFs, parentFs) + if err != nil { + return nil, err + } + + archive, err := archive.ExportChanges(layerFs, changes, gdw.uidMaps, gdw.gidMaps) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + + // NaiveDiffDriver compares file metadata with parent layers. Parent layers + // are extracted from tar's with full second precision on modified time. + // We need this hack here to make sure calls within same second receive + // correct result. + time.Sleep(time.Until(startTime.Truncate(time.Second).Add(time.Second))) + return err + }), nil +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) { + driver := gdw.ProtoDriver + + layerRootFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + defer driver.Put(id) + + layerFs := layerRootFs.Path() + parentFs := "" + + if parent != "" { + parentRootFs, err := driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + parentFs = parentRootFs.Path() + } + + return archive.ChangesDirs(layerFs, parentFs) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) { + driver := gdw.ProtoDriver + + // Mount the root filesystem so we can apply the diff/layer. 
+ layerRootFs, err := driver.Get(id, "") + if err != nil { + return + } + defer driver.Put(id) + + layerFs := layerRootFs.Path() + options := &archive.TarOptions{UIDMaps: gdw.uidMaps, + GIDMaps: gdw.gidMaps} + start := time.Now().UTC() + logrus.WithField("id", id).Debug("Start untar layer") + if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil { + return + } + logrus.WithField("id", id).Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) + + return +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) { + driver := gdw.ProtoDriver + + changes, err := gdw.Changes(id, parent) + if err != nil { + return + } + + layerFs, err := driver.Get(id, "") + if err != nil { + return + } + defer driver.Put(id) + + return archive.ChangesSize(layerFs.Path(), changes), nil +} diff --git a/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go b/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go new file mode 100644 index 000000000..b0983c566 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go @@ -0,0 +1,55 @@ +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" + +import ( + "fmt" + "path/filepath" + + "github.com/docker/docker/errdefs" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" + "github.com/docker/docker/plugin/v2" + "github.com/pkg/errors" +) + +func lookupPlugin(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + if !config.ExperimentalEnabled { + return nil, fmt.Errorf("graphdriver plugins are only supported with experimental mode") + } + pl, err := pg.Get(name, "GraphDriver", plugingetter.Acquire) + if err != nil { + return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err) + } + return newPluginDriver(name, pl, config) +} + +func newPluginDriver(name string, pl plugingetter.CompatPlugin, config Options) (Driver, error) { + home := config.Root + if !pl.IsV1() { + if p, ok := pl.(*v2.Plugin); ok { + if p.PluginObj.Config.PropagatedMount != "" { + home = p.PluginObj.Config.PropagatedMount + } + } + } + + var proxy *graphDriverProxy + + switch pt := pl.(type) { + case plugingetter.PluginWithV1Client: + proxy = &graphDriverProxy{name, pl, Capabilities{}, pt.Client()} + case plugingetter.PluginAddr: + if pt.Protocol() != plugins.ProtocolSchemeHTTPV1 { + return nil, errors.Errorf("plugin protocol not supported: %s", pt.Protocol()) + } + addr := pt.Addr() + client, err := plugins.NewClientWithTimeout(addr.Network()+"://"+addr.String(), nil, pt.Timeout()) + if err != nil { + return nil, errors.Wrap(err, "error creating plugin client") + } + proxy = &graphDriverProxy{name, pl, Capabilities{}, client} + default: + return nil, errdefs.System(errors.Errorf("got unknown plugin type %T", pt)) + } + + return proxy, proxy.Init(filepath.Join(home, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) +} diff --git a/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go b/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go new file mode 100644 index 000000000..cb350d807 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go @@ -0,0 +1,264 @@ +package graphdriver // import 
"github.com/docker/docker/daemon/graphdriver" + +import ( + "errors" + "fmt" + "io" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" +) + +type graphDriverProxy struct { + name string + p plugingetter.CompatPlugin + caps Capabilities + client *plugins.Client +} + +type graphDriverRequest struct { + ID string `json:",omitempty"` + Parent string `json:",omitempty"` + MountLabel string `json:",omitempty"` + StorageOpt map[string]string `json:",omitempty"` +} + +type graphDriverResponse struct { + Err string `json:",omitempty"` + Dir string `json:",omitempty"` + Exists bool `json:",omitempty"` + Status [][2]string `json:",omitempty"` + Changes []archive.Change `json:",omitempty"` + Size int64 `json:",omitempty"` + Metadata map[string]string `json:",omitempty"` + Capabilities Capabilities `json:",omitempty"` +} + +type graphDriverInitRequest struct { + Home string + Opts []string `json:"Opts"` + UIDMaps []idtools.IDMap `json:"UIDMaps"` + GIDMaps []idtools.IDMap `json:"GIDMaps"` +} + +func (d *graphDriverProxy) Init(home string, opts []string, uidMaps, gidMaps []idtools.IDMap) error { + if !d.p.IsV1() { + if cp, ok := d.p.(plugingetter.CountedPlugin); ok { + // always acquire here, it will be cleaned up on daemon shutdown + cp.Acquire() + } + } + args := &graphDriverInitRequest{ + Home: home, + Opts: opts, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Init", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + caps, err := d.fetchCaps() + if err != nil { + return err + } + d.caps = caps + return nil +} + +func (d *graphDriverProxy) fetchCaps() (Capabilities, error) { + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Capabilities", args, &ret); err != nil { + if !plugins.IsNotFound(err) { + return Capabilities{}, err + } + } + return ret.Capabilities, nil +} + +func (d *graphDriverProxy) String() string { + return d.name +} + +func (d *graphDriverProxy) Capabilities() Capabilities { + return d.caps +} + +func (d *graphDriverProxy) CreateReadWrite(id, parent string, opts *CreateOpts) error { + return d.create("GraphDriver.CreateReadWrite", id, parent, opts) +} + +func (d *graphDriverProxy) Create(id, parent string, opts *CreateOpts) error { + return d.create("GraphDriver.Create", id, parent, opts) +} + +func (d *graphDriverProxy) create(method, id, parent string, opts *CreateOpts) error { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + if opts != nil { + args.MountLabel = opts.MountLabel + args.StorageOpt = opts.StorageOpt + } + var ret graphDriverResponse + if err := d.client.Call(method, args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Remove(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Remove", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Get(id, mountLabel string) (containerfs.ContainerFS, error) { + args := &graphDriverRequest{ + ID: id, + MountLabel: mountLabel, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Get", args, &ret); err != nil { + return nil, err 
+ } + var err error + if ret.Err != "" { + err = errors.New(ret.Err) + } + return containerfs.NewLocalContainerFS(d.p.ScopedPath(ret.Dir)), err +} + +func (d *graphDriverProxy) Put(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Put", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Exists(id string) bool { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Exists", args, &ret); err != nil { + return false + } + return ret.Exists +} + +func (d *graphDriverProxy) Status() [][2]string { + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Status", args, &ret); err != nil { + return nil + } + return ret.Status +} + +func (d *graphDriverProxy) GetMetadata(id string) (map[string]string, error) { + args := &graphDriverRequest{ + ID: id, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.GetMetadata", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + return ret.Metadata, nil +} + +func (d *graphDriverProxy) Cleanup() error { + if !d.p.IsV1() { + if cp, ok := d.p.(plugingetter.CountedPlugin); ok { + // always release + defer cp.Release() + } + } + + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Cleanup", args, &ret); err != nil { + return nil + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Diff(id, parent string) (io.ReadCloser, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + body, err := d.client.Stream("GraphDriver.Diff", args) + if err != nil { + return nil, err + } + return body, nil +} + +func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Changes", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + + return ret.Changes, nil +} + +func (d *graphDriverProxy) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { + var ret graphDriverResponse + if err := d.client.SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil { + return -1, err + } + if ret.Err != "" { + return -1, errors.New(ret.Err) + } + return ret.Size, nil +} + +func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.DiffSize", args, &ret); err != nil { + return -1, err + } + if ret.Err != "" { + return -1, errors.New(ret.Err) + } + return ret.Size, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/daemon/logger/adapter.go b/extender/code/vendor/github.com/docker/docker/daemon/logger/adapter.go new file mode 100644 index 000000000..d9370352c --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/daemon/logger/adapter.go @@ -0,0 +1,129 @@ +package logger // import "github.com/docker/docker/daemon/logger" + +import ( + "io" + "os" + "path/filepath" + "sync" + "time" + + "github.com/docker/docker/api/types/plugins/logdriver" + "github.com/docker/docker/pkg/plugingetter" + "github.com/pkg/errors" + 
"github.com/sirupsen/logrus" +) + +// pluginAdapter takes a plugin and implements the Logger interface for logger +// instances +type pluginAdapter struct { + driverName string + id string + plugin logPlugin + fifoPath string + capabilities Capability + logInfo Info + + // synchronize access to the log stream and shared buffer + mu sync.Mutex + enc logdriver.LogEntryEncoder + stream io.WriteCloser + // buf is shared for each `Log()` call to reduce allocations. + // buf must be protected by mutex + buf logdriver.LogEntry +} + +func (a *pluginAdapter) Log(msg *Message) error { + a.mu.Lock() + + a.buf.Line = msg.Line + a.buf.TimeNano = msg.Timestamp.UnixNano() + a.buf.Partial = msg.PLogMetaData != nil + a.buf.Source = msg.Source + + err := a.enc.Encode(&a.buf) + a.buf.Reset() + + a.mu.Unlock() + + PutMessage(msg) + return err +} + +func (a *pluginAdapter) Name() string { + return a.driverName +} + +func (a *pluginAdapter) Close() error { + a.mu.Lock() + defer a.mu.Unlock() + + if err := a.plugin.StopLogging(filepath.Join("/", "run", "docker", "logging", a.id)); err != nil { + return err + } + + if err := a.stream.Close(); err != nil { + logrus.WithError(err).Error("error closing plugin fifo") + } + if err := os.Remove(a.fifoPath); err != nil && !os.IsNotExist(err) { + logrus.WithError(err).Error("error cleaning up plugin fifo") + } + + // may be nil, especially for unit tests + if pluginGetter != nil { + pluginGetter.Get(a.Name(), extName, plugingetter.Release) + } + return nil +} + +type pluginAdapterWithRead struct { + *pluginAdapter +} + +func (a *pluginAdapterWithRead) ReadLogs(config ReadConfig) *LogWatcher { + watcher := NewLogWatcher() + + go func() { + defer close(watcher.Msg) + stream, err := a.plugin.ReadLogs(a.logInfo, config) + if err != nil { + watcher.Err <- errors.Wrap(err, "error getting log reader") + return + } + defer stream.Close() + + dec := logdriver.NewLogEntryDecoder(stream) + for { + var buf logdriver.LogEntry + if err := dec.Decode(&buf); err != nil { + if err == io.EOF { + return + } + watcher.Err <- errors.Wrap(err, "error decoding log message") + return + } + + msg := &Message{ + Timestamp: time.Unix(0, buf.TimeNano), + Line: buf.Line, + Source: buf.Source, + } + + // plugin should handle this, but check just in case + if !config.Since.IsZero() && msg.Timestamp.Before(config.Since) { + continue + } + if !config.Until.IsZero() && msg.Timestamp.After(config.Until) { + return + } + + // send the message unless the consumer is gone + select { + case watcher.Msg <- msg: + case <-watcher.WatchConsumerGone(): + return + } + } + }() + + return watcher +} diff --git a/extender/code/vendor/github.com/docker/docker/daemon/logger/copier.go b/extender/code/vendor/github.com/docker/docker/daemon/logger/copier.go new file mode 100644 index 000000000..e24272fa6 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/daemon/logger/copier.go @@ -0,0 +1,186 @@ +package logger // import "github.com/docker/docker/daemon/logger" + +import ( + "bytes" + "io" + "sync" + "time" + + types "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/pkg/stringid" + "github.com/sirupsen/logrus" +) + +const ( + // readSize is the maximum bytes read during a single read + // operation. + readSize = 2 * 1024 + + // defaultBufSize provides a reasonable default for loggers that do + // not have an external limit to impose on log line size. + defaultBufSize = 16 * 1024 +) + +// Copier can copy logs from specified sources to Logger and attach Timestamp. 
+// Writes are concurrent, so you need to implement some sync in your logger. +type Copier struct { + // srcs is map of name -> reader pairs, for example "stdout", "stderr" + srcs map[string]io.Reader + dst Logger + copyJobs sync.WaitGroup + closeOnce sync.Once + closed chan struct{} +} + +// NewCopier creates a new Copier +func NewCopier(srcs map[string]io.Reader, dst Logger) *Copier { + return &Copier{ + srcs: srcs, + dst: dst, + closed: make(chan struct{}), + } +} + +// Run starts logs copying +func (c *Copier) Run() { + for src, w := range c.srcs { + c.copyJobs.Add(1) + go c.copySrc(src, w) + } +} + +func (c *Copier) copySrc(name string, src io.Reader) { + defer c.copyJobs.Done() + + bufSize := defaultBufSize + if sizedLogger, ok := c.dst.(SizedLogger); ok { + bufSize = sizedLogger.BufSize() + } + buf := make([]byte, bufSize) + + n := 0 + eof := false + var partialid string + var partialTS time.Time + var ordinal int + firstPartial := true + hasMorePartial := false + + for { + select { + case <-c.closed: + return + default: + // Work out how much more data we are okay with reading this time. + upto := n + readSize + if upto > cap(buf) { + upto = cap(buf) + } + // Try to read that data. + if upto > n { + read, err := src.Read(buf[n:upto]) + if err != nil { + if err != io.EOF { + logReadsFailedCount.Inc(1) + logrus.Errorf("Error scanning log stream: %s", err) + return + } + eof = true + } + n += read + } + // If we have no data to log, and there's no more coming, we're done. + if n == 0 && eof { + return + } + // Break up the data that we've buffered up into lines, and log each in turn. + p := 0 + + for q := bytes.IndexByte(buf[p:n], '\n'); q >= 0; q = bytes.IndexByte(buf[p:n], '\n') { + select { + case <-c.closed: + return + default: + msg := NewMessage() + msg.Source = name + msg.Line = append(msg.Line, buf[p:p+q]...) + + if hasMorePartial { + msg.PLogMetaData = &types.PartialLogMetaData{ID: partialid, Ordinal: ordinal, Last: true} + + // reset + partialid = "" + ordinal = 0 + firstPartial = true + hasMorePartial = false + } + if msg.PLogMetaData == nil { + msg.Timestamp = time.Now().UTC() + } else { + msg.Timestamp = partialTS + } + + if logErr := c.dst.Log(msg); logErr != nil { + logWritesFailedCount.Inc(1) + logrus.Errorf("Failed to log msg %q for logger %s: %s", msg.Line, c.dst.Name(), logErr) + } + } + p += q + 1 + } + // If there's no more coming, or the buffer is full but + // has no newlines, log whatever we haven't logged yet, + // noting that it's a partial log line. + if eof || (p == 0 && n == len(buf)) { + if p < n { + msg := NewMessage() + msg.Source = name + msg.Line = append(msg.Line, buf[p:n]...) + + // Generate unique partialID for first partial. Use it across partials. + // Record timestamp for first partial. Use it across partials. + // Initialize Ordinal for first partial. Increment it across partials. + if firstPartial { + msg.Timestamp = time.Now().UTC() + partialTS = msg.Timestamp + partialid = stringid.GenerateRandomID() + ordinal = 1 + firstPartial = false + totalPartialLogs.Inc(1) + } else { + msg.Timestamp = partialTS + } + msg.PLogMetaData = &types.PartialLogMetaData{ID: partialid, Ordinal: ordinal, Last: false} + ordinal++ + hasMorePartial = true + + if logErr := c.dst.Log(msg); logErr != nil { + logWritesFailedCount.Inc(1) + logrus.Errorf("Failed to log msg %q for logger %s: %s", msg.Line, c.dst.Name(), logErr) + } + p = 0 + n = 0 + } + if eof { + return + } + } + // Move any unlogged data to the front of the buffer in preparation for another read.
+ if p > 0 { + copy(buf[0:], buf[p:n]) + n -= p + } + } + } +} + +// Wait waits until all copying is done +func (c *Copier) Wait() { + c.copyJobs.Wait() +} + +// Close closes the copier +func (c *Copier) Close() { + c.closeOnce.Do(func() { + close(c.closed) + }) +} diff --git a/extender/code/vendor/github.com/docker/docker/daemon/logger/factory.go b/extender/code/vendor/github.com/docker/docker/daemon/logger/factory.go new file mode 100644 index 000000000..84b54b279 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/daemon/logger/factory.go @@ -0,0 +1,162 @@ +package logger // import "github.com/docker/docker/daemon/logger" + +import ( + "fmt" + "sort" + "sync" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/go-units" + "github.com/pkg/errors" +) + +// Creator builds a logging driver instance with given context. +type Creator func(Info) (Logger, error) + +// LogOptValidator checks the options specific to the underlying +// logging implementation. +type LogOptValidator func(cfg map[string]string) error + +type logdriverFactory struct { + registry map[string]Creator + optValidator map[string]LogOptValidator + m sync.Mutex +} + +func (lf *logdriverFactory) list() []string { + ls := make([]string, 0, len(lf.registry)) + lf.m.Lock() + for name := range lf.registry { + ls = append(ls, name) + } + lf.m.Unlock() + sort.Strings(ls) + return ls +} + +// ListDrivers gets the list of registered log driver names +func ListDrivers() []string { + return factory.list() +} + +func (lf *logdriverFactory) register(name string, c Creator) error { + if lf.driverRegistered(name) { + return fmt.Errorf("logger: log driver named '%s' is already registered", name) + } + + lf.m.Lock() + lf.registry[name] = c + lf.m.Unlock() + return nil +} + +func (lf *logdriverFactory) driverRegistered(name string) bool { + lf.m.Lock() + _, ok := lf.registry[name] + lf.m.Unlock() + if !ok { + if pluginGetter != nil { // this can be nil when the init functions are running + if l, _ := getPlugin(name, plugingetter.Lookup); l != nil { + return true + } + } + } + return ok +} + +func (lf *logdriverFactory) registerLogOptValidator(name string, l LogOptValidator) error { + lf.m.Lock() + defer lf.m.Unlock() + + if _, ok := lf.optValidator[name]; ok { + return fmt.Errorf("logger: log validator named '%s' is already registered", name) + } + lf.optValidator[name] = l + return nil +} + +func (lf *logdriverFactory) get(name string) (Creator, error) { + lf.m.Lock() + defer lf.m.Unlock() + + c, ok := lf.registry[name] + if ok { + return c, nil + } + + c, err := getPlugin(name, plugingetter.Acquire) + return c, errors.Wrapf(err, "logger: no log driver named '%s' is registered", name) +} + +func (lf *logdriverFactory) getLogOptValidator(name string) LogOptValidator { + lf.m.Lock() + defer lf.m.Unlock() + + c := lf.optValidator[name] + return c +} + +var factory = &logdriverFactory{registry: make(map[string]Creator), optValidator: make(map[string]LogOptValidator)} // global factory instance + +// RegisterLogDriver registers the given logging driver builder with given logging +// driver name. +func RegisterLogDriver(name string, c Creator) error { + return factory.register(name, c) +} + +// RegisterLogOptValidator registers the logging option validator with +// the given logging driver name. 
+func RegisterLogOptValidator(name string, l LogOptValidator) error { + return factory.registerLogOptValidator(name, l) +} + +// GetLogDriver provides the logging driver builder for a logging driver name. +func GetLogDriver(name string) (Creator, error) { + return factory.get(name) +} + +var builtInLogOpts = map[string]bool{ + "mode": true, + "max-buffer-size": true, +} + +// ValidateLogOpts checks the options for the given log driver. The +// options supported are specific to the LogDriver implementation. +func ValidateLogOpts(name string, cfg map[string]string) error { + if name == "none" { + return nil + } + + switch containertypes.LogMode(cfg["mode"]) { + case containertypes.LogModeBlocking, containertypes.LogModeNonBlock, containertypes.LogModeUnset: + default: + return fmt.Errorf("logger: logging mode not supported: %s", cfg["mode"]) + } + + if s, ok := cfg["max-buffer-size"]; ok { + if containertypes.LogMode(cfg["mode"]) != containertypes.LogModeNonBlock { + return fmt.Errorf("logger: max-buffer-size option is only supported with 'mode=%s'", containertypes.LogModeNonBlock) + } + if _, err := units.RAMInBytes(s); err != nil { + return errors.Wrap(err, "error parsing option max-buffer-size") + } + } + + if !factory.driverRegistered(name) { + return fmt.Errorf("logger: no log driver named '%s' is registered", name) + } + + filteredOpts := make(map[string]string, len(builtInLogOpts)) + for k, v := range cfg { + if !builtInLogOpts[k] { + filteredOpts[k] = v + } + } + + validator := factory.getLogOptValidator(name) + if validator != nil { + return validator(filteredOpts) + } + return nil +} diff --git a/extender/code/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go b/extender/code/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go new file mode 100644 index 000000000..bbb8eeb7e --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go @@ -0,0 +1,186 @@ +// Package jsonfilelog provides the default Logger implementation for +// Docker logging. This logger logs to files on the host server in the +// JSON format. +package jsonfilelog // import "github.com/docker/docker/daemon/logger/jsonfilelog" + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "sync" + + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/go-units" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Name is the name of the file that the jsonlogger logs to. +const Name = "json-file" + +// JSONFileLogger is Logger implementation for default Docker logging. +type JSONFileLogger struct { + mu sync.Mutex + closed bool + writer *loggerutils.LogFile + readers map[*logger.LogWatcher]struct{} // stores the active log followers + tag string // tag values requested by the user to log +} + +func init() { + if err := logger.RegisterLogDriver(Name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(Name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates new JSONFileLogger which writes to filename passed in +// on given context. 
+func New(info logger.Info) (logger.Logger, error) { + var capval int64 = -1 + if capacity, ok := info.Config["max-size"]; ok { + var err error + capval, err = units.FromHumanSize(capacity) + if err != nil { + return nil, err + } + if capval <= 0 { + return nil, fmt.Errorf("max-size must be a positive number") + } + } + var maxFiles = 1 + if maxFileString, ok := info.Config["max-file"]; ok { + var err error + maxFiles, err = strconv.Atoi(maxFileString) + if err != nil { + return nil, err + } + if maxFiles < 1 { + return nil, fmt.Errorf("max-file cannot be less than 1") + } + } + + var compress bool + if compressString, ok := info.Config["compress"]; ok { + var err error + compress, err = strconv.ParseBool(compressString) + if err != nil { + return nil, err + } + if compress && (maxFiles == 1 || capval == -1) { + return nil, fmt.Errorf("compress cannot be true when max-file is less than 2 or max-size is not set") + } + } + + attrs, err := info.ExtraAttributes(nil) + if err != nil { + return nil, err + } + + // no default template. only use a tag if the user asked for it + tag, err := loggerutils.ParseLogTag(info, "") + if err != nil { + return nil, err + } + if tag != "" { + attrs["tag"] = tag + } + + var extra []byte + if len(attrs) > 0 { + var err error + extra, err = json.Marshal(attrs) + if err != nil { + return nil, err + } + } + + buf := bytes.NewBuffer(nil) + marshalFunc := func(msg *logger.Message) ([]byte, error) { + if err := marshalMessage(msg, extra, buf); err != nil { + return nil, err + } + b := buf.Bytes() + buf.Reset() + return b, nil + } + + writer, err := loggerutils.NewLogFile(info.LogPath, capval, maxFiles, compress, marshalFunc, decodeFunc, 0640, getTailReader) + if err != nil { + return nil, err + } + + return &JSONFileLogger{ + writer: writer, + readers: make(map[*logger.LogWatcher]struct{}), + tag: tag, + }, nil +} + +// Log converts logger.Message to jsonlog.JSONLog and serializes it to file. +func (l *JSONFileLogger) Log(msg *logger.Message) error { + l.mu.Lock() + err := l.writer.WriteLogEntry(msg) + l.mu.Unlock() + return err +} + +func marshalMessage(msg *logger.Message, extra json.RawMessage, buf *bytes.Buffer) error { + logLine := msg.Line + if msg.PLogMetaData == nil || (msg.PLogMetaData != nil && msg.PLogMetaData.Last) { + logLine = append(msg.Line, '\n') + } + err := (&jsonlog.JSONLogs{ + Log: logLine, + Stream: msg.Source, + Created: msg.Timestamp, + RawAttrs: extra, + }).MarshalJSONBuf(buf) + if err != nil { + return errors.Wrap(err, "error writing log message to buffer") + } + err = buf.WriteByte('\n') + return errors.Wrap(err, "error finalizing log buffer") +} + +// ValidateLogOpt looks for json specific log options max-file & max-size. +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "max-file": + case "max-size": + case "compress": + case "labels": + case "env": + case "env-regex": + case "tag": + default: + return fmt.Errorf("unknown log opt '%s' for json-file log driver", key) + } + } + return nil +} + +// Close closes underlying file and signals all the readers +// that the logs producer is gone. +func (l *JSONFileLogger) Close() error { + l.mu.Lock() + l.closed = true + err := l.writer.Close() + for r := range l.readers { + r.ProducerGone() + delete(l.readers, r) + } + l.mu.Unlock() + return err +} + +// Name returns name of this logger. 
+func (l *JSONFileLogger) Name() string { + return Name +} diff --git a/extender/code/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlog.go b/extender/code/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlog.go new file mode 100644 index 000000000..74be8e7da --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlog.go @@ -0,0 +1,25 @@ +package jsonlog // import "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" + +import ( + "time" +) + +// JSONLog is a log message, typically a single entry from a given log stream. +type JSONLog struct { + // Log is the log message + Log string `json:"log,omitempty"` + // Stream is the log source + Stream string `json:"stream,omitempty"` + // Created is the created timestamp of log + Created time.Time `json:"time"` + // Attrs is the list of extra attributes provided by the user + Attrs map[string]string `json:"attrs,omitempty"` +} + +// Reset all fields to their zero value. +func (jl *JSONLog) Reset() { + jl.Log = "" + jl.Stream = "" + jl.Created = time.Time{} + jl.Attrs = make(map[string]string) +} diff --git a/extender/code/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes.go b/extender/code/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes.go new file mode 100644 index 000000000..577c718f6 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes.go @@ -0,0 +1,125 @@ +package jsonlog // import "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" + +import ( + "bytes" + "encoding/json" + "time" + "unicode/utf8" +) + +// JSONLogs marshals encoded JSONLog objects +type JSONLogs struct { + Log []byte `json:"log,omitempty"` + Stream string `json:"stream,omitempty"` + Created time.Time `json:"time"` + + // json-encoded bytes + RawAttrs json.RawMessage `json:"attrs,omitempty"` +} + +// MarshalJSONBuf is an optimized JSON marshaller that avoids reflection +// and unnecessary allocation. 
+func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error { + var first = true + + buf.WriteString(`{`) + if len(mj.Log) != 0 { + first = false + buf.WriteString(`"log":`) + ffjsonWriteJSONBytesAsString(buf, mj.Log) + } + if len(mj.Stream) != 0 { + if first { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"stream":`) + ffjsonWriteJSONBytesAsString(buf, []byte(mj.Stream)) + } + if len(mj.RawAttrs) > 0 { + if first { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"attrs":`) + buf.Write(mj.RawAttrs) + } + if !first { + buf.WriteString(`,`) + } + + created, err := fastTimeMarshalJSON(mj.Created) + if err != nil { + return err + } + + buf.WriteString(`"time":`) + buf.WriteString(created) + buf.WriteString(`}`) + return nil +} + +func ffjsonWriteJSONBytesAsString(buf *bytes.Buffer, s []byte) { + const hex = "0123456789abcdef" + + buf.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + buf.Write(s[start:i]) + } + switch b { + case '\\', '"': + buf.WriteByte('\\') + buf.WriteByte(b) + case '\n': + buf.WriteByte('\\') + buf.WriteByte('n') + case '\r': + buf.WriteByte('\\') + buf.WriteByte('r') + default: + + buf.WriteString(`\u00`) + buf.WriteByte(hex[b>>4]) + buf.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRune(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + buf.Write(s[start:i]) + } + buf.WriteString(`\ufffd`) + i += size + start = i + continue + } + + if c == '\u2028' || c == '\u2029' { + if start < i { + buf.Write(s[start:i]) + } + buf.WriteString(`\u202`) + buf.WriteByte(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + buf.Write(s[start:]) + } + buf.WriteByte('"') +} diff --git a/extender/code/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/time_marshalling.go b/extender/code/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/time_marshalling.go new file mode 100644 index 000000000..1822ea5db --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/time_marshalling.go @@ -0,0 +1,20 @@ +package jsonlog // import "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" + +import ( + "time" + + "github.com/pkg/errors" +) + +const jsonFormat = `"` + time.RFC3339Nano + `"` + +// fastTimeMarshalJSON avoids one of the extra allocations that +// time.MarshalJSON is making. +func fastTimeMarshalJSON(t time.Time) (string, error) { + if y := t.Year(); y < 0 || y >= 10000 { + // RFC 3339 is clear that years are 4 digits exactly. + // See golang.org/issue/4556#c15 for more discussion. 
+ return "", errors.New("time.MarshalJSON: year outside of range [0,9999]") + } + return t.Format(jsonFormat), nil +} diff --git a/extender/code/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go b/extender/code/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go new file mode 100644 index 000000000..12f676bb1 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go @@ -0,0 +1,97 @@ +package jsonfilelog // import "github.com/docker/docker/daemon/logger/jsonfilelog" + +import ( + "context" + "encoding/json" + "io" + + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/tailfile" + "github.com/sirupsen/logrus" +) + +const maxJSONDecodeRetry = 20000 + +// ReadLogs implements the logger's LogReader interface for the logs +// created by this driver. +func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { + logWatcher := logger.NewLogWatcher() + + go l.readLogs(logWatcher, config) + return logWatcher +} + +func (l *JSONFileLogger) readLogs(watcher *logger.LogWatcher, config logger.ReadConfig) { + defer close(watcher.Msg) + + l.mu.Lock() + l.readers[watcher] = struct{}{} + l.mu.Unlock() + + l.writer.ReadLogs(config, watcher) + + l.mu.Lock() + delete(l.readers, watcher) + l.mu.Unlock() +} + +func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) { + l.Reset() + if err := dec.Decode(l); err != nil { + return nil, err + } + + var attrs []backend.LogAttr + if len(l.Attrs) != 0 { + attrs = make([]backend.LogAttr, 0, len(l.Attrs)) + for k, v := range l.Attrs { + attrs = append(attrs, backend.LogAttr{Key: k, Value: v}) + } + } + msg := &logger.Message{ + Source: l.Stream, + Timestamp: l.Created, + Line: []byte(l.Log), + Attrs: attrs, + } + return msg, nil +} + +// decodeFunc is used to create a decoder for the log file reader +func decodeFunc(rdr io.Reader) func() (*logger.Message, error) { + l := &jsonlog.JSONLog{} + dec := json.NewDecoder(rdr) + return func() (msg *logger.Message, err error) { + for retries := 0; retries < maxJSONDecodeRetry; retries++ { + msg, err = decodeLogLine(dec, l) + if err == nil || err == io.EOF { + break + } + + logrus.WithError(err).WithField("retries", retries).Warn("got error while decoding json") + // try again, could be due to a an incomplete json object as we read + if _, ok := err.(*json.SyntaxError); ok { + dec = json.NewDecoder(rdr) + continue + } + + // io.ErrUnexpectedEOF is returned from json.Decoder when there is + // remaining data in the parser's buffer while an io.EOF occurs. + // If the json logger writes a partial json log entry to the disk + // while at the same time the decoder tries to decode it, the race condition happens. 
+			if err == io.ErrUnexpectedEOF {
+				reader := io.MultiReader(dec.Buffered(), rdr)
+				dec = json.NewDecoder(reader)
+				continue
+			}
+		}
+		return msg, err
+	}
+}
+
+func getTailReader(ctx context.Context, r loggerutils.SizeReaderAt, req int) (io.Reader, int, error) {
+	return tailfile.NewTailReader(ctx, r, req)
+}
diff --git a/extender/code/vendor/github.com/docker/docker/daemon/logger/local/config.go b/extender/code/vendor/github.com/docker/docker/daemon/logger/local/config.go
new file mode 100644
index 000000000..8de160aea
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/daemon/logger/local/config.go
@@ -0,0 +1,36 @@
+package local
+
+import (
+	"github.com/pkg/errors"
+)
+
+// CreateConfig is used to configure new instances of the driver
+type CreateConfig struct {
+	DisableCompression bool
+	MaxFileSize        int64
+	MaxFileCount       int
+}
+
+func newDefaultConfig() *CreateConfig {
+	return &CreateConfig{
+		MaxFileSize:        defaultMaxFileSize,
+		MaxFileCount:       defaultMaxFileCount,
+		DisableCompression: !defaultCompressLogs,
+	}
+}
+
+func validateConfig(cfg *CreateConfig) error {
+	if cfg.MaxFileSize < 0 {
+		return errors.New("max size should be a positive number")
+	}
+	if cfg.MaxFileCount < 0 {
+		return errors.New("max file count cannot be less than 0")
+	}
+
+	if !cfg.DisableCompression {
+		if cfg.MaxFileCount <= 1 {
+			return errors.New("compression cannot be enabled when max file count is 1")
+		}
+	}
+	return nil
+}
diff --git a/extender/code/vendor/github.com/docker/docker/daemon/logger/local/doc.go b/extender/code/vendor/github.com/docker/docker/daemon/logger/local/doc.go
new file mode 100644
index 000000000..7b082a3d5
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/daemon/logger/local/doc.go
@@ -0,0 +1,9 @@
+// Package local provides a logger implementation that stores logs on disk.
+//
+// Log messages are encoded as protobufs with a header and footer for each message.
+// The header and footer are big-endian binary encoded uint32 values which indicate the size of the log message.
+// The header and footer of each message allow you to efficiently read through a file either forwards or
+// backwards (as is the case when tailing a file).
+//
+// Example log message format: [22][This is a log message.][22][28][This is another log message.][28]
+package local // import "github.com/docker/docker/daemon/logger/local"
diff --git a/extender/code/vendor/github.com/docker/docker/daemon/logger/local/local.go b/extender/code/vendor/github.com/docker/docker/daemon/logger/local/local.go
new file mode 100644
index 000000000..ba4aa096f
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/daemon/logger/local/local.go
@@ -0,0 +1,218 @@
+package local // import "github.com/docker/docker/daemon/logger/local"
+
+import (
+	"encoding/binary"
+	"io"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/docker/docker/api/types/backend"
+	"github.com/docker/docker/api/types/plugins/logdriver"
+	"github.com/docker/docker/daemon/logger"
+	"github.com/docker/docker/daemon/logger/loggerutils"
+	"github.com/docker/docker/errdefs"
+	"github.com/docker/go-units"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	// Name is the name of the driver
+	Name = "local"
+
+	encodeBinaryLen = 4
+	initialBufSize  = 2048
+	maxDecodeRetry  = 20000
+
+	defaultMaxFileSize  int64 = 20 * 1024 * 1024
+	defaultMaxFileCount       = 5
+	defaultCompressLogs       = true
+)
+
+// LogOptKeys are the key names used for log opts passed in to initialize the driver.
+var LogOptKeys = map[string]bool{
+	"max-file": true,
+	"max-size": true,
+	"compress": true,
+}
+
+// ValidateLogOpt looks for log driver specific options.
+func ValidateLogOpt(cfg map[string]string) error {
+	for key := range cfg {
+		if !LogOptKeys[key] {
+			return errors.Errorf("unknown log opt '%s' for log driver %s", key, Name)
+		}
+	}
+	return nil
+}
+
+func init() {
+	if err := logger.RegisterLogDriver(Name, New); err != nil {
+		logrus.Fatal(err)
+	}
+	if err := logger.RegisterLogOptValidator(Name, ValidateLogOpt); err != nil {
+		logrus.Fatal(err)
+	}
+}
+
+type driver struct {
+	mu      sync.Mutex
+	closed  bool
+	logfile *loggerutils.LogFile
+	readers map[*logger.LogWatcher]struct{} // stores the active log followers
+}
+
+// New creates a new local logger.
+// You must provide the `LogPath` in the passed-in info argument; this is the
+// file path that logs are written to.
+func New(info logger.Info) (logger.Logger, error) {
+	if info.LogPath == "" {
+		return nil, errdefs.System(errors.New("log path is missing -- this is a bug and should not happen"))
+	}
+
+	cfg := newDefaultConfig()
+	if capacity, ok := info.Config["max-size"]; ok {
+		var err error
+		cfg.MaxFileSize, err = units.FromHumanSize(capacity)
+		if err != nil {
+			return nil, errdefs.InvalidParameter(errors.Wrapf(err, "invalid value for max-size: %s", capacity))
+		}
+	}
+
+	if userMaxFileCount, ok := info.Config["max-file"]; ok {
+		var err error
+		cfg.MaxFileCount, err = strconv.Atoi(userMaxFileCount)
+		if err != nil {
+			return nil, errdefs.InvalidParameter(errors.Wrapf(err, "invalid value for max-file: %s", userMaxFileCount))
+		}
+	}
+
+	if userCompress, ok := info.Config["compress"]; ok {
+		compressLogs, err := strconv.ParseBool(userCompress)
+		if err != nil {
+			return nil, errdefs.InvalidParameter(errors.Wrap(err, "error reading compress log option"))
+		}
+		cfg.DisableCompression = !compressLogs
+	}
+	return newDriver(info.LogPath, cfg)
+}
+
+func makeMarshaller() func(m *logger.Message) ([]byte, error) {
+	buf := make([]byte, initialBufSize)
+
+	// allocate the partial log entry separately, which allows for easier re-use
+	proto := &logdriver.LogEntry{}
+	md := &logdriver.PartialLogEntryMetadata{}
+
+	return func(m *logger.Message) ([]byte, error) {
+		resetProto(proto)
+
+		messageToProto(m, proto, md)
+		protoSize := proto.Size()
+		writeLen := protoSize + (2 * encodeBinaryLen) //+ len(messageDelimiter)
+
+		if writeLen > len(buf) {
+			buf = make([]byte, writeLen)
+		} else {
+			// shrink the buffer back down
+			if writeLen <= initialBufSize {
+				buf = buf[:initialBufSize]
+			} else {
+				buf = buf[:writeLen]
+			}
+		}
+
+		binary.BigEndian.PutUint32(buf[:encodeBinaryLen], uint32(protoSize))
+		n, err := proto.MarshalTo(buf[encodeBinaryLen:writeLen])
+		if err != nil {
+			return nil, errors.Wrap(err, "error marshaling log entry")
+		}
+		if n+(encodeBinaryLen*2) != writeLen {
+			return nil, io.ErrShortWrite
+		}
+		binary.BigEndian.PutUint32(buf[writeLen-encodeBinaryLen:writeLen], uint32(protoSize))
+		return buf[:writeLen], nil
+	}
+}
+
+func newDriver(logPath string, cfg *CreateConfig) (logger.Logger, error) {
+	if err := validateConfig(cfg); err != nil {
+		return nil, errdefs.InvalidParameter(err)
+	}
+
+	lf, err := loggerutils.NewLogFile(logPath, cfg.MaxFileSize, cfg.MaxFileCount, !cfg.DisableCompression, makeMarshaller(), decodeFunc, 0640, getTailReader)
+	if err != nil {
+		return nil, err
+	}
+	return &driver{
+		logfile: lf,
+		readers: make(map[*logger.LogWatcher]struct{}),
+	}, nil
+}
+
+func (d *driver) Name() string {
+	return Name
+}
+
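+// Editor's note: an illustrative usage sketch, not part of upstream Docker.
+// Assuming a writable log path, the driver above can be exercised roughly like
+// this (the path and option values are hypothetical):
+//
+//	l, err := New(logger.Info{
+//		LogPath: "/var/lib/docker/containers/abc/abc.log",
+//		Config:  map[string]string{"max-size": "10m", "max-file": "3"},
+//	})
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer l.Close()
+//	msg := logger.NewMessage()
+//	msg.Line = append(msg.Line, "hello"...)
+//	msg.Source = "stdout"
+//	msg.Timestamp = time.Now()
+//	_ = l.Log(msg) // Log returns msg to the message pool after writing
+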
+func (d *driver) Log(msg *logger.Message) error { + d.mu.Lock() + err := d.logfile.WriteLogEntry(msg) + d.mu.Unlock() + return err +} + +func (d *driver) Close() error { + d.mu.Lock() + d.closed = true + err := d.logfile.Close() + for r := range d.readers { + r.ProducerGone() + delete(d.readers, r) + } + d.mu.Unlock() + return err +} + +func messageToProto(msg *logger.Message, proto *logdriver.LogEntry, partial *logdriver.PartialLogEntryMetadata) { + proto.Source = msg.Source + proto.TimeNano = msg.Timestamp.UnixNano() + proto.Line = append(proto.Line[:0], msg.Line...) + proto.Partial = msg.PLogMetaData != nil + if proto.Partial { + partial.Ordinal = int32(msg.PLogMetaData.Ordinal) + partial.Last = msg.PLogMetaData.Last + partial.Id = msg.PLogMetaData.ID + proto.PartialLogMetadata = partial + } else { + proto.PartialLogMetadata = nil + } +} + +func protoToMessage(proto *logdriver.LogEntry) *logger.Message { + msg := &logger.Message{ + Source: proto.Source, + Timestamp: time.Unix(0, proto.TimeNano), + } + if proto.Partial { + var md backend.PartialLogMetaData + md.Last = proto.GetPartialLogMetadata().GetLast() + md.ID = proto.GetPartialLogMetadata().GetId() + md.Ordinal = int(proto.GetPartialLogMetadata().GetOrdinal()) + msg.PLogMetaData = &md + } + msg.Line = append(msg.Line[:0], proto.Line...) + return msg +} + +func resetProto(proto *logdriver.LogEntry) { + proto.Source = "" + proto.Line = proto.Line[:0] + proto.TimeNano = 0 + proto.Partial = false + if proto.PartialLogMetadata != nil { + proto.PartialLogMetadata.Id = "" + proto.PartialLogMetadata.Last = false + proto.PartialLogMetadata.Ordinal = 0 + } + proto.PartialLogMetadata = nil +} diff --git a/extender/code/vendor/github.com/docker/docker/daemon/logger/local/read.go b/extender/code/vendor/github.com/docker/docker/daemon/logger/local/read.go new file mode 100644 index 000000000..a752de2a8 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/daemon/logger/local/read.go @@ -0,0 +1,174 @@ +package local + +import ( + "context" + "encoding/binary" + "io" + + "bytes" + + "github.com/docker/docker/api/types/plugins/logdriver" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" +) + +func (d *driver) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { + logWatcher := logger.NewLogWatcher() + + go d.readLogs(logWatcher, config) + return logWatcher +} + +func (d *driver) readLogs(watcher *logger.LogWatcher, config logger.ReadConfig) { + defer close(watcher.Msg) + + d.mu.Lock() + d.readers[watcher] = struct{}{} + d.mu.Unlock() + + d.logfile.ReadLogs(config, watcher) + + d.mu.Lock() + delete(d.readers, watcher) + d.mu.Unlock() +} + +func getTailReader(ctx context.Context, r loggerutils.SizeReaderAt, req int) (io.Reader, int, error) { + size := r.Size() + if req < 0 { + return nil, 0, errdefs.InvalidParameter(errors.Errorf("invalid number of lines to tail: %d", req)) + } + + if size < (encodeBinaryLen*2)+1 { + return bytes.NewReader(nil), 0, nil + } + + const encodeBinaryLen64 = int64(encodeBinaryLen) + var found int + + buf := make([]byte, encodeBinaryLen) + + offset := size + for { + select { + case <-ctx.Done(): + return nil, 0, ctx.Err() + default: + } + + n, err := r.ReadAt(buf, offset-encodeBinaryLen64) + if err != nil && err != io.EOF { + return nil, 0, errors.Wrap(err, "error reading log message footer") + } + + if n != encodeBinaryLen { + return nil, 0, errdefs.DataLoss(errors.New("unexpected number of bytes 
read from log message footer"))
+		}
+
+		msgLen := binary.BigEndian.Uint32(buf)
+
+		n, err = r.ReadAt(buf, offset-encodeBinaryLen64-encodeBinaryLen64-int64(msgLen))
+		if err != nil && err != io.EOF {
+			return nil, 0, errors.Wrap(err, "error reading log message header")
+		}
+
+		if n != encodeBinaryLen {
+			return nil, 0, errdefs.DataLoss(errors.New("unexpected number of bytes read from log message header"))
+		}
+
+		if msgLen != binary.BigEndian.Uint32(buf) {
+			// errors.New instead of errors.Wrap(err, ...): err is nil (or io.EOF)
+			// on this path, and wrapping a nil error would silently return nil.
+			return nil, 0, errdefs.DataLoss(errors.New("log message header and footer indicate different message sizes"))
+		}
+
+		found++
+		offset -= int64(msgLen)
+		offset -= encodeBinaryLen64 * 2
+		if found == req {
+			break
+		}
+		if offset <= 0 {
+			break
+		}
+	}
+
+	return io.NewSectionReader(r, offset, size), found, nil
+}
+
+func decodeFunc(rdr io.Reader) func() (*logger.Message, error) {
+	proto := &logdriver.LogEntry{}
+	buf := make([]byte, initialBufSize)
+
+	return func() (*logger.Message, error) {
+		var (
+			read int
+			err  error
+		)
+
+		resetProto(proto)
+
+		for i := 0; i < maxDecodeRetry; i++ {
+			var n int
+			n, err = io.ReadFull(rdr, buf[read:encodeBinaryLen])
+			if err != nil {
+				if err != io.ErrUnexpectedEOF {
+					return nil, errors.Wrap(err, "error reading log message length")
+				}
+				read += n
+				continue
+			}
+			read += n
+			break
+		}
+		if err != nil {
+			return nil, errors.Wrapf(err, "could not read log message length: read: %d, expected: %d", read, encodeBinaryLen)
+		}
+
+		msgLen := int(binary.BigEndian.Uint32(buf[:read]))
+
+		if len(buf) < msgLen+encodeBinaryLen {
+			buf = make([]byte, msgLen+encodeBinaryLen)
+		} else {
+			if msgLen <= initialBufSize {
+				buf = buf[:initialBufSize]
+			} else {
+				buf = buf[:msgLen+encodeBinaryLen]
+			}
+		}
+
+		return decodeLogEntry(rdr, proto, buf, msgLen)
+	}
+}
+
+func decodeLogEntry(rdr io.Reader, proto *logdriver.LogEntry, buf []byte, msgLen int) (*logger.Message, error) {
+	var (
+		read int
+		err  error
+	)
+	for i := 0; i < maxDecodeRetry; i++ {
+		var n int
+		n, err = io.ReadFull(rdr, buf[read:msgLen+encodeBinaryLen])
+		if err != nil {
+			if err != io.ErrUnexpectedEOF {
+				return nil, errors.Wrap(err, "could not decode log entry")
+			}
+			read += n
+			continue
+		}
+		break
+	}
+	if err != nil {
+		return nil, errors.Wrapf(err, "could not decode entry: read %d, expected: %d", read, msgLen)
+	}
+
+	if err := proto.Unmarshal(buf[:msgLen]); err != nil {
+		return nil, errors.Wrap(err, "error unmarshalling log entry")
+	}
+
+	msg := protoToMessage(proto)
+	if msg.PLogMetaData == nil {
+		msg.Line = append(msg.Line, '\n')
+	}
+	return msg, nil
+}
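+
+// Editor's note (illustrative, not part of upstream Docker): the fixed-size
+// header and footer around each entry are what let getTailReader above walk a
+// file backwards without an index. With encodeBinaryLen = 4, a frame carrying
+// a 22-byte payload occupies 4 + 22 + 4 = 30 bytes, so from an offset just
+// past a frame the loop steps back with
+//
+//	offset -= int64(msgLen)
+//	offset -= encodeBinaryLen64 * 2
+//
+// i.e. 30 bytes here, landing exactly at the start of the previous frame.
diff --git a/extender/code/vendor/github.com/docker/docker/daemon/logger/logger.go b/extender/code/vendor/github.com/docker/docker/daemon/logger/logger.go
new file mode 100644
index 000000000..6308270b2
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/daemon/logger/logger.go
@@ -0,0 +1,162 @@
+// Package logger defines interfaces that logger drivers implement to
+// log messages.
+//
+// The other half of a logger driver is the implementation of the
+// factory, which holds the contextual instance information that
+// allows multiple loggers of the same type to perform different
+// actions, such as logging to different locations.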
+package logger // import "github.com/docker/docker/daemon/logger"
+
+import (
+	"sync"
+	"time"
+
+	"github.com/docker/docker/api/types/backend"
+)
+
+// ErrReadLogsNotSupported is returned when the underlying log driver does not support reading
+type ErrReadLogsNotSupported struct{}
+
+func (ErrReadLogsNotSupported) Error() string {
+	return "configured logging driver does not support reading"
+}
+
+// NotImplemented makes this error implement the `NotImplemented` interface from api/errdefs
+func (ErrReadLogsNotSupported) NotImplemented() {}
+
+const (
+	logWatcherBufferSize = 4096
+)
+
+var messagePool = &sync.Pool{New: func() interface{} { return &Message{Line: make([]byte, 0, 256)} }}
+
+// NewMessage returns a new message from the message sync.Pool
+func NewMessage() *Message {
+	return messagePool.Get().(*Message)
+}
+
+// PutMessage puts the specified message back in the message pool.
+// The message fields are reset before putting into the pool.
+func PutMessage(msg *Message) {
+	msg.reset()
+	messagePool.Put(msg)
+}
+
+// Message is a data structure that represents a piece of output produced by some
+// container. The Line member is a slice of an array whose contents can be
+// changed after a log driver's Log() method returns.
+//
+// Message is subtyped from backend.LogMessage because there is a lot of
+// internal complexity around the Message type that should not be exposed
+// to any package not explicitly importing the logger type.
+//
+// Any changes made to this struct must also be updated in the `reset` function
+type Message backend.LogMessage

+// reset sets the message back to default values
+// This is used when putting a message back into the message pool.
+// Any changes to the `Message` struct should be reflected here.
+func (m *Message) reset() {
+	m.Line = m.Line[:0]
+	m.Source = ""
+	m.Attrs = nil
+	m.PLogMetaData = nil
+
+	m.Err = nil
+}
+
+// AsLogMessage returns a pointer to the message as a pointer to
+// backend.LogMessage, which is an identical type with a different purpose
+func (m *Message) AsLogMessage() *backend.LogMessage {
+	return (*backend.LogMessage)(m)
+}
+
+// Logger is the interface for docker logging drivers.
+type Logger interface {
+	Log(*Message) error
+	Name() string
+	Close() error
+}
+
+// SizedLogger is the interface for logging drivers that can control
+// the size of buffer used for their messages.
+type SizedLogger interface {
+	Logger
+	BufSize() int
+}
+
+// ReadConfig is the configuration passed into ReadLogs.
+type ReadConfig struct {
+	Since  time.Time
+	Until  time.Time
+	Tail   int
+	Follow bool
+}
+
+// LogReader is the interface for reading log messages for loggers that support reading.
+type LogReader interface {
+	// Read logs from underlying logging backend
+	ReadLogs(ReadConfig) *LogWatcher
+}
+
+// LogWatcher is used when consuming logs read from the LogReader interface.
+type LogWatcher struct {
+	// For sending log messages to a reader.
+	Msg chan *Message
+	// For sending error messages that occur while reading logs.
+	Err          chan error
+	producerOnce sync.Once
+	producerGone chan struct{}
+	consumerOnce sync.Once
+	consumerGone chan struct{}
+}
+
+// NewLogWatcher returns a new LogWatcher.
+func NewLogWatcher() *LogWatcher {
+	return &LogWatcher{
+		Msg:          make(chan *Message, logWatcherBufferSize),
+		Err:          make(chan error, 1),
+		producerGone: make(chan struct{}),
+		consumerGone: make(chan struct{}),
+	}
+}
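+
+// Editor's note: an illustrative consumption sketch, not part of upstream
+// Docker. A watcher returned by a driver's ReadLogs is typically drained like
+// this (the watcher variable is hypothetical):
+//
+//	for {
+//		select {
+//		case msg, ok := <-watcher.Msg:
+//			if !ok {
+//				return // the driver closed the channel
+//			}
+//			fmt.Printf("%s: %s", msg.Source, msg.Line)
+//		case err := <-watcher.Err:
+//			log.Fatal(err)
+//		}
+//	}
+
+// ProducerGone notifies the underlying log reader that
+// the logs producer (a container) is gone.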
+func (w *LogWatcher) ProducerGone() {
+	// only close if not already closed
+	w.producerOnce.Do(func() {
+		close(w.producerGone)
+	})
+}
+
+// WatchProducerGone returns a channel receiver that receives notification
+// once the logs producer (a container) is gone.
+func (w *LogWatcher) WatchProducerGone() <-chan struct{} {
+	return w.producerGone
+}
+
+// ConsumerGone notifies that the logs consumer is gone.
+func (w *LogWatcher) ConsumerGone() {
+	// only close if not already closed
+	w.consumerOnce.Do(func() {
+		close(w.consumerGone)
+	})
+}
+
+// WatchConsumerGone returns a channel receiver that receives notification
+// when the log watcher consumer is gone.
+func (w *LogWatcher) WatchConsumerGone() <-chan struct{} {
+	return w.consumerGone
+}
+
+// Capability defines the list of capabilities that a driver can implement.
+// These capabilities are not required in order to be a logging driver; they
+// do, however, determine how a logging driver can be used.
+type Capability struct {
+	// Determines if a log driver can read back logs
+	ReadLogs bool
+}
+
+// MarshalFunc is a func that marshals a message into an arbitrary format
+type MarshalFunc func(*Message) ([]byte, error)
diff --git a/extender/code/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go b/extender/code/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go
new file mode 100644
index 000000000..719512dbd
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go
@@ -0,0 +1,31 @@
+package loggerutils // import "github.com/docker/docker/daemon/logger/loggerutils"
+
+import (
+	"bytes"
+
+	"github.com/docker/docker/daemon/logger"
+	"github.com/docker/docker/daemon/logger/templates"
+)
+
+// DefaultTemplate defines the default template logger should use.
+const DefaultTemplate = "{{.ID}}"
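+
+// Editor's note: an illustrative example, not part of upstream Docker. With a
+// hypothetical container named "web" and ID "0123456789abcdef", a log-opt of
+// tag={{.Name}}/{{.ID}} would yield:
+//
+//	info.Config["tag"] = "{{.Name}}/{{.ID}}"
+//	tag, _ := ParseLogTag(info, DefaultTemplate) // "web/0123456789ab"
+
+// ParseLogTag generates a context aware tag for consistency across different
+// log drivers based on the context of the running container.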
+func ParseLogTag(info logger.Info, defaultTemplate string) (string, error) {
+	tagTemplate := info.Config["tag"]
+	if tagTemplate == "" {
+		tagTemplate = defaultTemplate
+	}
+
+	tmpl, err := templates.NewParse("log-tag", tagTemplate)
+	if err != nil {
+		return "", err
+	}
+	buf := new(bytes.Buffer)
+	if err := tmpl.Execute(buf, &info); err != nil {
+		return "", err
+	}
+
+	return buf.String(), nil
+}
diff --git a/extender/code/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go b/extender/code/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go
new file mode 100644
index 000000000..fa45154ec
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go
@@ -0,0 +1,714 @@
+package loggerutils // import "github.com/docker/docker/daemon/logger/loggerutils"
+
+import (
+	"compress/gzip"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/docker/docker/daemon/logger"
+	"github.com/docker/docker/pkg/filenotify"
+	"github.com/docker/docker/pkg/pools"
+	"github.com/docker/docker/pkg/pubsub"
+	"github.com/fsnotify/fsnotify"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+const tmpLogfileSuffix = ".tmp"
+
+// rotateFileMetadata is metadata stored in the gzip header of the compressed log file
+type rotateFileMetadata struct {
+	LastTime time.Time `json:"lastTime,omitempty"`
+}
+
+// refCounter tracks how many readers reference a logfile
+type refCounter struct {
+	mu      sync.Mutex
+	counter map[string]int
+}
+
+// GetReference increases the reference counter for the specified logfile and
+// returns an open handle to it
+func (rc *refCounter) GetReference(fileName string, openRefFile func(fileName string, exists bool) (*os.File, error)) (*os.File, error) {
+	rc.mu.Lock()
+	defer rc.mu.Unlock()
+
+	var (
+		file *os.File
+		err  error
+	)
+	_, ok := rc.counter[fileName]
+	file, err = openRefFile(fileName, ok)
+	if err != nil {
+		return nil, err
+	}
+
+	if ok {
+		rc.counter[fileName]++
+	} else if file != nil {
+		rc.counter[file.Name()] = 1
+	}
+
+	return file, nil
+}
+
+// Dereference decreases the reference counter for the specified logfile and
+// removes the file once the counter drops to zero
+func (rc *refCounter) Dereference(fileName string) error {
+	rc.mu.Lock()
+	defer rc.mu.Unlock()
+
+	rc.counter[fileName]--
+	if rc.counter[fileName] <= 0 {
+		delete(rc.counter, fileName)
+		err := os.Remove(fileName)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// LogFile is the Logger implementation for default Docker logging.
+type LogFile struct {
+	mu              sync.RWMutex // protects the logfile access
+	f               *os.File     // store for closing
+	closed          bool
+	rotateMu        sync.Mutex // blocks the next rotation until the current rotation is completed
+	capacity        int64      // maximum size of each file
+	currentSize     int64      // current size of the latest file
+	maxFiles        int        // maximum number of files
+	compress        bool       // whether old versions of log files are compressed
+	lastTimestamp   time.Time  // timestamp of the last log
+	filesRefCounter refCounter // reference counts for decompressed files
+	notifyRotate    *pubsub.Publisher
+	marshal         logger.MarshalFunc
+	createDecoder   makeDecoderFunc
+	getTailReader   GetTailReaderFunc
+	perms           os.FileMode
+}
+
+type makeDecoderFunc func(rdr io.Reader) func() (*logger.Message, error)
+
+// SizeReaderAt defines a ReaderAt that also reports its size.
+// This is used for tailing log files.
+type SizeReaderAt interface { + io.ReaderAt + Size() int64 +} + +// GetTailReaderFunc is used to truncate a reader to only read as much as is required +// in order to get the passed in number of log lines. +// It returns the sectioned reader, the number of lines that the section reader +// contains, and any error that occurs. +type GetTailReaderFunc func(ctx context.Context, f SizeReaderAt, nLogLines int) (rdr io.Reader, nLines int, err error) + +// NewLogFile creates new LogFile +func NewLogFile(logPath string, capacity int64, maxFiles int, compress bool, marshaller logger.MarshalFunc, decodeFunc makeDecoderFunc, perms os.FileMode, getTailReader GetTailReaderFunc) (*LogFile, error) { + log, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, perms) + if err != nil { + return nil, err + } + + size, err := log.Seek(0, os.SEEK_END) + if err != nil { + return nil, err + } + + return &LogFile{ + f: log, + capacity: capacity, + currentSize: size, + maxFiles: maxFiles, + compress: compress, + filesRefCounter: refCounter{counter: make(map[string]int)}, + notifyRotate: pubsub.NewPublisher(0, 1), + marshal: marshaller, + createDecoder: decodeFunc, + perms: perms, + getTailReader: getTailReader, + }, nil +} + +// WriteLogEntry writes the provided log message to the current log file. +// This may trigger a rotation event if the max file/capacity limits are hit. +func (w *LogFile) WriteLogEntry(msg *logger.Message) error { + b, err := w.marshal(msg) + if err != nil { + return errors.Wrap(err, "error marshalling log message") + } + + logger.PutMessage(msg) + + w.mu.Lock() + if w.closed { + w.mu.Unlock() + return errors.New("cannot write because the output file was closed") + } + + if err := w.checkCapacityAndRotate(); err != nil { + w.mu.Unlock() + return err + } + + n, err := w.f.Write(b) + if err == nil { + w.currentSize += int64(n) + w.lastTimestamp = msg.Timestamp + } + w.mu.Unlock() + return err +} + +func (w *LogFile) checkCapacityAndRotate() error { + if w.capacity == -1 { + return nil + } + + if w.currentSize >= w.capacity { + w.rotateMu.Lock() + fname := w.f.Name() + if err := w.f.Close(); err != nil { + w.rotateMu.Unlock() + return errors.Wrap(err, "error closing file") + } + if err := rotate(fname, w.maxFiles, w.compress); err != nil { + w.rotateMu.Unlock() + return err + } + file, err := os.OpenFile(fname, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, w.perms) + if err != nil { + w.rotateMu.Unlock() + return err + } + w.f = file + w.currentSize = 0 + w.notifyRotate.Publish(struct{}{}) + + if w.maxFiles <= 1 || !w.compress { + w.rotateMu.Unlock() + return nil + } + + go func() { + compressFile(fname+".1", w.lastTimestamp) + w.rotateMu.Unlock() + }() + } + + return nil +} + +func rotate(name string, maxFiles int, compress bool) error { + if maxFiles < 2 { + return nil + } + + var extension string + if compress { + extension = ".gz" + } + + lastFile := fmt.Sprintf("%s.%d%s", name, maxFiles-1, extension) + err := os.Remove(lastFile) + if err != nil && !os.IsNotExist(err) { + return errors.Wrap(err, "error removing oldest log file") + } + + for i := maxFiles - 1; i > 1; i-- { + toPath := name + "." + strconv.Itoa(i) + extension + fromPath := name + "." 
+ strconv.Itoa(i-1) + extension
+		if err := os.Rename(fromPath, toPath); err != nil && !os.IsNotExist(err) {
+			return err
+		}
+	}
+
+	if err := os.Rename(name, name+".1"); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+
+	return nil
+}
+
+func compressFile(fileName string, lastTimestamp time.Time) {
+	file, err := os.Open(fileName)
+	if err != nil {
+		logrus.Errorf("Failed to open log file: %v", err)
+		return
+	}
+	defer func() {
+		file.Close()
+		err := os.Remove(fileName)
+		if err != nil {
+			logrus.Errorf("Failed to remove source log file: %v", err)
+		}
+	}()
+
+	outFile, err := os.OpenFile(fileName+".gz", os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0640)
+	if err != nil {
+		logrus.Errorf("Failed to open or create gzip log file: %v", err)
+		return
+	}
+	defer func() {
+		outFile.Close()
+		if err != nil {
+			os.Remove(fileName + ".gz")
+		}
+	}()
+
+	compressWriter := gzip.NewWriter(outFile)
+	defer compressWriter.Close()
+
+	// Add the last log entry timestamp to the gzip header
+	extra := rotateFileMetadata{}
+	extra.LastTime = lastTimestamp
+	compressWriter.Header.Extra, err = json.Marshal(&extra)
+	if err != nil {
+		// Log the error only and don't return, since this is just an optimization.
+		logrus.Warningf("Failed to marshal gzip header as JSON: %v", err)
+	}
+
+	_, err = pools.Copy(compressWriter, file)
+	if err != nil {
+		logrus.WithError(err).WithField("module", "container.logs").WithField("file", fileName).Error("Error compressing log file")
+		return
+	}
+}
+
+// MaxFiles returns the maximum number of files
+func (w *LogFile) MaxFiles() int {
+	return w.maxFiles
+}
+
+// Close closes underlying file and signals all readers to stop.
+func (w *LogFile) Close() error {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	if w.closed {
+		return nil
+	}
+	if err := w.f.Close(); err != nil {
+		return err
+	}
+	w.closed = true
+	return nil
+}
+
+// ReadLogs decodes entries from log files and sends them to the passed-in watcher
+//
+// Note: using the follow option can become inconsistent when rotations are very frequent and the max file count is 1.
+// TODO: Consider a different implementation which can effectively follow logs under frequent rotations.
+func (w *LogFile) ReadLogs(config logger.ReadConfig, watcher *logger.LogWatcher) {
+	w.mu.RLock()
+	currentFile, err := os.Open(w.f.Name())
+	if err != nil {
+		w.mu.RUnlock()
+		watcher.Err <- err
+		return
+	}
+	defer currentFile.Close()
+
+	currentChunk, err := newSectionReader(currentFile)
+	if err != nil {
+		w.mu.RUnlock()
+		watcher.Err <- err
+		return
+	}
+
+	if config.Tail != 0 {
+		// TODO(@cpuguy83): Instead of opening every file, only get the files which
+		// are needed to tail.
+		// This is especially costly when compression is enabled.
+		files, err := w.openRotatedFiles(config)
+		w.mu.RUnlock()
+		if err != nil {
+			watcher.Err <- err
+			return
+		}
+
+		closeFiles := func() {
+			for _, f := range files {
+				f.Close()
+				fileName := f.Name()
+				if strings.HasSuffix(fileName, tmpLogfileSuffix) {
+					err := w.filesRefCounter.Dereference(fileName)
+					if err != nil {
+						logrus.Errorf("Failed to dereference the log file %q: %v", fileName, err)
+					}
+				}
+			}
+		}
+
+		readers := make([]SizeReaderAt, 0, len(files)+1)
+		for _, f := range files {
+			stat, err := f.Stat()
+			if err != nil {
+				watcher.Err <- errors.Wrap(err, "error reading size of rotated file")
+				closeFiles()
+				return
+			}
+			readers = append(readers, io.NewSectionReader(f, 0, stat.Size()))
+		}
+		if currentChunk.Size() > 0 {
+			readers = append(readers, currentChunk)
+		}
+
+		tailFiles(readers, watcher, w.createDecoder, w.getTailReader, config)
+		closeFiles()
+
+		w.mu.RLock()
+	}
+
+	if !config.Follow || w.closed {
+		w.mu.RUnlock()
+		return
+	}
+	w.mu.RUnlock()
+
+	notifyRotate := w.notifyRotate.Subscribe()
+	defer w.notifyRotate.Evict(notifyRotate)
+	followLogs(currentFile, watcher, notifyRotate, w.createDecoder, config.Since, config.Until)
+}
+
+func (w *LogFile) openRotatedFiles(config logger.ReadConfig) (files []*os.File, err error) {
+	w.rotateMu.Lock()
+	defer w.rotateMu.Unlock()
+
+	defer func() {
+		if err == nil {
+			return
+		}
+		for _, f := range files {
+			f.Close()
+			if strings.HasSuffix(f.Name(), tmpLogfileSuffix) {
+				err := os.Remove(f.Name())
+				if err != nil && !os.IsNotExist(err) {
+					logrus.Warnf("Failed to remove logfile: %v", err)
+				}
+			}
+		}
+	}()
+
+	for i := w.maxFiles; i > 1; i-- {
+		f, err := os.Open(fmt.Sprintf("%s.%d", w.f.Name(), i-1))
+		if err != nil {
+			if !os.IsNotExist(err) {
+				return nil, errors.Wrap(err, "error opening rotated log file")
+			}
+
+			fileName := fmt.Sprintf("%s.%d.gz", w.f.Name(), i-1)
+			decompressedFileName := fileName + tmpLogfileSuffix
+			tmpFile, err := w.filesRefCounter.GetReference(decompressedFileName, func(refFileName string, exists bool) (*os.File, error) {
+				if exists {
+					return os.Open(refFileName)
+				}
+				return decompressfile(fileName, refFileName, config.Since)
+			})
+
+			if err != nil {
+				if !os.IsNotExist(errors.Cause(err)) {
+					return nil, errors.Wrap(err, "error getting reference to decompressed log file")
+				}
+				continue
+			}
+			if tmpFile == nil {
+				// Logs from before `config.Since` do not need to be read
+				break
+			}
+
+			files = append(files, tmpFile)
+			continue
+		}
+		files = append(files, f)
+	}
+
+	return files, nil
+}
+
+func decompressfile(fileName, destFileName string, since time.Time) (*os.File, error) {
+	cf, err := os.Open(fileName)
+	if err != nil {
+		return nil, errors.Wrap(err, "error opening file for decompression")
+	}
+	defer cf.Close()
+
+	rc, err := gzip.NewReader(cf)
+	if err != nil {
+		return nil, errors.Wrap(err, "error making gzip reader for compressed log file")
+	}
+	defer rc.Close()
+
+	// Extract the last log entry timestamp from the gzip header
+	extra := &rotateFileMetadata{}
+	err = json.Unmarshal(rc.Header.Extra, extra)
+	if err == nil && extra.LastTime.Before(since) {
+		return nil, nil
+	}
+
+	rs, err := os.OpenFile(destFileName, os.O_CREATE|os.O_RDWR, 0640)
+	if err != nil {
+		return nil, errors.Wrap(err, "error creating file for copying decompressed log stream")
+	}
+
+	_, err = pools.Copy(rs, rc)
+	if err != nil {
+		rs.Close()
+		rErr := os.Remove(rs.Name())
+		if rErr != nil && !os.IsNotExist(rErr) {
+			logrus.Errorf("Failed to remove logfile: %v", rErr)
+		}
+		return nil, errors.Wrap(err, "error while copying decompressed log stream to file")
+	}
+
+	return rs, nil
+}
+
+func newSectionReader(f *os.File) (*io.SectionReader, error) {
+	// seek to the end to get the size
+	// we'll leave this at the end of the file since section reader does not advance the reader
+	size, err := f.Seek(0, os.SEEK_END)
+	if err != nil {
+		return nil, errors.Wrap(err, "error getting current file size")
+	}
+	return io.NewSectionReader(f, 0, size), nil
+}
+
+func tailFiles(files []SizeReaderAt, watcher *logger.LogWatcher, createDecoder makeDecoderFunc, getTailReader GetTailReaderFunc, config logger.ReadConfig) {
+	nLines := config.Tail
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	// TODO(@cpuguy83): we should plumb a context through instead of dealing with `WatchClose()` here.
+	go func() {
+		select {
+		case <-ctx.Done():
+		case <-watcher.WatchConsumerGone():
+			cancel()
+		}
+	}()
+
+	readers := make([]io.Reader, 0, len(files))
+
+	if config.Tail > 0 {
+		for i := len(files) - 1; i >= 0 && nLines > 0; i-- {
+			tail, n, err := getTailReader(ctx, files[i], nLines)
+			if err != nil {
+				watcher.Err <- errors.Wrap(err, "error finding file position to start log tailing")
+				return
+			}
+			nLines -= n
+			readers = append([]io.Reader{tail}, readers...)
+		}
+	} else {
+		for _, r := range files {
+			readers = append(readers, &wrappedReaderAt{ReaderAt: r})
+		}
+	}
+
+	rdr := io.MultiReader(readers...)
+	decodeLogLine := createDecoder(rdr)
+	for {
+		msg, err := decodeLogLine()
+		if err != nil {
+			if errors.Cause(err) != io.EOF {
+				watcher.Err <- err
+			}
+			return
+		}
+		if !config.Since.IsZero() && msg.Timestamp.Before(config.Since) {
+			continue
+		}
+		if !config.Until.IsZero() && msg.Timestamp.After(config.Until) {
+			return
+		}
+		select {
+		case <-ctx.Done():
+			return
+		case watcher.Msg <- msg:
+		}
+	}
+}
+
+func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, createDecoder makeDecoderFunc, since, until time.Time) {
+	decodeLogLine := createDecoder(f)
+
+	name := f.Name()
+	fileWatcher, err := watchFile(name)
+	if err != nil {
+		logWatcher.Err <- err
+		return
+	}
+	defer func() {
+		f.Close()
+		fileWatcher.Close()
+	}()
+
+	var retries int
+	handleRotate := func() error {
+		f.Close()
+		fileWatcher.Remove(name)
+
+		// retry when the file doesn't exist
+		for retries := 0; retries <= 5; retries++ {
+			f, err = os.Open(name)
+			if err == nil || !os.IsNotExist(err) {
+				break
+			}
+		}
+		if err != nil {
+			return err
+		}
+		if err := fileWatcher.Add(name); err != nil {
+			return err
+		}
+		decodeLogLine = createDecoder(f)
+		return nil
+	}
+
+	errRetry := errors.New("retry")
+	errDone := errors.New("done")
+	waitRead := func() error {
+		select {
+		case e := <-fileWatcher.Events():
+			switch e.Op {
+			case fsnotify.Write:
+				decodeLogLine = createDecoder(f)
+				return nil
+			case fsnotify.Rename, fsnotify.Remove:
+				select {
+				case <-notifyRotate:
+				case <-logWatcher.WatchProducerGone():
+					return errDone
+				case <-logWatcher.WatchConsumerGone():
+					return errDone
+				}
+				if err := handleRotate(); err != nil {
+					return err
+				}
+				return nil
+			}
+			return errRetry
+		case err := <-fileWatcher.Errors():
+			logrus.Debugf("logger got error watching file: %v", err)
+			// Something happened, let's try and stay alive and create a new watcher
+			if retries <= 5 {
+				fileWatcher.Close()
+				fileWatcher, err = watchFile(name)
+				if err != nil {
+					return err
+				}
+				retries++
+				return errRetry
+			}
+			return err
+		case <-logWatcher.WatchProducerGone():
+			return errDone
+		case <-logWatcher.WatchConsumerGone():
+			return errDone
+		}
+	}
+
+	handleDecodeErr := func(err error) error {
+		if errors.Cause(err) != io.EOF {
+			return err
+		}
+
+		for {
+			err := waitRead()
+			if err == nil {
+				break
+			}
+			if err == errRetry {
+				continue
+			}
+			return err
+		}
+		return nil
+	}
+
+	// main loop
+	for {
+		msg, err := decodeLogLine()
+		if err != nil {
+			if err := handleDecodeErr(err); err != nil {
+				if err == errDone {
+					return
+				}
+				// we got an unrecoverable error, so return
+				logWatcher.Err <- err
+				return
+			}
+			// ready to try again
+			continue
+		}
+
+		retries = 0 // reset retries since we've succeeded
+		if !since.IsZero() && msg.Timestamp.Before(since) {
+			continue
+		}
+		if !until.IsZero() && msg.Timestamp.After(until) {
+			return
+		}
+		// send the message, unless the consumer is gone
+		select {
+		case logWatcher.Msg <- msg:
+		case <-logWatcher.WatchConsumerGone():
+			return
+		}
+	}
+}
+
+func watchFile(name string) (filenotify.FileWatcher, error) {
+	var fileWatcher filenotify.FileWatcher
+
+	if runtime.GOOS == "windows" {
+		// File watching on Windows relies on syscall notifications, which have an
+		// issue caused by file caching: the underlying ReadDirectoryChangesW() only
+		// detects writes to disk, not writes to the cache. Because of the OS's lazy
+		// writing we don't get notifications for file writes, so the watcher doesn't
+		// work. Hence, for Windows, we use a poll-based notifier.
+		fileWatcher = filenotify.NewPollingWatcher()
+	} else {
+		var err error
+		fileWatcher, err = filenotify.New()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	logger := logrus.WithFields(logrus.Fields{
+		"module": "logger",
+		"file":   name,
+	})
+
+	if err := fileWatcher.Add(name); err != nil {
+		// fall back to the file poller
+		logger.WithError(err).Warnf("falling back to file poller")
+		fileWatcher.Close()
+		fileWatcher = filenotify.NewPollingWatcher()
+
+		if err := fileWatcher.Add(name); err != nil {
+			fileWatcher.Close()
+			logger.WithError(err).Debugf("error watching log file for modifications")
+			return nil, err
+		}
+	}
+
+	return fileWatcher, nil
+}
+
+type wrappedReaderAt struct {
+	io.ReaderAt
+	pos int64
+}
+
+func (r *wrappedReaderAt) Read(p []byte) (int, error) {
+	n, err := r.ReaderAt.ReadAt(p, r.pos)
+	r.pos += int64(n)
+	return n, err
+}
diff --git a/extender/code/vendor/github.com/docker/docker/daemon/logger/loginfo.go b/extender/code/vendor/github.com/docker/docker/daemon/logger/loginfo.go
new file mode 100644
index 000000000..4c48235f5
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/daemon/logger/loginfo.go
@@ -0,0 +1,129 @@
+package logger // import "github.com/docker/docker/daemon/logger"
+
+import (
+	"fmt"
+	"os"
+	"regexp"
+	"strings"
+	"time"
+)
+
+// Info provides enough information for a logging driver to do its job.
+type Info struct {
+	Config              map[string]string
+	ContainerID         string
+	ContainerName       string
+	ContainerEntrypoint string
+	ContainerArgs       []string
+	ContainerImageID    string
+	ContainerImageName  string
+	ContainerCreated    time.Time
+	ContainerEnv        []string
+	ContainerLabels     map[string]string
+	LogPath             string
+	DaemonName          string
+}
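+
+// Editor's note: an illustrative example, not part of upstream Docker. With
+// hypothetical values, a "labels" log-opt selects matching container labels:
+//
+//	info := Info{
+//		Config:          map[string]string{"labels": "com.example.app"},
+//		ContainerLabels: map[string]string{"com.example.app": "web", "other": "x"},
+//	}
+//	extra, _ := info.ExtraAttributes(nil) // map[com.example.app:web]
+
+// ExtraAttributes returns the user-defined extra attributes (labels,
+// environment variables) in key-value format. This can be used by log drivers
+// that support metadata to add more context to a log.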
+func (info *Info) ExtraAttributes(keyMod func(string) string) (map[string]string, error) { + extra := make(map[string]string) + labels, ok := info.Config["labels"] + if ok && len(labels) > 0 { + for _, l := range strings.Split(labels, ",") { + if v, ok := info.ContainerLabels[l]; ok { + if keyMod != nil { + l = keyMod(l) + } + extra[l] = v + } + } + } + + envMapping := make(map[string]string) + for _, e := range info.ContainerEnv { + if kv := strings.SplitN(e, "=", 2); len(kv) == 2 { + envMapping[kv[0]] = kv[1] + } + } + + env, ok := info.Config["env"] + if ok && len(env) > 0 { + for _, l := range strings.Split(env, ",") { + if v, ok := envMapping[l]; ok { + if keyMod != nil { + l = keyMod(l) + } + extra[l] = v + } + } + } + + envRegex, ok := info.Config["env-regex"] + if ok && len(envRegex) > 0 { + re, err := regexp.Compile(envRegex) + if err != nil { + return nil, err + } + for k, v := range envMapping { + if re.MatchString(k) { + if keyMod != nil { + k = keyMod(k) + } + extra[k] = v + } + } + } + + return extra, nil +} + +// Hostname returns the hostname from the underlying OS. +func (info *Info) Hostname() (string, error) { + hostname, err := os.Hostname() + if err != nil { + return "", fmt.Errorf("logger: can not resolve hostname: %v", err) + } + return hostname, nil +} + +// Command returns the command that the container being logged was +// started with. The Entrypoint is prepended to the container +// arguments. +func (info *Info) Command() string { + terms := []string{info.ContainerEntrypoint} + terms = append(terms, info.ContainerArgs...) + command := strings.Join(terms, " ") + return command +} + +// ID Returns the Container ID shortened to 12 characters. +func (info *Info) ID() string { + return info.ContainerID[:12] +} + +// FullID is an alias of ContainerID. +func (info *Info) FullID() string { + return info.ContainerID +} + +// Name returns the ContainerName without a preceding '/'. +func (info *Info) Name() string { + return strings.TrimPrefix(info.ContainerName, "/") +} + +// ImageID returns the ContainerImageID shortened to 12 characters. +func (info *Info) ImageID() string { + return info.ContainerImageID[:12] +} + +// ImageFullID is an alias of ContainerImageID. 
+func (info *Info) ImageFullID() string {
+	return info.ContainerImageID
+}
+
+// ImageName is an alias of ContainerImageName.
+func (info *Info) ImageName() string {
+	return info.ContainerImageName
+}
diff --git a/extender/code/vendor/github.com/docker/docker/daemon/logger/metrics.go b/extender/code/vendor/github.com/docker/docker/daemon/logger/metrics.go
new file mode 100644
index 000000000..b7dfd38ec
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/daemon/logger/metrics.go
@@ -0,0 +1,21 @@
+package logger // import "github.com/docker/docker/daemon/logger"
+
+import (
+	"github.com/docker/go-metrics"
+)
+
+var (
+	logWritesFailedCount metrics.Counter
+	logReadsFailedCount  metrics.Counter
+	totalPartialLogs     metrics.Counter
+)
+
+func init() {
+	loggerMetrics := metrics.NewNamespace("logger", "", nil)
+
+	logWritesFailedCount = loggerMetrics.NewCounter("log_write_operations_failed", "Number of log write operations that failed")
+	logReadsFailedCount = loggerMetrics.NewCounter("log_read_operations_failed", "Number of log reads from container stdio that failed")
+	totalPartialLogs = loggerMetrics.NewCounter("log_entries_size_greater_than_buffer", "Number of log entries which are larger than the log buffer")
+
+	metrics.Register(loggerMetrics)
+}
diff --git a/extender/code/vendor/github.com/docker/docker/daemon/logger/plugin.go b/extender/code/vendor/github.com/docker/docker/daemon/logger/plugin.go
new file mode 100644
index 000000000..c66540ce5
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/daemon/logger/plugin.go
@@ -0,0 +1,116 @@
+package logger // import "github.com/docker/docker/daemon/logger"
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/docker/docker/api/types/plugins/logdriver"
+	"github.com/docker/docker/errdefs"
+	getter "github.com/docker/docker/pkg/plugingetter"
+	"github.com/docker/docker/pkg/plugins"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/pkg/errors"
+)
+
+var pluginGetter getter.PluginGetter
+
+const extName = "LogDriver"
+
+// logPlugin defines the available functions that logging plugins must implement.
+type logPlugin interface {
+	StartLogging(streamPath string, info Info) (err error)
+	StopLogging(streamPath string) (err error)
+	Capabilities() (cap Capability, err error)
+	ReadLogs(info Info, config ReadConfig) (stream io.ReadCloser, err error)
+}
+
+// RegisterPluginGetter sets the plugingetter
+func RegisterPluginGetter(plugingetter getter.PluginGetter) {
+	pluginGetter = plugingetter
+}
+
+// getPlugin returns a logging driver by its name.
+// If the driver is empty, it looks for the local driver.
+func getPlugin(name string, mode int) (Creator, error) {
+	p, err := pluginGetter.Get(name, extName, mode)
+	if err != nil {
+		return nil, fmt.Errorf("error looking up logging plugin %s: %v", name, err)
+	}
+
+	client, err := makePluginClient(p)
+	if err != nil {
+		return nil, err
+	}
+	return makePluginCreator(name, client, p.ScopedPath), nil
+}
+
+func makePluginClient(p getter.CompatPlugin) (logPlugin, error) {
+	if pc, ok := p.(getter.PluginWithV1Client); ok {
+		return &logPluginProxy{pc.Client()}, nil
+	}
+	pa, ok := p.(getter.PluginAddr)
+	if !ok {
+		return nil, errdefs.System(errors.Errorf("got unknown plugin type %T", p))
+	}
+
+	if pa.Protocol() != plugins.ProtocolSchemeHTTPV1 {
+		return nil, errors.Errorf("plugin protocol not supported: %s", p)
+	}
+
+	addr := pa.Addr()
+	c, err := plugins.NewClientWithTimeout(addr.Network()+"://"+addr.String(), nil, pa.Timeout())
+	if err != nil {
+		return nil, errors.Wrap(err, "error making plugin client")
+	}
+	return &logPluginProxy{c}, nil
+}
+
+func makePluginCreator(name string, l logPlugin, scopePath func(s string) string) Creator {
+	return func(logCtx Info) (logger Logger, err error) {
+		defer func() {
+			if err != nil {
+				pluginGetter.Get(name, extName, getter.Release)
+			}
+		}()
+
+		unscopedPath := filepath.Join("/", "run", "docker", "logging")
+		logRoot := scopePath(unscopedPath)
+		if err := os.MkdirAll(logRoot, 0700); err != nil {
+			return nil, err
+		}
+
+		id := stringid.GenerateNonCryptoID()
+		a := &pluginAdapter{
+			driverName: name,
+			id:         id,
+			plugin:     l,
+			fifoPath:   filepath.Join(logRoot, id),
+			logInfo:    logCtx,
+		}
+
+		cap, err := a.plugin.Capabilities()
+		if err == nil {
+			a.capabilities = cap
+		}
+
+		stream, err := openPluginStream(a)
+		if err != nil {
+			return nil, err
+		}
+
+		a.stream = stream
+		a.enc = logdriver.NewLogEntryEncoder(a.stream)
+
+		if err := l.StartLogging(filepath.Join(unscopedPath, id), logCtx); err != nil {
+			return nil, errors.Wrapf(err, "error creating logger")
+		}
+
+		if cap.ReadLogs {
+			return &pluginAdapterWithRead{a}, nil
+		}
+
+		return a, nil
+	}
+}
diff --git a/extender/code/vendor/github.com/docker/docker/daemon/logger/plugin_unix.go b/extender/code/vendor/github.com/docker/docker/daemon/logger/plugin_unix.go
new file mode 100644
index 000000000..e9a16af9b
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/daemon/logger/plugin_unix.go
@@ -0,0 +1,23 @@
+// +build linux freebsd
+
+package logger // import "github.com/docker/docker/daemon/logger"
+
+import (
+	"context"
+	"io"
+
+	"github.com/containerd/fifo"
+	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
+)
+
+func openPluginStream(a *pluginAdapter) (io.WriteCloser, error) {
+	// Make sure to also open with read (in addition to write) to avoid broken pipe errors on plugin failure.
+	// It is up to the plugin to keep track of pipes that it should re-attach to, however.
+	// If the plugin doesn't open for reads, then the container will block once the pipe is full.
+ f, err := fifo.OpenFifo(context.Background(), a.fifoPath, unix.O_RDWR|unix.O_CREAT|unix.O_NONBLOCK, 0700) + if err != nil { + return nil, errors.Wrapf(err, "error creating i/o pipe for log plugin: %s", a.Name()) + } + return f, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/daemon/logger/plugin_unsupported.go b/extender/code/vendor/github.com/docker/docker/daemon/logger/plugin_unsupported.go new file mode 100644 index 000000000..2ad47cc07 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/daemon/logger/plugin_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux,!freebsd + +package logger // import "github.com/docker/docker/daemon/logger" + +import ( + "errors" + "io" +) + +func openPluginStream(a *pluginAdapter) (io.WriteCloser, error) { + return nil, errors.New("log plugin not supported") +} diff --git a/extender/code/vendor/github.com/docker/docker/daemon/logger/proxy.go b/extender/code/vendor/github.com/docker/docker/daemon/logger/proxy.go new file mode 100644 index 000000000..4a1c77810 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/daemon/logger/proxy.go @@ -0,0 +1,107 @@ +package logger // import "github.com/docker/docker/daemon/logger" + +import ( + "errors" + "io" +) + +type client interface { + Call(string, interface{}, interface{}) error + Stream(string, interface{}) (io.ReadCloser, error) +} + +type logPluginProxy struct { + client +} + +type logPluginProxyStartLoggingRequest struct { + File string + Info Info +} + +type logPluginProxyStartLoggingResponse struct { + Err string +} + +func (pp *logPluginProxy) StartLogging(file string, info Info) (err error) { + var ( + req logPluginProxyStartLoggingRequest + ret logPluginProxyStartLoggingResponse + ) + + req.File = file + req.Info = info + if err = pp.Call("LogDriver.StartLogging", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type logPluginProxyStopLoggingRequest struct { + File string +} + +type logPluginProxyStopLoggingResponse struct { + Err string +} + +func (pp *logPluginProxy) StopLogging(file string) (err error) { + var ( + req logPluginProxyStopLoggingRequest + ret logPluginProxyStopLoggingResponse + ) + + req.File = file + if err = pp.Call("LogDriver.StopLogging", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type logPluginProxyCapabilitiesResponse struct { + Cap Capability + Err string +} + +func (pp *logPluginProxy) Capabilities() (cap Capability, err error) { + var ( + ret logPluginProxyCapabilitiesResponse + ) + + if err = pp.Call("LogDriver.Capabilities", nil, &ret); err != nil { + return + } + + cap = ret.Cap + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type logPluginProxyReadLogsRequest struct { + Info Info + Config ReadConfig +} + +func (pp *logPluginProxy) ReadLogs(info Info, config ReadConfig) (stream io.ReadCloser, err error) { + var ( + req logPluginProxyReadLogsRequest + ) + + req.Info = info + req.Config = config + return pp.Stream("LogDriver.ReadLogs", req) +} diff --git a/extender/code/vendor/github.com/docker/docker/daemon/logger/ring.go b/extender/code/vendor/github.com/docker/docker/daemon/logger/ring.go new file mode 100644 index 000000000..c675c1e83 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/daemon/logger/ring.go @@ -0,0 +1,223 @@ +package logger // import "github.com/docker/docker/daemon/logger" + +import ( + "errors" + "sync" + "sync/atomic" + + 
"github.com/sirupsen/logrus" +) + +const ( + defaultRingMaxSize = 1e6 // 1MB +) + +// RingLogger is a ring buffer that implements the Logger interface. +// This is used when lossy logging is OK. +type RingLogger struct { + buffer *messageRing + l Logger + logInfo Info + closeFlag int32 +} + +type ringWithReader struct { + *RingLogger +} + +func (r *ringWithReader) ReadLogs(cfg ReadConfig) *LogWatcher { + reader, ok := r.l.(LogReader) + if !ok { + // something is wrong if we get here + panic("expected log reader") + } + return reader.ReadLogs(cfg) +} + +func newRingLogger(driver Logger, logInfo Info, maxSize int64) *RingLogger { + l := &RingLogger{ + buffer: newRing(maxSize), + l: driver, + logInfo: logInfo, + } + go l.run() + return l +} + +// NewRingLogger creates a new Logger that is implemented as a RingBuffer wrapping +// the passed in logger. +func NewRingLogger(driver Logger, logInfo Info, maxSize int64) Logger { + if maxSize < 0 { + maxSize = defaultRingMaxSize + } + l := newRingLogger(driver, logInfo, maxSize) + if _, ok := driver.(LogReader); ok { + return &ringWithReader{l} + } + return l +} + +// Log queues messages into the ring buffer +func (r *RingLogger) Log(msg *Message) error { + if r.closed() { + return errClosed + } + return r.buffer.Enqueue(msg) +} + +// Name returns the name of the underlying logger +func (r *RingLogger) Name() string { + return r.l.Name() +} + +func (r *RingLogger) closed() bool { + return atomic.LoadInt32(&r.closeFlag) == 1 +} + +func (r *RingLogger) setClosed() { + atomic.StoreInt32(&r.closeFlag, 1) +} + +// Close closes the logger +func (r *RingLogger) Close() error { + r.setClosed() + r.buffer.Close() + // empty out the queue + var logErr bool + for _, msg := range r.buffer.Drain() { + if logErr { + // some error logging a previous message, so re-insert to message pool + // and assume log driver is hosed + PutMessage(msg) + continue + } + + if err := r.l.Log(msg); err != nil { + logrus.WithField("driver", r.l.Name()). + WithField("container", r.logInfo.ContainerID). + WithError(err). + Errorf("Error writing log message") + logErr = true + } + } + return r.l.Close() +} + +// run consumes messages from the ring buffer and forwards them to the underling +// logger. +// This is run in a goroutine when the RingLogger is created +func (r *RingLogger) run() { + for { + if r.closed() { + return + } + msg, err := r.buffer.Dequeue() + if err != nil { + // buffer is closed + return + } + if err := r.l.Log(msg); err != nil { + logrus.WithField("driver", r.l.Name()). + WithField("container", r.logInfo.ContainerID). + WithError(err). + Errorf("Error writing log message") + } + } +} + +type messageRing struct { + mu sync.Mutex + // signals callers of `Dequeue` to wake up either on `Close` or when a new `Message` is added + wait *sync.Cond + + sizeBytes int64 // current buffer size + maxBytes int64 // max buffer size size + queue []*Message + closed bool +} + +func newRing(maxBytes int64) *messageRing { + queueSize := 1000 + if maxBytes == 0 || maxBytes == 1 { + // With 0 or 1 max byte size, the maximum size of the queue would only ever be 1 + // message long. + queueSize = 1 + } + + r := &messageRing{queue: make([]*Message, 0, queueSize), maxBytes: maxBytes} + r.wait = sync.NewCond(&r.mu) + return r +} + +// Enqueue adds a message to the buffer queue +// If the message is too big for the buffer it drops the new message. +// If there are no messages in the queue and the message is still too big, it adds the message anyway. 
+func (r *messageRing) Enqueue(m *Message) error { + mSize := int64(len(m.Line)) + + r.mu.Lock() + if r.closed { + r.mu.Unlock() + return errClosed + } + if mSize+r.sizeBytes > r.maxBytes && len(r.queue) > 0 { + r.wait.Signal() + r.mu.Unlock() + return nil + } + + r.queue = append(r.queue, m) + r.sizeBytes += mSize + r.wait.Signal() + r.mu.Unlock() + return nil +} + +// Dequeue pulls a message off the queue +// If there are no messages, it waits for one. +// If the buffer is closed, it will return immediately. +func (r *messageRing) Dequeue() (*Message, error) { + r.mu.Lock() + for len(r.queue) == 0 && !r.closed { + r.wait.Wait() + } + + if r.closed { + r.mu.Unlock() + return nil, errClosed + } + + msg := r.queue[0] + r.queue = r.queue[1:] + r.sizeBytes -= int64(len(msg.Line)) + r.mu.Unlock() + return msg, nil +} + +var errClosed = errors.New("closed") + +// Close closes the buffer ensuring no new messages can be added. +// Any callers waiting to dequeue a message will be woken up. +func (r *messageRing) Close() { + r.mu.Lock() + if r.closed { + r.mu.Unlock() + return + } + + r.closed = true + r.wait.Broadcast() + r.mu.Unlock() +} + +// Drain drains all messages from the queue. +// This can be used after `Close()` to get any remaining messages that were in queue. +func (r *messageRing) Drain() []*Message { + r.mu.Lock() + ls := make([]*Message, 0, len(r.queue)) + ls = append(ls, r.queue...) + r.sizeBytes = 0 + r.queue = r.queue[:0] + r.mu.Unlock() + return ls +} diff --git a/extender/code/vendor/github.com/docker/docker/daemon/logger/templates/templates.go b/extender/code/vendor/github.com/docker/docker/daemon/logger/templates/templates.go new file mode 100644 index 000000000..ab76d0f1c --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/daemon/logger/templates/templates.go @@ -0,0 +1,50 @@ +package templates // import "github.com/docker/docker/daemon/logger/templates" + +import ( + "bytes" + "encoding/json" + "strings" + "text/template" +) + +// basicFunctions are the set of initial +// functions provided to every template. +var basicFunctions = template.FuncMap{ + "json": func(v interface{}) string { + buf := &bytes.Buffer{} + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + enc.Encode(v) + // Remove the trailing new line added by the encoder + return strings.TrimSpace(buf.String()) + }, + "split": strings.Split, + "join": strings.Join, + "title": strings.Title, + "lower": strings.ToLower, + "upper": strings.ToUpper, + "pad": padWithSpace, + "truncate": truncateWithLength, +} + +// NewParse creates a new tagged template with the basic functions +// and parses the given format. 
+func NewParse(tag, format string) (*template.Template, error) { + return template.New(tag).Funcs(basicFunctions).Parse(format) +} + +// padWithSpace adds whitespace to the input if the input is non-empty +func padWithSpace(source string, prefix, suffix int) string { + if source == "" { + return source + } + return strings.Repeat(" ", prefix) + source + strings.Repeat(" ", suffix) +} + +// truncateWithLength truncates the source string up to the length provided by the input +func truncateWithLength(source string, length int) string { + if len(source) < length { + return source + } + return source[:length] +} diff --git a/extender/code/vendor/github.com/docker/docker/daemon/network/filter.go b/extender/code/vendor/github.com/docker/docker/daemon/network/filter.go new file mode 100644 index 000000000..258a0f444 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/daemon/network/filter.go @@ -0,0 +1,130 @@ +package network // import "github.com/docker/docker/daemon/network" + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/runconfig" + "github.com/pkg/errors" +) + +// FilterNetworks filters network list according to user specified filter +// and returns user chosen networks +func FilterNetworks(nws []types.NetworkResource, filter filters.Args) ([]types.NetworkResource, error) { + // if filter is empty, return original network list + if filter.Len() == 0 { + return nws, nil + } + + displayNet := nws[:0] + for _, nw := range nws { + if filter.Contains("driver") { + if !filter.ExactMatch("driver", nw.Driver) { + continue + } + } + if filter.Contains("name") { + if !filter.Match("name", nw.Name) { + continue + } + } + if filter.Contains("id") { + if !filter.Match("id", nw.ID) { + continue + } + } + if filter.Contains("label") { + if !filter.MatchKVList("label", nw.Labels) { + continue + } + } + if filter.Contains("scope") { + if !filter.ExactMatch("scope", nw.Scope) { + continue + } + } + + if filter.Contains("idOrName") { + if !filter.Match("name", nw.Name) && !filter.Match("id", nw.Name) { + continue + } + } + displayNet = append(displayNet, nw) + } + + if values := filter.Get("dangling"); len(values) > 0 { + if len(values) > 1 { + return nil, errdefs.InvalidParameter(errors.New(`got more than one value for filter key "dangling"`)) + } + + var danglingOnly bool + switch values[0] { + case "0", "false": + // dangling is false already + case "1", "true": + danglingOnly = true + default: + return nil, errdefs.InvalidParameter(errors.New(`invalid value for filter 'dangling', must be "true" (or "1"), or "false" (or "0")`)) + } + + displayNet = filterNetworkByUse(displayNet, danglingOnly) + } + + if filter.Contains("type") { + typeNet := []types.NetworkResource{} + errFilter := filter.WalkValues("type", func(fval string) error { + passList, err := filterNetworkByType(displayNet, fval) + if err != nil { + return err + } + typeNet = append(typeNet, passList...) 
+			return nil
+		})
+		if errFilter != nil {
+			return nil, errFilter
+		}
+		displayNet = typeNet
+	}
+
+	return displayNet, nil
+}
+
+func filterNetworkByUse(nws []types.NetworkResource, danglingOnly bool) []types.NetworkResource {
+	retNws := []types.NetworkResource{}
+
+	filterFunc := func(nw types.NetworkResource) bool {
+		if danglingOnly {
+			return !runconfig.IsPreDefinedNetwork(nw.Name) && len(nw.Containers) == 0 && len(nw.Services) == 0
+		}
+		return runconfig.IsPreDefinedNetwork(nw.Name) || len(nw.Containers) > 0 || len(nw.Services) > 0
+	}
+
+	for _, nw := range nws {
+		if filterFunc(nw) {
+			retNws = append(retNws, nw)
+		}
+	}
+
+	return retNws
+}
+
+func filterNetworkByType(nws []types.NetworkResource, netType string) ([]types.NetworkResource, error) {
+	retNws := []types.NetworkResource{}
+	switch netType {
+	case "builtin":
+		for _, nw := range nws {
+			if runconfig.IsPreDefinedNetwork(nw.Name) {
+				retNws = append(retNws, nw)
+			}
+		}
+	case "custom":
+		for _, nw := range nws {
+			if !runconfig.IsPreDefinedNetwork(nw.Name) {
+				retNws = append(retNws, nw)
+			}
+		}
+	default:
+		return nil, errors.Errorf("invalid filter: 'type'='%s'", netType)
+	}
+	return retNws, nil
+}
diff --git a/extender/code/vendor/github.com/docker/docker/daemon/network/settings.go b/extender/code/vendor/github.com/docker/docker/daemon/network/settings.go
new file mode 100644
index 000000000..7696d4020
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/daemon/network/settings.go
@@ -0,0 +1,81 @@
+package network // import "github.com/docker/docker/daemon/network"
+
+import (
+	"net"
+	"sync"
+
+	networktypes "github.com/docker/docker/api/types/network"
+	clustertypes "github.com/docker/docker/daemon/cluster/provider"
+	"github.com/docker/go-connections/nat"
+	"github.com/pkg/errors"
+)
+
+// Settings stores configuration details about the daemon network config
+// TODO Windows. Many of these fields can be factored out.
+type Settings struct {
+	Bridge                 string
+	SandboxID              string
+	HairpinMode            bool
+	LinkLocalIPv6Address   string
+	LinkLocalIPv6PrefixLen int
+	Networks               map[string]*EndpointSettings
+	Service                *clustertypes.ServiceConfig
+	Ports                  nat.PortMap
+	SandboxKey             string
+	SecondaryIPAddresses   []networktypes.Address
+	SecondaryIPv6Addresses []networktypes.Address
+	IsAnonymousEndpoint    bool
+	HasSwarmEndpoint       bool
+}
+
+// EndpointSettings is a package local wrapper for
+// networktypes.EndpointSettings which stores Endpoint state that
+// needs to be persisted to disk but not exposed in the API.
+type EndpointSettings struct {
+	*networktypes.EndpointSettings
+	IPAMOperational bool
+}
+
+// AttachmentStore stores the load balancer IP address for a network id.
+type AttachmentStore struct {
+	sync.Mutex
+	// key: network ID
+	// value: load balancer IP address
+	networkToNodeLBIP map[string]net.IP
+}
+
+// ResetAttachments clears any existing load balancer IP to network mapping and
+// sets the mapping to the given attachments.
+func (store *AttachmentStore) ResetAttachments(attachments map[string]string) error {
+	store.Lock()
+	defer store.Unlock()
+	store.clearAttachments()
+	for nid, nodeIP := range attachments {
+		ip, _, err := net.ParseCIDR(nodeIP)
+		if err != nil {
+			store.networkToNodeLBIP = make(map[string]net.IP)
+			return errors.Wrapf(err, "Failed to parse load balancer address %s", nodeIP)
+		}
+		store.networkToNodeLBIP[nid] = ip
+	}
+	return nil
+}
+
+// ClearAttachments clears all the mappings of network to load balancer IP address.
+func (store *AttachmentStore) ClearAttachments() {
+	store.Lock()
+	defer store.Unlock()
+	store.clearAttachments()
+}
+
+func (store *AttachmentStore) clearAttachments() {
+	store.networkToNodeLBIP = make(map[string]net.IP)
+}
+
+// GetIPForNetwork returns the load balancer IP address for the given network.
+func (store *AttachmentStore) GetIPForNetwork(networkID string) (net.IP, bool) {
+	store.Lock()
+	defer store.Unlock()
+	ip, exists := store.networkToNodeLBIP[networkID]
+	return ip, exists
+}
diff --git a/extender/code/vendor/github.com/docker/docker/dockerversion/useragent.go b/extender/code/vendor/github.com/docker/docker/dockerversion/useragent.go
new file mode 100644
index 000000000..afbdcd858
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/dockerversion/useragent.go
@@ -0,0 +1,76 @@
+package dockerversion // import "github.com/docker/docker/dockerversion"
+
+import (
+	"context"
+	"fmt"
+	"runtime"
+
+	"github.com/docker/docker/pkg/parsers/kernel"
+	"github.com/docker/docker/pkg/useragent"
+)
+
+// UAStringKey is used as key type for user-agent string in net/context struct
+type UAStringKey struct{}
+
+// DockerUserAgent is the User-Agent the Docker client uses to identify itself.
+// In accordance with RFC 7231 (section 5.5.3), it is of the form:
+// [docker client's UA] UpstreamClient([upstream client's UA])
+func DockerUserAgent(ctx context.Context) string {
+	httpVersion := make([]useragent.VersionInfo, 0, 6)
+	httpVersion = append(httpVersion, useragent.VersionInfo{Name: "docker", Version: Version})
+	httpVersion = append(httpVersion, useragent.VersionInfo{Name: "go", Version: runtime.Version()})
+	httpVersion = append(httpVersion, useragent.VersionInfo{Name: "git-commit", Version: GitCommit})
+	if kernelVersion, err := kernel.GetKernelVersion(); err == nil {
+		httpVersion = append(httpVersion, useragent.VersionInfo{Name: "kernel", Version: kernelVersion.String()})
+	}
+	httpVersion = append(httpVersion, useragent.VersionInfo{Name: "os", Version: runtime.GOOS})
+	httpVersion = append(httpVersion, useragent.VersionInfo{Name: "arch", Version: runtime.GOARCH})
+
+	dockerUA := useragent.AppendVersions("", httpVersion...)
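+	// NOTE (added for clarity; values are illustrative): dockerUA now looks
+	// roughly like "docker/20.10.0 go/go1.17 git-commit/abc123 kernel/5.10.0 os/linux arch/amd64",
+	// with the exact values depending on the build and host.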
+ upstreamUA := getUserAgentFromContext(ctx) + if len(upstreamUA) > 0 { + ret := insertUpstreamUserAgent(upstreamUA, dockerUA) + return ret + } + return dockerUA +} + +// getUserAgentFromContext returns the previously saved user-agent context stored in ctx, if one exists +func getUserAgentFromContext(ctx context.Context) string { + var upstreamUA string + if ctx != nil { + var ki interface{} = ctx.Value(UAStringKey{}) + if ki != nil { + upstreamUA = ctx.Value(UAStringKey{}).(string) + } + } + return upstreamUA +} + +// escapeStr returns s with every rune in charsToEscape escaped by a backslash +func escapeStr(s string, charsToEscape string) string { + var ret string + for _, currRune := range s { + appended := false + for _, escapableRune := range charsToEscape { + if currRune == escapableRune { + ret += `\` + string(currRune) + appended = true + break + } + } + if !appended { + ret += string(currRune) + } + } + return ret +} + +// insertUpstreamUserAgent adds the upstream client useragent to create a user-agent +// string of the form: +// $dockerUA UpstreamClient($upstreamUA) +func insertUpstreamUserAgent(upstreamUA string, dockerUA string) string { + charsToEscape := `();\` + upstreamUAEscaped := escapeStr(upstreamUA, charsToEscape) + return fmt.Sprintf("%s UpstreamClient(%s)", dockerUA, upstreamUAEscaped) +} diff --git a/extender/code/vendor/github.com/docker/docker/dockerversion/version_lib.go b/extender/code/vendor/github.com/docker/docker/dockerversion/version_lib.go new file mode 100644 index 000000000..b7d465044 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/dockerversion/version_lib.go @@ -0,0 +1,17 @@ +// +build !autogen + +// Package dockerversion is auto-generated at build-time +package dockerversion // import "github.com/docker/docker/dockerversion" + +// Default build-time variable for library-import. +// This file is overridden on build with build-time information. +const ( + GitCommit = "library-import" + Version = "library-import" + BuildTime = "library-import" + IAmStatic = "library-import" + InitCommitID = "library-import" + PlatformName = "" + ProductName = "" + DefaultProductLicense = "" +) diff --git a/extender/code/vendor/github.com/docker/docker/errdefs/defs.go b/extender/code/vendor/github.com/docker/docker/errdefs/defs.go new file mode 100644 index 000000000..e6a2275b2 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/errdefs/defs.go @@ -0,0 +1,74 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +// ErrNotFound signals that the requested object doesn't exist +type ErrNotFound interface { + NotFound() +} + +// ErrInvalidParameter signals that the user input is invalid +type ErrInvalidParameter interface { + InvalidParameter() +} + +// ErrConflict signals that some internal state conflicts with the requested action and can't be performed. +// A change in state should be able to clear this error. +type ErrConflict interface { + Conflict() +} + +// ErrUnauthorized is used to signify that the user is not authorized to perform a specific action +type ErrUnauthorized interface { + Unauthorized() +} + +// ErrUnavailable signals that the requested action/subsystem is not available. +type ErrUnavailable interface { + Unavailable() +} + +// ErrForbidden signals that the requested action cannot be performed under any circumstances. +// When a ErrForbidden is returned, the caller should never retry the action. +type ErrForbidden interface { + Forbidden() +} + +// ErrSystem signals that some internal error occurred. 
+// An example of this would be a failed mount request. +type ErrSystem interface { + System() +} + +// ErrNotModified signals that an action can't be performed because it's already in the desired state +type ErrNotModified interface { + NotModified() +} + +// ErrAlreadyExists is a special case of ErrConflict which signals that the desired object already exists +type ErrAlreadyExists interface { + AlreadyExists() +} + +// ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured. +type ErrNotImplemented interface { + NotImplemented() +} + +// ErrUnknown signals that the kind of error that occurred is not known. +type ErrUnknown interface { + Unknown() +} + +// ErrCancelled signals that the action was cancelled. +type ErrCancelled interface { + Cancelled() +} + +// ErrDeadline signals that the deadline was reached before the action completed. +type ErrDeadline interface { + DeadlineExceeded() +} + +// ErrDataLoss indicates that data was lost or there is data corruption. +type ErrDataLoss interface { + DataLoss() +} diff --git a/extender/code/vendor/github.com/docker/docker/errdefs/doc.go b/extender/code/vendor/github.com/docker/docker/errdefs/doc.go new file mode 100644 index 000000000..c211f174f --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/errdefs/doc.go @@ -0,0 +1,8 @@ +// Package errdefs defines a set of error interfaces that packages should use for communicating classes of errors. +// Errors that cross the package boundary should implement one (and only one) of these interfaces. +// +// Packages should not reference these interfaces directly, only implement them. +// To check if a particular error implements one of these interfaces, there are helper +// functions provided (e.g. `Is`) which can be used rather than asserting the interfaces directly. +// If you must assert on these interfaces, be sure to check the causal chain (`err.Cause()`). 
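+//
+// A minimal sketch of the intended pattern (assumed usage; not from the upstream source):
+//
+//	err := errdefs.NotFound(errors.New("no such image"))
+//	if errdefs.IsNotFound(err) {
+//		// surface as a 404 at the API boundary
+//	}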
+package errdefs // import "github.com/docker/docker/errdefs" diff --git a/extender/code/vendor/github.com/docker/docker/errdefs/helpers.go b/extender/code/vendor/github.com/docker/docker/errdefs/helpers.go new file mode 100644 index 000000000..a28881caf --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/errdefs/helpers.go @@ -0,0 +1,243 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +import "context" + +type errNotFound struct{ error } + +func (errNotFound) NotFound() {} + +func (e errNotFound) Cause() error { + return e.error +} + +// NotFound is a helper to create an error of the class with the same name from any error type +func NotFound(err error) error { + if err == nil || IsNotFound(err) { + return err + } + return errNotFound{err} +} + +type errInvalidParameter struct{ error } + +func (errInvalidParameter) InvalidParameter() {} + +func (e errInvalidParameter) Cause() error { + return e.error +} + +// InvalidParameter is a helper to create an error of the class with the same name from any error type +func InvalidParameter(err error) error { + if err == nil || IsInvalidParameter(err) { + return err + } + return errInvalidParameter{err} +} + +type errConflict struct{ error } + +func (errConflict) Conflict() {} + +func (e errConflict) Cause() error { + return e.error +} + +// Conflict is a helper to create an error of the class with the same name from any error type +func Conflict(err error) error { + if err == nil || IsConflict(err) { + return err + } + return errConflict{err} +} + +type errUnauthorized struct{ error } + +func (errUnauthorized) Unauthorized() {} + +func (e errUnauthorized) Cause() error { + return e.error +} + +// Unauthorized is a helper to create an error of the class with the same name from any error type +func Unauthorized(err error) error { + if err == nil || IsUnauthorized(err) { + return err + } + return errUnauthorized{err} +} + +type errUnavailable struct{ error } + +func (errUnavailable) Unavailable() {} + +func (e errUnavailable) Cause() error { + return e.error +} + +// Unavailable is a helper to create an error of the class with the same name from any error type +func Unavailable(err error) error { + if err == nil || IsUnavailable(err) { + return err + } + return errUnavailable{err} +} + +type errForbidden struct{ error } + +func (errForbidden) Forbidden() {} + +func (e errForbidden) Cause() error { + return e.error +} + +// Forbidden is a helper to create an error of the class with the same name from any error type +func Forbidden(err error) error { + if err == nil || IsForbidden(err) { + return err + } + return errForbidden{err} +} + +type errSystem struct{ error } + +func (errSystem) System() {} + +func (e errSystem) Cause() error { + return e.error +} + +// System is a helper to create an error of the class with the same name from any error type +func System(err error) error { + if err == nil || IsSystem(err) { + return err + } + return errSystem{err} +} + +type errNotModified struct{ error } + +func (errNotModified) NotModified() {} + +func (e errNotModified) Cause() error { + return e.error +} + +// NotModified is a helper to create an error of the class with the same name from any error type +func NotModified(err error) error { + if err == nil || IsNotModified(err) { + return err + } + return errNotModified{err} +} + +type errAlreadyExists struct{ error } + +func (errAlreadyExists) AlreadyExists() {} + +func (e errAlreadyExists) Cause() error { + return e.error +} + +// AlreadyExists is a helper to create an error 
of the class with the same name from any error type +func AlreadyExists(err error) error { + if err == nil || IsAlreadyExists(err) { + return err + } + return errAlreadyExists{err} +} + +type errNotImplemented struct{ error } + +func (errNotImplemented) NotImplemented() {} + +func (e errNotImplemented) Cause() error { + return e.error +} + +// NotImplemented is a helper to create an error of the class with the same name from any error type +func NotImplemented(err error) error { + if err == nil || IsNotImplemented(err) { + return err + } + return errNotImplemented{err} +} + +type errUnknown struct{ error } + +func (errUnknown) Unknown() {} + +func (e errUnknown) Cause() error { + return e.error +} + +// Unknown is a helper to create an error of the class with the same name from any error type +func Unknown(err error) error { + if err == nil || IsUnknown(err) { + return err + } + return errUnknown{err} +} + +type errCancelled struct{ error } + +func (errCancelled) Cancelled() {} + +func (e errCancelled) Cause() error { + return e.error +} + +// Cancelled is a helper to create an error of the class with the same name from any error type +func Cancelled(err error) error { + if err == nil || IsCancelled(err) { + return err + } + return errCancelled{err} +} + +type errDeadline struct{ error } + +func (errDeadline) DeadlineExceeded() {} + +func (e errDeadline) Cause() error { + return e.error +} + +// Deadline is a helper to create an error of the class with the same name from any error type +func Deadline(err error) error { + if err == nil || IsDeadline(err) { + return err + } + return errDeadline{err} +} + +type errDataLoss struct{ error } + +func (errDataLoss) DataLoss() {} + +func (e errDataLoss) Cause() error { + return e.error +} + +// DataLoss is a helper to create an error of the class with the same name from any error type +func DataLoss(err error) error { + if err == nil || IsDataLoss(err) { + return err + } + return errDataLoss{err} +} + +// FromContext returns the error class from the passed in context +func FromContext(ctx context.Context) error { + e := ctx.Err() + if e == nil { + return nil + } + + if e == context.Canceled { + return Cancelled(e) + } + if e == context.DeadlineExceeded { + return Deadline(e) + } + return Unknown(e) +} diff --git a/extender/code/vendor/github.com/docker/docker/errdefs/http_helpers.go b/extender/code/vendor/github.com/docker/docker/errdefs/http_helpers.go new file mode 100644 index 000000000..9884eb86b --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/errdefs/http_helpers.go @@ -0,0 +1,172 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +import ( + "fmt" + "net/http" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/sirupsen/logrus" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// GetHTTPErrorStatusCode retrieves status code from error message. +func GetHTTPErrorStatusCode(err error) int { + if err == nil { + logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling") + return http.StatusInternalServerError + } + + var statusCode int + + // Stop right there + // Are you sure you should be adding a new error class here? Do one of the existing ones work? + + // Note that the below functions are already checking the error causal chain for matches. 
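+	// NOTE (added for clarity): for example, a NotFound-wrapped error maps to
+	// http.StatusNotFound and a Conflict-wrapped error to http.StatusConflict below.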
+	switch {
+	case IsNotFound(err):
+		statusCode = http.StatusNotFound
+	case IsInvalidParameter(err):
+		statusCode = http.StatusBadRequest
+	case IsConflict(err) || IsAlreadyExists(err):
+		statusCode = http.StatusConflict
+	case IsUnauthorized(err):
+		statusCode = http.StatusUnauthorized
+	case IsUnavailable(err):
+		statusCode = http.StatusServiceUnavailable
+	case IsForbidden(err):
+		statusCode = http.StatusForbidden
+	case IsNotModified(err):
+		statusCode = http.StatusNotModified
+	case IsNotImplemented(err):
+		statusCode = http.StatusNotImplemented
+	case IsSystem(err) || IsUnknown(err) || IsDataLoss(err) || IsDeadline(err) || IsCancelled(err):
+		statusCode = http.StatusInternalServerError
+	default:
+		statusCode = statusCodeFromGRPCError(err)
+		if statusCode != http.StatusInternalServerError {
+			return statusCode
+		}
+		statusCode = statusCodeFromDistributionError(err)
+		if statusCode != http.StatusInternalServerError {
+			return statusCode
+		}
+		if e, ok := err.(causer); ok {
+			return GetHTTPErrorStatusCode(e.Cause())
+		}
+
+		logrus.WithFields(logrus.Fields{
+			"module":     "api",
+			"error_type": fmt.Sprintf("%T", err),
+		}).Debugf("FIXME: Got an API for which error does not match any expected type!!!: %+v", err)
+	}
+
+	if statusCode == 0 {
+		statusCode = http.StatusInternalServerError
+	}
+
+	return statusCode
+}
+
+// FromStatusCode creates an errdef error, based on the provided HTTP status-code
+func FromStatusCode(err error, statusCode int) error {
+	if err == nil {
+		return err
+	}
+	switch statusCode {
+	case http.StatusNotFound:
+		err = NotFound(err)
+	case http.StatusBadRequest:
+		err = InvalidParameter(err)
+	case http.StatusConflict:
+		err = Conflict(err)
+	case http.StatusUnauthorized:
+		err = Unauthorized(err)
+	case http.StatusServiceUnavailable:
+		err = Unavailable(err)
+	case http.StatusForbidden:
+		err = Forbidden(err)
+	case http.StatusNotModified:
+		err = NotModified(err)
+	case http.StatusNotImplemented:
+		err = NotImplemented(err)
+	case http.StatusInternalServerError:
+		if !IsSystem(err) && !IsUnknown(err) && !IsDataLoss(err) && !IsDeadline(err) && !IsCancelled(err) {
+			err = System(err)
+		}
+	default:
+		logrus.WithFields(logrus.Fields{
+			"module":      "api",
+			"status_code": fmt.Sprintf("%d", statusCode),
+		}).Debugf("FIXME: Got a status-code for which error does not match any expected type!!!: %d", statusCode)
+
+		switch {
+		case statusCode >= 200 && statusCode < 400:
+			// 2xx and 3xx indicate success or redirection; no error class applies
+		case statusCode >= 400 && statusCode < 500:
+			err = InvalidParameter(err)
+		case statusCode >= 500 && statusCode < 600:
+			err = System(err)
+		default:
+			err = Unknown(err)
+		}
+	}
+	return err
+}
+
+// statusCodeFromGRPCError returns status code according to gRPC error
+func statusCodeFromGRPCError(err error) int {
+	switch status.Code(err) {
+	case codes.InvalidArgument: // code 3
+		return http.StatusBadRequest
+	case codes.NotFound: // code 5
+		return http.StatusNotFound
+	case codes.AlreadyExists: // code 6
+		return http.StatusConflict
+	case codes.PermissionDenied: // code 7
+		return http.StatusForbidden
+	case codes.FailedPrecondition: // code 9
+		return http.StatusBadRequest
+	case codes.Unauthenticated: // code 16
+		return http.StatusUnauthorized
+	case codes.OutOfRange: // code 11
+		return http.StatusBadRequest
+	case codes.Unimplemented: // code 12
+		return http.StatusNotImplemented
+	case codes.Unavailable: // code 14
+		return http.StatusServiceUnavailable
+	default:
+		if e, ok := err.(causer); ok {
+			return statusCodeFromGRPCError(e.Cause())
+		}
+		// codes.Canceled(1)
+ // codes.Unknown(2) + // codes.DeadlineExceeded(4) + // codes.ResourceExhausted(8) + // codes.Aborted(10) + // codes.Internal(13) + // codes.DataLoss(15) + return http.StatusInternalServerError + } +} + +// statusCodeFromDistributionError returns status code according to registry errcode +// code is loosely based on errcode.ServeJSON() in docker/distribution +func statusCodeFromDistributionError(err error) int { + switch errs := err.(type) { + case errcode.Errors: + if len(errs) < 1 { + return http.StatusInternalServerError + } + if _, ok := errs[0].(errcode.ErrorCoder); ok { + return statusCodeFromDistributionError(errs[0]) + } + case errcode.ErrorCoder: + return errs.ErrorCode().Descriptor().HTTPStatusCode + default: + if e, ok := err.(causer); ok { + return statusCodeFromDistributionError(e.Cause()) + } + } + return http.StatusInternalServerError +} diff --git a/extender/code/vendor/github.com/docker/docker/errdefs/is.go b/extender/code/vendor/github.com/docker/docker/errdefs/is.go new file mode 100644 index 000000000..e0513331b --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/errdefs/is.go @@ -0,0 +1,114 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +type causer interface { + Cause() error +} + +func getImplementer(err error) error { + switch e := err.(type) { + case + ErrNotFound, + ErrInvalidParameter, + ErrConflict, + ErrUnauthorized, + ErrUnavailable, + ErrForbidden, + ErrSystem, + ErrNotModified, + ErrAlreadyExists, + ErrNotImplemented, + ErrCancelled, + ErrDeadline, + ErrDataLoss, + ErrUnknown: + return err + case causer: + return getImplementer(e.Cause()) + default: + return err + } +} + +// IsNotFound returns if the passed in error is an ErrNotFound +func IsNotFound(err error) bool { + _, ok := getImplementer(err).(ErrNotFound) + return ok +} + +// IsInvalidParameter returns if the passed in error is an ErrInvalidParameter +func IsInvalidParameter(err error) bool { + _, ok := getImplementer(err).(ErrInvalidParameter) + return ok +} + +// IsConflict returns if the passed in error is an ErrConflict +func IsConflict(err error) bool { + _, ok := getImplementer(err).(ErrConflict) + return ok +} + +// IsUnauthorized returns if the passed in error is an ErrUnauthorized +func IsUnauthorized(err error) bool { + _, ok := getImplementer(err).(ErrUnauthorized) + return ok +} + +// IsUnavailable returns if the passed in error is an ErrUnavailable +func IsUnavailable(err error) bool { + _, ok := getImplementer(err).(ErrUnavailable) + return ok +} + +// IsForbidden returns if the passed in error is an ErrForbidden +func IsForbidden(err error) bool { + _, ok := getImplementer(err).(ErrForbidden) + return ok +} + +// IsSystem returns if the passed in error is an ErrSystem +func IsSystem(err error) bool { + _, ok := getImplementer(err).(ErrSystem) + return ok +} + +// IsNotModified returns if the passed in error is a NotModified error +func IsNotModified(err error) bool { + _, ok := getImplementer(err).(ErrNotModified) + return ok +} + +// IsAlreadyExists returns if the passed in error is a AlreadyExists error +func IsAlreadyExists(err error) bool { + _, ok := getImplementer(err).(ErrAlreadyExists) + return ok +} + +// IsNotImplemented returns if the passed in error is an ErrNotImplemented +func IsNotImplemented(err error) bool { + _, ok := getImplementer(err).(ErrNotImplemented) + return ok +} + +// IsUnknown returns if the passed in error is an ErrUnknown +func IsUnknown(err error) bool { + _, ok := getImplementer(err).(ErrUnknown) + return ok +} + +// 
IsCancelled returns if the passed in error is an ErrCancelled +func IsCancelled(err error) bool { + _, ok := getImplementer(err).(ErrCancelled) + return ok +} + +// IsDeadline returns if the passed in error is an ErrDeadline +func IsDeadline(err error) bool { + _, ok := getImplementer(err).(ErrDeadline) + return ok +} + +// IsDataLoss returns if the passed in error is an ErrDataLoss +func IsDataLoss(err error) bool { + _, ok := getImplementer(err).(ErrDataLoss) + return ok +} diff --git a/extender/code/vendor/github.com/docker/docker/image/fs.go b/extender/code/vendor/github.com/docker/docker/image/fs.go new file mode 100644 index 000000000..7080c8c01 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/image/fs.go @@ -0,0 +1,175 @@ +package image // import "github.com/docker/docker/image" + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/docker/docker/pkg/ioutils" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// DigestWalkFunc is function called by StoreBackend.Walk +type DigestWalkFunc func(id digest.Digest) error + +// StoreBackend provides interface for image.Store persistence +type StoreBackend interface { + Walk(f DigestWalkFunc) error + Get(id digest.Digest) ([]byte, error) + Set(data []byte) (digest.Digest, error) + Delete(id digest.Digest) error + SetMetadata(id digest.Digest, key string, data []byte) error + GetMetadata(id digest.Digest, key string) ([]byte, error) + DeleteMetadata(id digest.Digest, key string) error +} + +// fs implements StoreBackend using the filesystem. +type fs struct { + sync.RWMutex + root string +} + +const ( + contentDirName = "content" + metadataDirName = "metadata" +) + +// NewFSStoreBackend returns new filesystem based backend for image.Store +func NewFSStoreBackend(root string) (StoreBackend, error) { + return newFSStore(root) +} + +func newFSStore(root string) (*fs, error) { + s := &fs{ + root: root, + } + if err := os.MkdirAll(filepath.Join(root, contentDirName, string(digest.Canonical)), 0700); err != nil { + return nil, errors.Wrap(err, "failed to create storage backend") + } + if err := os.MkdirAll(filepath.Join(root, metadataDirName, string(digest.Canonical)), 0700); err != nil { + return nil, errors.Wrap(err, "failed to create storage backend") + } + return s, nil +} + +func (s *fs) contentFile(dgst digest.Digest) string { + return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Hex()) +} + +func (s *fs) metadataDir(dgst digest.Digest) string { + return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Hex()) +} + +// Walk calls the supplied callback for each image ID in the storage backend. +func (s *fs) Walk(f DigestWalkFunc) error { + // Only Canonical digest (sha256) is currently supported + s.RLock() + dir, err := ioutil.ReadDir(filepath.Join(s.root, contentDirName, string(digest.Canonical))) + s.RUnlock() + if err != nil { + return err + } + for _, v := range dir { + dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name()) + if err := dgst.Validate(); err != nil { + logrus.Debugf("skipping invalid digest %s: %s", dgst, err) + continue + } + if err := f(dgst); err != nil { + return err + } + } + return nil +} + +// Get returns the content stored under a given digest. 
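+// NOTE (added for clarity): the bytes read back are re-hashed and compared
+// against the requested digest, so a corrupted content file is rejected
+// rather than returned.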
+func (s *fs) Get(dgst digest.Digest) ([]byte, error) { + s.RLock() + defer s.RUnlock() + + return s.get(dgst) +} + +func (s *fs) get(dgst digest.Digest) ([]byte, error) { + content, err := ioutil.ReadFile(s.contentFile(dgst)) + if err != nil { + return nil, errors.Wrapf(err, "failed to get digest %s", dgst) + } + + // todo: maybe optional + if digest.FromBytes(content) != dgst { + return nil, fmt.Errorf("failed to verify: %v", dgst) + } + + return content, nil +} + +// Set stores content by checksum. +func (s *fs) Set(data []byte) (digest.Digest, error) { + s.Lock() + defer s.Unlock() + + if len(data) == 0 { + return "", fmt.Errorf("invalid empty data") + } + + dgst := digest.FromBytes(data) + if err := ioutils.AtomicWriteFile(s.contentFile(dgst), data, 0600); err != nil { + return "", errors.Wrap(err, "failed to write digest data") + } + + return dgst, nil +} + +// Delete removes content and metadata files associated with the digest. +func (s *fs) Delete(dgst digest.Digest) error { + s.Lock() + defer s.Unlock() + + if err := os.RemoveAll(s.metadataDir(dgst)); err != nil { + return err + } + return os.Remove(s.contentFile(dgst)) +} + +// SetMetadata sets metadata for a given ID. It fails if there's no base file. +func (s *fs) SetMetadata(dgst digest.Digest, key string, data []byte) error { + s.Lock() + defer s.Unlock() + if _, err := s.get(dgst); err != nil { + return err + } + + baseDir := filepath.Join(s.metadataDir(dgst)) + if err := os.MkdirAll(baseDir, 0700); err != nil { + return err + } + return ioutils.AtomicWriteFile(filepath.Join(s.metadataDir(dgst), key), data, 0600) +} + +// GetMetadata returns metadata for a given digest. +func (s *fs) GetMetadata(dgst digest.Digest, key string) ([]byte, error) { + s.RLock() + defer s.RUnlock() + + if _, err := s.get(dgst); err != nil { + return nil, err + } + bytes, err := ioutil.ReadFile(filepath.Join(s.metadataDir(dgst), key)) + if err != nil { + return nil, errors.Wrap(err, "failed to read metadata") + } + return bytes, nil +} + +// DeleteMetadata removes the metadata associated with a digest. +func (s *fs) DeleteMetadata(dgst digest.Digest, key string) error { + s.Lock() + defer s.Unlock() + + return os.RemoveAll(filepath.Join(s.metadataDir(dgst), key)) +} diff --git a/extender/code/vendor/github.com/docker/docker/image/image.go b/extender/code/vendor/github.com/docker/docker/image/image.go new file mode 100644 index 000000000..7e0646f07 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/image/image.go @@ -0,0 +1,232 @@ +package image // import "github.com/docker/docker/image" + +import ( + "encoding/json" + "errors" + "io" + "runtime" + "strings" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/layer" + "github.com/opencontainers/go-digest" +) + +// ID is the content-addressable ID of an image. +type ID digest.Digest + +func (id ID) String() string { + return id.Digest().String() +} + +// Digest converts ID into a digest +func (id ID) Digest() digest.Digest { + return digest.Digest(id) +} + +// IDFromDigest creates an ID from a digest +func IDFromDigest(digest digest.Digest) ID { + return ID(digest) +} + +// V1Image stores the V1 image configuration. 
+type V1Image struct { + // ID is a unique 64 character identifier of the image + ID string `json:"id,omitempty"` + // Parent is the ID of the parent image + Parent string `json:"parent,omitempty"` + // Comment is the commit message that was set when committing the image + Comment string `json:"comment,omitempty"` + // Created is the timestamp at which the image was created + Created time.Time `json:"created"` + // Container is the id of the container used to commit + Container string `json:"container,omitempty"` + // ContainerConfig is the configuration of the container that is committed into the image + ContainerConfig container.Config `json:"container_config,omitempty"` + // DockerVersion specifies the version of Docker that was used to build the image + DockerVersion string `json:"docker_version,omitempty"` + // Author is the name of the author that was specified when committing the image + Author string `json:"author,omitempty"` + // Config is the configuration of the container received from the client + Config *container.Config `json:"config,omitempty"` + // Architecture is the hardware that the image is built and runs on + Architecture string `json:"architecture,omitempty"` + // OS is the operating system used to build and run the image + OS string `json:"os,omitempty"` + // Size is the total size of the image including all layers it is composed of + Size int64 `json:",omitempty"` +} + +// Image stores the image configuration +type Image struct { + V1Image + Parent ID `json:"parent,omitempty"` + RootFS *RootFS `json:"rootfs,omitempty"` + History []History `json:"history,omitempty"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + + // rawJSON caches the immutable JSON associated with this image. + rawJSON []byte + + // computedID is the ID computed from the hash of the image config. + // Not to be confused with the legacy V1 ID in V1Image. + computedID ID +} + +// RawJSON returns the immutable JSON associated with the image. +func (img *Image) RawJSON() []byte { + return img.rawJSON +} + +// ID returns the image's content-addressable ID. +func (img *Image) ID() ID { + return img.computedID +} + +// ImageID stringifies ID. +func (img *Image) ImageID() string { + return img.ID().String() +} + +// RunConfig returns the image's container config. +func (img *Image) RunConfig() *container.Config { + return img.Config +} + +// BaseImgArch returns the image's architecture. If not populated, defaults to the host runtime arch. +func (img *Image) BaseImgArch() string { + arch := img.Architecture + if arch == "" { + arch = runtime.GOARCH + } + return arch +} + +// OperatingSystem returns the image's operating system. If not populated, defaults to the host runtime OS. +func (img *Image) OperatingSystem() string { + os := img.OS + if os == "" { + os = runtime.GOOS + } + return os +} + +// MarshalJSON serializes the image to JSON. It sorts the top-level keys so +// that JSON that's been manipulated by a push/pull cycle with a legacy +// registry won't end up with a different key order. +func (img *Image) MarshalJSON() ([]byte, error) { + type MarshalImage Image + + pass1, err := json.Marshal(MarshalImage(*img)) + if err != nil { + return nil, err + } + + var c map[string]*json.RawMessage + if err := json.Unmarshal(pass1, &c); err != nil { + return nil, err + } + return json.Marshal(c) +} + +// ChildConfig is the configuration to apply to an Image to create a new +// Child image. Other properties of the image are copied from the parent. 
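+//
+// A minimal usage sketch (assumed, not from the upstream source; values are hypothetical):
+//
+//	child := image.NewChildImage(parent, image.ChildConfig{
+//		ContainerID:     "c0ffee",
+//		Author:          "dev@example.com",
+//		Comment:         "commit build layer",
+//		DiffID:          diffID,
+//		ContainerConfig: cfg,
+//		Config:          cfg,
+//	}, "linux")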
+type ChildConfig struct { + ContainerID string + Author string + Comment string + DiffID layer.DiffID + ContainerConfig *container.Config + Config *container.Config +} + +// NewChildImage creates a new Image as a child of this image. +func NewChildImage(img *Image, child ChildConfig, platform string) *Image { + isEmptyLayer := layer.IsEmpty(child.DiffID) + var rootFS *RootFS + if img.RootFS != nil { + rootFS = img.RootFS.Clone() + } else { + rootFS = NewRootFS() + } + + if !isEmptyLayer { + rootFS.Append(child.DiffID) + } + imgHistory := NewHistory( + child.Author, + child.Comment, + strings.Join(child.ContainerConfig.Cmd, " "), + isEmptyLayer) + + return &Image{ + V1Image: V1Image{ + DockerVersion: dockerversion.Version, + Config: child.Config, + Architecture: img.BaseImgArch(), + OS: platform, + Container: child.ContainerID, + ContainerConfig: *child.ContainerConfig, + Author: child.Author, + Created: imgHistory.Created, + }, + RootFS: rootFS, + History: append(img.History, imgHistory), + OSFeatures: img.OSFeatures, + OSVersion: img.OSVersion, + } +} + +// History stores build commands that were used to create an image +type History struct { + // Created is the timestamp at which the image was created + Created time.Time `json:"created"` + // Author is the name of the author that was specified when committing the image + Author string `json:"author,omitempty"` + // CreatedBy keeps the Dockerfile command used while building the image + CreatedBy string `json:"created_by,omitempty"` + // Comment is the commit message that was set when committing the image + Comment string `json:"comment,omitempty"` + // EmptyLayer is set to true if this history item did not generate a + // layer. Otherwise, the history item is associated with the next + // layer in the RootFS section. + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// NewHistory creates a new history struct from arguments, and sets the created +// time to the current time in UTC +func NewHistory(author, comment, createdBy string, isEmptyLayer bool) History { + return History{ + Author: author, + Created: time.Now().UTC(), + CreatedBy: createdBy, + Comment: comment, + EmptyLayer: isEmptyLayer, + } +} + +// Exporter provides interface for loading and saving images +type Exporter interface { + Load(io.ReadCloser, io.Writer, bool) error + // TODO: Load(net.Context, io.ReadCloser, <- chan StatusMessage) error + Save([]string, io.Writer) error +} + +// NewFromJSON creates an Image configuration from json. +func NewFromJSON(src []byte) (*Image, error) { + img := &Image{} + + if err := json.Unmarshal(src, img); err != nil { + return nil, err + } + if img.RootFS == nil { + return nil, errors.New("invalid image JSON, no RootFS key") + } + + img.rawJSON = src + + return img, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/image/rootfs.go b/extender/code/vendor/github.com/docker/docker/image/rootfs.go new file mode 100644 index 000000000..84843e10c --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/image/rootfs.go @@ -0,0 +1,52 @@ +package image // import "github.com/docker/docker/image" + +import ( + "runtime" + + "github.com/docker/docker/layer" + "github.com/sirupsen/logrus" +) + +// TypeLayers is used for RootFS.Type for filesystems organized into layers. +const TypeLayers = "layers" + +// typeLayersWithBase is an older format used by Windows up to v1.12. 
We
+// explicitly handle this as an error case to ensure that a daemon which still
+// has an older image like this on disk can still start, even though the
+// image itself is not usable. See https://github.com/docker/docker/pull/25806.
+const typeLayersWithBase = "layers+base"
+
+// RootFS describes an image's root filesystem.
+// This is currently a placeholder that only supports layers. In the future
+// this can be made into an interface that supports different implementations.
+type RootFS struct {
+	Type    string         `json:"type"`
+	DiffIDs []layer.DiffID `json:"diff_ids,omitempty"`
+}
+
+// NewRootFS returns an empty RootFS struct
+func NewRootFS() *RootFS {
+	return &RootFS{Type: TypeLayers}
+}
+
+// Append appends a new diffID to rootfs
+func (r *RootFS) Append(id layer.DiffID) {
+	r.DiffIDs = append(r.DiffIDs, id)
+}
+
+// Clone returns a copy of the RootFS
+func (r *RootFS) Clone() *RootFS {
+	newRoot := NewRootFS()
+	newRoot.Type = r.Type
+	// copy the DiffIDs so the clone does not share backing storage with r
+	// (a bare append(r.DiffIDs) returns the same slice, so appending to the
+	// clone could clobber the original)
+	newRoot.DiffIDs = make([]layer.DiffID, len(r.DiffIDs))
+	copy(newRoot.DiffIDs, r.DiffIDs)
+	return newRoot
+}
+
+// ChainID returns the ChainID for the top layer in RootFS.
+func (r *RootFS) ChainID() layer.ChainID {
+	if runtime.GOOS == "windows" && r.Type == typeLayersWithBase {
+		logrus.Warnf("Layer type is unsupported on this platform. DiffIDs: '%v'", r.DiffIDs)
+		return ""
+	}
+	return layer.CreateChainID(r.DiffIDs)
+}
diff --git a/extender/code/vendor/github.com/docker/docker/image/store.go b/extender/code/vendor/github.com/docker/docker/image/store.go
new file mode 100644
index 000000000..1a8a8a245
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/image/store.go
@@ -0,0 +1,346 @@
+package image // import "github.com/docker/docker/image"
+
+import (
+	"encoding/json"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/docker/distribution/digestset"
+	"github.com/docker/docker/layer"
+	"github.com/docker/docker/pkg/system"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// Store is an interface for creating and accessing images
+type Store interface {
+	Create(config []byte) (ID, error)
+	Get(id ID) (*Image, error)
+	Delete(id ID) ([]layer.Metadata, error)
+	Search(partialID string) (ID, error)
+	SetParent(id ID, parent ID) error
+	GetParent(id ID) (ID, error)
+	SetLastUpdated(id ID) error
+	GetLastUpdated(id ID) (time.Time, error)
+	Children(id ID) []ID
+	Map() map[ID]*Image
+	Heads() map[ID]*Image
+	Len() int
+}
+
+// LayerGetReleaser is a minimal interface for getting and releasing layers.
+type LayerGetReleaser interface { + Get(layer.ChainID) (layer.Layer, error) + Release(layer.Layer) ([]layer.Metadata, error) +} + +type imageMeta struct { + layer layer.Layer + children map[ID]struct{} +} + +type store struct { + sync.RWMutex + lss map[string]LayerGetReleaser + images map[ID]*imageMeta + fs StoreBackend + digestSet *digestset.Set +} + +// NewImageStore returns new store object for given set of layer stores +func NewImageStore(fs StoreBackend, lss map[string]LayerGetReleaser) (Store, error) { + is := &store{ + lss: lss, + images: make(map[ID]*imageMeta), + fs: fs, + digestSet: digestset.NewSet(), + } + + // load all current images and retain layers + if err := is.restore(); err != nil { + return nil, err + } + + return is, nil +} + +func (is *store) restore() error { + err := is.fs.Walk(func(dgst digest.Digest) error { + img, err := is.Get(IDFromDigest(dgst)) + if err != nil { + logrus.Errorf("invalid image %v, %v", dgst, err) + return nil + } + var l layer.Layer + if chainID := img.RootFS.ChainID(); chainID != "" { + if !system.IsOSSupported(img.OperatingSystem()) { + logrus.Errorf("not restoring image with unsupported operating system %v, %v, %s", dgst, chainID, img.OperatingSystem()) + return nil + } + l, err = is.lss[img.OperatingSystem()].Get(chainID) + if err != nil { + if err == layer.ErrLayerDoesNotExist { + logrus.Errorf("layer does not exist, not restoring image %v, %v, %s", dgst, chainID, img.OperatingSystem()) + return nil + } + return err + } + } + if err := is.digestSet.Add(dgst); err != nil { + return err + } + + imageMeta := &imageMeta{ + layer: l, + children: make(map[ID]struct{}), + } + + is.images[IDFromDigest(dgst)] = imageMeta + + return nil + }) + if err != nil { + return err + } + + // Second pass to fill in children maps + for id := range is.images { + if parent, err := is.GetParent(id); err == nil { + if parentMeta := is.images[parent]; parentMeta != nil { + parentMeta.children[id] = struct{}{} + } + } + } + + return nil +} + +func (is *store) Create(config []byte) (ID, error) { + var img Image + err := json.Unmarshal(config, &img) + if err != nil { + return "", err + } + + // Must reject any config that references diffIDs from the history + // which aren't among the rootfs layers. 
+ rootFSLayers := make(map[layer.DiffID]struct{}) + for _, diffID := range img.RootFS.DiffIDs { + rootFSLayers[diffID] = struct{}{} + } + + layerCounter := 0 + for _, h := range img.History { + if !h.EmptyLayer { + layerCounter++ + } + } + if layerCounter > len(img.RootFS.DiffIDs) { + return "", errors.New("too many non-empty layers in History section") + } + + dgst, err := is.fs.Set(config) + if err != nil { + return "", err + } + imageID := IDFromDigest(dgst) + + is.Lock() + defer is.Unlock() + + if _, exists := is.images[imageID]; exists { + return imageID, nil + } + + layerID := img.RootFS.ChainID() + + var l layer.Layer + if layerID != "" { + if !system.IsOSSupported(img.OperatingSystem()) { + return "", system.ErrNotSupportedOperatingSystem + } + l, err = is.lss[img.OperatingSystem()].Get(layerID) + if err != nil { + return "", errors.Wrapf(err, "failed to get layer %s", layerID) + } + } + + imageMeta := &imageMeta{ + layer: l, + children: make(map[ID]struct{}), + } + + is.images[imageID] = imageMeta + if err := is.digestSet.Add(imageID.Digest()); err != nil { + delete(is.images, imageID) + return "", err + } + + return imageID, nil +} + +type imageNotFoundError string + +func (e imageNotFoundError) Error() string { + return "No such image: " + string(e) +} + +func (imageNotFoundError) NotFound() {} + +func (is *store) Search(term string) (ID, error) { + dgst, err := is.digestSet.Lookup(term) + if err != nil { + if err == digestset.ErrDigestNotFound { + err = imageNotFoundError(term) + } + return "", errors.WithStack(err) + } + return IDFromDigest(dgst), nil +} + +func (is *store) Get(id ID) (*Image, error) { + // todo: Check if image is in images + // todo: Detect manual insertions and start using them + config, err := is.fs.Get(id.Digest()) + if err != nil { + return nil, err + } + + img, err := NewFromJSON(config) + if err != nil { + return nil, err + } + img.computedID = id + + img.Parent, err = is.GetParent(id) + if err != nil { + img.Parent = "" + } + + return img, nil +} + +func (is *store) Delete(id ID) ([]layer.Metadata, error) { + is.Lock() + defer is.Unlock() + + imageMeta := is.images[id] + if imageMeta == nil { + return nil, fmt.Errorf("unrecognized image ID %s", id.String()) + } + img, err := is.Get(id) + if err != nil { + return nil, fmt.Errorf("unrecognized image %s, %v", id.String(), err) + } + if !system.IsOSSupported(img.OperatingSystem()) { + return nil, fmt.Errorf("unsupported image operating system %q", img.OperatingSystem()) + } + for id := range imageMeta.children { + is.fs.DeleteMetadata(id.Digest(), "parent") + } + if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { + delete(is.images[parent].children, id) + } + + if err := is.digestSet.Remove(id.Digest()); err != nil { + logrus.Errorf("error removing %s from digest set: %q", id, err) + } + delete(is.images, id) + is.fs.Delete(id.Digest()) + + if imageMeta.layer != nil { + return is.lss[img.OperatingSystem()].Release(imageMeta.layer) + } + return nil, nil +} + +func (is *store) SetParent(id, parent ID) error { + is.Lock() + defer is.Unlock() + parentMeta := is.images[parent] + if parentMeta == nil { + return fmt.Errorf("unknown parent image ID %s", parent.String()) + } + if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { + delete(is.images[parent].children, id) + } + parentMeta.children[id] = struct{}{} + return is.fs.SetMetadata(id.Digest(), "parent", []byte(parent)) +} + +func (is *store) GetParent(id ID) (ID, error) { + d, err := 
is.fs.GetMetadata(id.Digest(), "parent") + if err != nil { + return "", err + } + return ID(d), nil // todo: validate? +} + +// SetLastUpdated time for the image ID to the current time +func (is *store) SetLastUpdated(id ID) error { + lastUpdated := []byte(time.Now().Format(time.RFC3339Nano)) + return is.fs.SetMetadata(id.Digest(), "lastUpdated", lastUpdated) +} + +// GetLastUpdated time for the image ID +func (is *store) GetLastUpdated(id ID) (time.Time, error) { + bytes, err := is.fs.GetMetadata(id.Digest(), "lastUpdated") + if err != nil || len(bytes) == 0 { + // No lastUpdated time + return time.Time{}, nil + } + return time.Parse(time.RFC3339Nano, string(bytes)) +} + +func (is *store) Children(id ID) []ID { + is.RLock() + defer is.RUnlock() + + return is.children(id) +} + +func (is *store) children(id ID) []ID { + var ids []ID + if is.images[id] != nil { + for id := range is.images[id].children { + ids = append(ids, id) + } + } + return ids +} + +func (is *store) Heads() map[ID]*Image { + return is.imagesMap(false) +} + +func (is *store) Map() map[ID]*Image { + return is.imagesMap(true) +} + +func (is *store) imagesMap(all bool) map[ID]*Image { + is.RLock() + defer is.RUnlock() + + images := make(map[ID]*Image) + + for id := range is.images { + if !all && len(is.children(id)) > 0 { + continue + } + img, err := is.Get(id) + if err != nil { + logrus.Errorf("invalid image access: %q, error: %q", id, err) + continue + } + images[id] = img + } + return images +} + +func (is *store) Len() int { + is.RLock() + defer is.RUnlock() + return len(is.images) +} diff --git a/extender/code/vendor/github.com/docker/docker/layer/empty.go b/extender/code/vendor/github.com/docker/docker/layer/empty.go new file mode 100644 index 000000000..c81c70214 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/layer/empty.go @@ -0,0 +1,61 @@ +package layer // import "github.com/docker/docker/layer" + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" +) + +// DigestSHA256EmptyTar is the canonical sha256 digest of empty tar file - +// (1024 NULL bytes) +const DigestSHA256EmptyTar = DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef") + +type emptyLayer struct{} + +// EmptyLayer is a layer that corresponds to empty tar. 
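+// Its DiffID and ChainID are both DigestSHA256EmptyTar, the digest of the
+// 1024 zero bytes that make up an empty tar archive.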
+var EmptyLayer = &emptyLayer{} + +func (el *emptyLayer) TarStream() (io.ReadCloser, error) { + buf := new(bytes.Buffer) + tarWriter := tar.NewWriter(buf) + tarWriter.Close() + return ioutil.NopCloser(buf), nil +} + +func (el *emptyLayer) TarStreamFrom(p ChainID) (io.ReadCloser, error) { + if p == "" { + return el.TarStream() + } + return nil, fmt.Errorf("can't get parent tar stream of an empty layer") +} + +func (el *emptyLayer) ChainID() ChainID { + return ChainID(DigestSHA256EmptyTar) +} + +func (el *emptyLayer) DiffID() DiffID { + return DigestSHA256EmptyTar +} + +func (el *emptyLayer) Parent() Layer { + return nil +} + +func (el *emptyLayer) Size() (size int64, err error) { + return 0, nil +} + +func (el *emptyLayer) DiffSize() (size int64, err error) { + return 0, nil +} + +func (el *emptyLayer) Metadata() (map[string]string, error) { + return make(map[string]string), nil +} + +// IsEmpty returns true if the layer is an EmptyLayer +func IsEmpty(diffID DiffID) bool { + return diffID == DigestSHA256EmptyTar +} diff --git a/extender/code/vendor/github.com/docker/docker/layer/filestore.go b/extender/code/vendor/github.com/docker/docker/layer/filestore.go new file mode 100644 index 000000000..208a0c3a8 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/layer/filestore.go @@ -0,0 +1,355 @@ +package layer // import "github.com/docker/docker/layer" + +import ( + "compress/gzip" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + + "github.com/docker/distribution" + "github.com/docker/docker/pkg/ioutils" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var ( + stringIDRegexp = regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`) + supportedAlgorithms = []digest.Algorithm{ + digest.SHA256, + // digest.SHA384, // Currently not used + // digest.SHA512, // Currently not used + } +) + +type fileMetadataStore struct { + root string +} + +type fileMetadataTransaction struct { + store *fileMetadataStore + ws *ioutils.AtomicWriteSet +} + +// newFSMetadataStore returns an instance of a metadata store +// which is backed by files on disk using the provided root +// as the root of metadata files. 
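+//
+// Sketch of the resulting on-disk layout (added for clarity; illustrative):
+//
+//	<root>/sha256/<chain-id-hex>/{size,parent,diff,cache-id,descriptor.json,tar-split.json.gz}
+//	<root>/mounts/<mount-name>/{mount-id,init-id,parent}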
+func newFSMetadataStore(root string) (*fileMetadataStore, error) { + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + return &fileMetadataStore{ + root: root, + }, nil +} + +func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string { + dgst := digest.Digest(layer) + return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Hex()) +} + +func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string { + return filepath.Join(fms.getLayerDirectory(layer), filename) +} + +func (fms *fileMetadataStore) getMountDirectory(mount string) string { + return filepath.Join(fms.root, "mounts", mount) +} + +func (fms *fileMetadataStore) getMountFilename(mount, filename string) string { + return filepath.Join(fms.getMountDirectory(mount), filename) +} + +func (fms *fileMetadataStore) StartTransaction() (*fileMetadataTransaction, error) { + tmpDir := filepath.Join(fms.root, "tmp") + if err := os.MkdirAll(tmpDir, 0755); err != nil { + return nil, err + } + ws, err := ioutils.NewAtomicWriteSet(tmpDir) + if err != nil { + return nil, err + } + + return &fileMetadataTransaction{ + store: fms, + ws: ws, + }, nil +} + +func (fm *fileMetadataTransaction) SetSize(size int64) error { + content := fmt.Sprintf("%d", size) + return fm.ws.WriteFile("size", []byte(content), 0644) +} + +func (fm *fileMetadataTransaction) SetParent(parent ChainID) error { + return fm.ws.WriteFile("parent", []byte(digest.Digest(parent).String()), 0644) +} + +func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error { + return fm.ws.WriteFile("diff", []byte(digest.Digest(diff).String()), 0644) +} + +func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error { + return fm.ws.WriteFile("cache-id", []byte(cacheID), 0644) +} + +func (fm *fileMetadataTransaction) SetDescriptor(ref distribution.Descriptor) error { + jsonRef, err := json.Marshal(ref) + if err != nil { + return err + } + return fm.ws.WriteFile("descriptor.json", jsonRef, 0644) +} + +func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) { + f, err := fm.ws.FileWriter("tar-split.json.gz", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return nil, err + } + var wc io.WriteCloser + if compressInput { + wc = gzip.NewWriter(f) + } else { + wc = f + } + + return ioutils.NewWriteCloserWrapper(wc, func() error { + wc.Close() + return f.Close() + }), nil +} + +func (fm *fileMetadataTransaction) Commit(layer ChainID) error { + finalDir := fm.store.getLayerDirectory(layer) + if err := os.MkdirAll(filepath.Dir(finalDir), 0755); err != nil { + return err + } + + return fm.ws.Commit(finalDir) +} + +func (fm *fileMetadataTransaction) Cancel() error { + return fm.ws.Cancel() +} + +func (fm *fileMetadataTransaction) String() string { + return fm.ws.String() +} + +func (fms *fileMetadataStore) GetSize(layer ChainID) (int64, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "size")) + if err != nil { + return 0, err + } + + size, err := strconv.ParseInt(string(content), 10, 64) + if err != nil { + return 0, err + } + + return size, nil +} + +func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "parent")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + + dgst, err := digest.Parse(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return ChainID(dgst), nil +} + +func (fms *fileMetadataStore) 
GetDiffID(layer ChainID) (DiffID, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "diff")) + if err != nil { + return "", err + } + + dgst, err := digest.Parse(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return DiffID(dgst), nil +} + +func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id")) + if err != nil { + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if content == "" { + return "", errors.Errorf("invalid cache id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetDescriptor(layer ChainID) (distribution.Descriptor, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "descriptor.json")) + if err != nil { + if os.IsNotExist(err) { + // only return empty descriptor to represent what is stored + return distribution.Descriptor{}, nil + } + return distribution.Descriptor{}, err + } + + var ref distribution.Descriptor + err = json.Unmarshal(content, &ref) + if err != nil { + return distribution.Descriptor{}, err + } + return ref, err +} + +func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) { + fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz")) + if err != nil { + return nil, err + } + f, err := gzip.NewReader(fz) + if err != nil { + fz.Close() + return nil, err + } + + return ioutils.NewReadCloserWrapper(f, func() error { + f.Close() + return fz.Close() + }), nil +} + +func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0644) +} + +func (fms *fileMetadataStore) SetInitID(mount string, init string) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0644) +} + +func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0644) +} + +func (fms *fileMetadataStore) GetMountID(mount string) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id")) + if err != nil { + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid mount id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetInitID(mount string) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "init-id")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid init id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) { + content, err := ioutil.ReadFile(fms.getMountFilename(mount, "parent")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + + dgst, err := digest.Parse(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return ChainID(dgst), nil +} + 
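+// List returns the layer ChainIDs and mount names recorded in the store.
+// NOTE (added for clarity): it scans <root>/<algorithm>/ for valid layer
+// digests and <root>/mounts/ for mount directories, skipping entries that
+// fail digest validation.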
+func (fms *fileMetadataStore) List() ([]ChainID, []string, error) { + var ids []ChainID + for _, algorithm := range supportedAlgorithms { + fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, string(algorithm))) + if err != nil { + if os.IsNotExist(err) { + continue + } + return nil, nil, err + } + + for _, fi := range fileInfos { + if fi.IsDir() && fi.Name() != "mounts" { + dgst := digest.NewDigestFromHex(string(algorithm), fi.Name()) + if err := dgst.Validate(); err != nil { + logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name()) + } else { + ids = append(ids, ChainID(dgst)) + } + } + } + } + + fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, "mounts")) + if err != nil { + if os.IsNotExist(err) { + return ids, []string{}, nil + } + return nil, nil, err + } + + var mounts []string + for _, fi := range fileInfos { + if fi.IsDir() { + mounts = append(mounts, fi.Name()) + } + } + + return ids, mounts, nil +} + +func (fms *fileMetadataStore) Remove(layer ChainID) error { + return os.RemoveAll(fms.getLayerDirectory(layer)) +} + +func (fms *fileMetadataStore) RemoveMount(mount string) error { + return os.RemoveAll(fms.getMountDirectory(mount)) +} diff --git a/extender/code/vendor/github.com/docker/docker/layer/filestore_unix.go b/extender/code/vendor/github.com/docker/docker/layer/filestore_unix.go new file mode 100644 index 000000000..68e7f9077 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/layer/filestore_unix.go @@ -0,0 +1,15 @@ +// +build !windows + +package layer // import "github.com/docker/docker/layer" + +import "runtime" + +// setOS writes the "os" file to the layer filestore +func (fm *fileMetadataTransaction) setOS(os string) error { + return nil +} + +// getOS reads the "os" file from the layer filestore +func (fms *fileMetadataStore) getOS(layer ChainID) (string, error) { + return runtime.GOOS, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/layer/filestore_windows.go b/extender/code/vendor/github.com/docker/docker/layer/filestore_windows.go new file mode 100644 index 000000000..cecad426c --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/layer/filestore_windows.go @@ -0,0 +1,35 @@ +package layer // import "github.com/docker/docker/layer" + +import ( + "fmt" + "io/ioutil" + "os" + "strings" +) + +// setOS writes the "os" file to the layer filestore +func (fm *fileMetadataTransaction) setOS(os string) error { + if os == "" { + return nil + } + return fm.ws.WriteFile("os", []byte(os), 0644) +} + +// getOS reads the "os" file from the layer filestore +func (fms *fileMetadataStore) getOS(layer ChainID) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "os")) + if err != nil { + // For backwards compatibility, the os file may not exist. Default to "windows" if missing. + if os.IsNotExist(err) { + return "windows", nil + } + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if content != "windows" && content != "linux" { + return "", fmt.Errorf("invalid operating system value: %s", content) + } + + return content, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/layer/layer.go b/extender/code/vendor/github.com/docker/docker/layer/layer.go new file mode 100644 index 000000000..d0c7fa860 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/layer/layer.go @@ -0,0 +1,237 @@ +// Package layer is package for managing read-only +// and read-write mounts on the union file system +// driver. 
Read-only mounts are referenced using a +// content hash and are protected from mutation in +// the exposed interface. The tar format is used +// to create read-only layers and export both +// read-only and writable layers. The exported +// tar data for a read-only layer should match +// the tar used to create the layer. +package layer // import "github.com/docker/docker/layer" + +import ( + "errors" + "io" + + "github.com/docker/distribution" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +var ( + // ErrLayerDoesNotExist is used when an operation is + // attempted on a layer which does not exist. + ErrLayerDoesNotExist = errors.New("layer does not exist") + + // ErrLayerNotRetained is used when a release is + // attempted on a layer which is not retained. + ErrLayerNotRetained = errors.New("layer not retained") + + // ErrMountDoesNotExist is used when an operation is + // attempted on a mount layer which does not exist. + ErrMountDoesNotExist = errors.New("mount does not exist") + + // ErrMountNameConflict is used when a mount is attempted + // to be created but there is already a mount with the name + // used for creation. + ErrMountNameConflict = errors.New("mount already exists with name") + + // ErrActiveMount is used when an operation on a + // mount is attempted but the layer is still + // mounted and the operation cannot be performed. + ErrActiveMount = errors.New("mount still active") + + // ErrNotMounted is used when requesting an active + // mount but the layer is not mounted. + ErrNotMounted = errors.New("not mounted") + + // ErrMaxDepthExceeded is used when a layer is attempted + // to be created which would result in a layer depth + // greater than the 125 max. + ErrMaxDepthExceeded = errors.New("max depth exceeded") + + // ErrNotSupported is used when the action is not supported + // on the current host operating system. + ErrNotSupported = errors.New("not supported on this host operating system") +) + +// ChainID is the content-addressable ID of a layer. +type ChainID digest.Digest + +// String returns a string rendition of a layer ID +func (id ChainID) String() string { + return string(id) +} + +// DiffID is the hash of an individual layer tar. +type DiffID digest.Digest + +// String returns a string rendition of a layer DiffID +func (diffID DiffID) String() string { + return string(diffID) +} + +// TarStreamer represents an object which may +// have its contents exported as a tar stream. +type TarStreamer interface { + // TarStream returns a tar archive stream + // for the contents of a layer. + TarStream() (io.ReadCloser, error) +} + +// Layer represents a read-only layer +type Layer interface { + TarStreamer + + // TarStreamFrom returns a tar archive stream for all the layer chain with + // arbitrary depth. + TarStreamFrom(ChainID) (io.ReadCloser, error) + + // ChainID returns the content hash of the entire layer chain. The hash + // chain is made up of DiffID of top layer and all of its parents. + ChainID() ChainID + + // DiffID returns the content hash of the layer + // tar stream used to create this layer. + DiffID() DiffID + + // Parent returns the next layer in the layer chain. + Parent() Layer + + // Size returns the size of the entire layer chain. The size + // is calculated from the total size of all files in the layers. + Size() (int64, error) + + // DiffSize returns the size difference of the top layer + // from parent layer.
+ DiffSize() (int64, error) + + // Metadata returns the low level storage metadata associated + // with layer. + Metadata() (map[string]string, error) +} + +// RWLayer represents a layer which is +// read and writable +type RWLayer interface { + TarStreamer + + // Name of mounted layer + Name() string + + // Parent returns the layer which the writable + // layer was created from. + Parent() Layer + + // Mount mounts the RWLayer and returns the filesystem path + // to the writable layer. + Mount(mountLabel string) (containerfs.ContainerFS, error) + + // Unmount unmounts the RWLayer. This should be called + // for every mount. If there are multiple mount calls + // this operation will only decrement the internal mount counter. + Unmount() error + + // Size represents the size of the writable layer + // as calculated by the total size of the files + // changed in the mutable layer. + Size() (int64, error) + + // Changes returns the set of changes for the mutable layer + // from the base layer. + Changes() ([]archive.Change, error) + + // Metadata returns the low level metadata for the mutable layer + Metadata() (map[string]string, error) +} + +// Metadata holds information about a +// read-only layer +type Metadata struct { + // ChainID is the content hash of the layer + ChainID ChainID + + // DiffID is the hash of the tar data used to + // create the layer + DiffID DiffID + + // Size is the size of the layer and all parents + Size int64 + + // DiffSize is the size of the top layer + DiffSize int64 +} + +// MountInit is a function to initialize a +// writable mount. Changes made here will +// not be included in the Tar stream of the +// RWLayer. +type MountInit func(root containerfs.ContainerFS) error + +// CreateRWLayerOpts contains optional arguments to be passed to CreateRWLayer +type CreateRWLayerOpts struct { + MountLabel string + InitFunc MountInit + StorageOpt map[string]string +} + +// Store represents a backend for managing both +// read-only and read-write layers. +type Store interface { + Register(io.Reader, ChainID) (Layer, error) + Get(ChainID) (Layer, error) + Map() map[ChainID]Layer + Release(Layer) ([]Metadata, error) + + CreateRWLayer(id string, parent ChainID, opts *CreateRWLayerOpts) (RWLayer, error) + GetRWLayer(id string) (RWLayer, error) + GetMountID(id string) (string, error) + ReleaseRWLayer(RWLayer) ([]Metadata, error) + + Cleanup() error + DriverStatus() [][2]string + DriverName() string +} + +// DescribableStore represents a layer store capable of storing +// descriptors for layers. +type DescribableStore interface { + RegisterWithDescriptor(io.Reader, ChainID, distribution.Descriptor) (Layer, error) +} + +// CreateChainID returns ID for a layerDigest slice +func CreateChainID(dgsts []DiffID) ChainID { + return createChainIDFromParent("", dgsts...) +} + +func createChainIDFromParent(parent ChainID, dgsts ...DiffID) ChainID { + if len(dgsts) == 0 { + return parent + } + if parent == "" { + return createChainIDFromParent(ChainID(dgsts[0]), dgsts[1:]...) + } + // H = "H(n-1) SHA256(n)" + dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0]))) + return createChainIDFromParent(ChainID(dgst), dgsts[1:]...)
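+ // Worked example (illustrative digests, not part of the upstream file):
+ // for diff IDs d1, d2, d3 the recursion above computes
+ //	chain1 = d1
+ //	chain2 = SHA256("<chain1> <d2>")
+ //	chain3 = SHA256("<chain2> <d3>")
+ // so a change to any lower layer's diff ID changes every chain ID above it.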
+} + +// ReleaseAndLog releases the provided layer from the given layer +// store, logging any error and release metadata +func ReleaseAndLog(ls Store, l Layer) { + metadata, err := ls.Release(l) + if err != nil { + logrus.Errorf("Error releasing layer %s: %v", l.ChainID(), err) + } + LogReleaseMetadata(metadata) +} + +// LogReleaseMetadata logs a metadata array, uses this to +// ensure consistent logging for release metadata +func LogReleaseMetadata(metadatas []Metadata) { + for _, metadata := range metadatas { + logrus.Infof("Layer %s cleaned up", metadata.ChainID) + } +} diff --git a/extender/code/vendor/github.com/docker/docker/layer/layer_store.go b/extender/code/vendor/github.com/docker/docker/layer/layer_store.go new file mode 100644 index 000000000..1601465c0 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/layer/layer_store.go @@ -0,0 +1,755 @@ +package layer // import "github.com/docker/docker/layer" + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "sync" + + "github.com/docker/distribution" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +// maxLayerDepth represents the maximum number of +// layers which can be chained together. 125 was +// chosen to account for the 127 max in some +// graphdrivers plus the 2 additional layers +// used to create a rwlayer. +const maxLayerDepth = 125 + +type layerStore struct { + store *fileMetadataStore + driver graphdriver.Driver + useTarSplit bool + + layerMap map[ChainID]*roLayer + layerL sync.Mutex + + mounts map[string]*mountedLayer + mountL sync.Mutex + os string +} + +// StoreOptions are the options used to create a new Store instance +type StoreOptions struct { + Root string + MetadataStorePathTemplate string + GraphDriver string + GraphDriverOptions []string + IDMapping *idtools.IdentityMapping + PluginGetter plugingetter.PluginGetter + ExperimentalEnabled bool + OS string +} + +// NewStoreFromOptions creates a new Store instance +func NewStoreFromOptions(options StoreOptions) (Store, error) { + driver, err := graphdriver.New(options.GraphDriver, options.PluginGetter, graphdriver.Options{ + Root: options.Root, + DriverOptions: options.GraphDriverOptions, + UIDMaps: options.IDMapping.UIDs(), + GIDMaps: options.IDMapping.GIDs(), + ExperimentalEnabled: options.ExperimentalEnabled, + }) + if err != nil { + return nil, fmt.Errorf("error initializing graphdriver: %v", err) + } + logrus.Debugf("Initialized graph driver %s", driver) + + root := fmt.Sprintf(options.MetadataStorePathTemplate, driver) + + return newStoreFromGraphDriver(root, driver, options.OS) +} + +// newStoreFromGraphDriver creates a new Store instance using the provided +// metadata store and graph driver. The metadata store will be used to restore +// the Store. 
+func newStoreFromGraphDriver(root string, driver graphdriver.Driver, os string) (Store, error) { + if !system.IsOSSupported(os) { + return nil, fmt.Errorf("failed to initialize layer store as operating system '%s' is not supported", os) + } + caps := graphdriver.Capabilities{} + if capDriver, ok := driver.(graphdriver.CapabilityDriver); ok { + caps = capDriver.Capabilities() + } + + ms, err := newFSMetadataStore(root) + if err != nil { + return nil, err + } + + ls := &layerStore{ + store: ms, + driver: driver, + layerMap: map[ChainID]*roLayer{}, + mounts: map[string]*mountedLayer{}, + useTarSplit: !caps.ReproducesExactDiffs, + os: os, + } + + ids, mounts, err := ms.List() + if err != nil { + return nil, err + } + + for _, id := range ids { + l, err := ls.loadLayer(id) + if err != nil { + logrus.Debugf("Failed to load layer %s: %s", id, err) + continue + } + if l.parent != nil { + l.parent.referenceCount++ + } + } + + for _, mount := range mounts { + if err := ls.loadMount(mount); err != nil { + logrus.Debugf("Failed to load mount %s: %s", mount, err) + } + } + + return ls, nil +} + +func (ls *layerStore) Driver() graphdriver.Driver { + return ls.driver +} + +func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) { + cl, ok := ls.layerMap[layer] + if ok { + return cl, nil + } + + diff, err := ls.store.GetDiffID(layer) + if err != nil { + return nil, fmt.Errorf("failed to get diff id for %s: %s", layer, err) + } + + size, err := ls.store.GetSize(layer) + if err != nil { + return nil, fmt.Errorf("failed to get size for %s: %s", layer, err) + } + + cacheID, err := ls.store.GetCacheID(layer) + if err != nil { + return nil, fmt.Errorf("failed to get cache id for %s: %s", layer, err) + } + + parent, err := ls.store.GetParent(layer) + if err != nil { + return nil, fmt.Errorf("failed to get parent for %s: %s", layer, err) + } + + descriptor, err := ls.store.GetDescriptor(layer) + if err != nil { + return nil, fmt.Errorf("failed to get descriptor for %s: %s", layer, err) + } + + os, err := ls.store.getOS(layer) + if err != nil { + return nil, fmt.Errorf("failed to get operating system for %s: %s", layer, err) + } + + if os != ls.os { + return nil, fmt.Errorf("failed to load layer with os %s into layerstore for %s", os, ls.os) + } + + cl = &roLayer{ + chainID: layer, + diffID: diff, + size: size, + cacheID: cacheID, + layerStore: ls, + references: map[Layer]struct{}{}, + descriptor: descriptor, + } + + if parent != "" { + p, err := ls.loadLayer(parent) + if err != nil { + return nil, err + } + cl.parent = p + } + + ls.layerMap[cl.chainID] = cl + + return cl, nil +} + +func (ls *layerStore) loadMount(mount string) error { + if _, ok := ls.mounts[mount]; ok { + return nil + } + + mountID, err := ls.store.GetMountID(mount) + if err != nil { + return err + } + + initID, err := ls.store.GetInitID(mount) + if err != nil { + return err + } + + parent, err := ls.store.GetMountParent(mount) + if err != nil { + return err + } + + ml := &mountedLayer{ + name: mount, + mountID: mountID, + initID: initID, + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + if parent != "" { + p, err := ls.loadLayer(parent) + if err != nil { + return err + } + ml.parent = p + + p.referenceCount++ + } + + ls.mounts[ml.name] = ml + + return nil +} + +func (ls *layerStore) applyTar(tx *fileMetadataTransaction, ts io.Reader, parent string, layer *roLayer) error { + digester := digest.Canonical.Digester() + tr := io.TeeReader(ts, digester.Hash()) + + rdr := tr + if ls.useTarSplit { + tsw, err := 
tx.TarSplitWriter(true) + if err != nil { + return err + } + metaPacker := storage.NewJSONPacker(tsw) + defer tsw.Close() + + // we're passing nil here for the file putter, because the ApplyDiff will + // handle the extraction of the archive + rdr, err = asm.NewInputTarStream(tr, metaPacker, nil) + if err != nil { + return err + } + } + + applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, rdr) + // discard trailing data but ensure metadata is picked up to reconstruct stream + // unconditionally call io.Copy here before checking err to ensure the resources + // allocated by NewInputTarStream above are always released + io.Copy(ioutil.Discard, rdr) // ignore error as reader may be closed + if err != nil { + return err + } + + layer.size = applySize + layer.diffID = DiffID(digester.Digest()) + + logrus.Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize) + + return nil +} + +func (ls *layerStore) Register(ts io.Reader, parent ChainID) (Layer, error) { + return ls.registerWithDescriptor(ts, parent, distribution.Descriptor{}) +} + +func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) { + // err is used to hold the error which will always trigger + // cleanup of created sources but may not be an error returned + // to the caller (already exists). + var err error + var pid string + var p *roLayer + + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + pid = p.cacheID + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + if p.depth() >= maxLayerDepth { + err = ErrMaxDepthExceeded + return nil, err + } + } + + // Create new roLayer + layer := &roLayer{ + parent: p, + cacheID: stringid.GenerateRandomID(), + referenceCount: 1, + layerStore: ls, + references: map[Layer]struct{}{}, + descriptor: descriptor, + } + + if err = ls.driver.Create(layer.cacheID, pid, nil); err != nil { + return nil, err + } + + tx, err := ls.store.StartTransaction() + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + logrus.Debugf("Cleaning up layer %s: %v", layer.cacheID, err) + if err := ls.driver.Remove(layer.cacheID); err != nil { + logrus.Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err) + } + if err := tx.Cancel(); err != nil { + logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + } + } + }() + + if err = ls.applyTar(tx, ts, pid, layer); err != nil { + return nil, err + } + + if layer.parent == nil { + layer.chainID = ChainID(layer.diffID) + } else { + layer.chainID = createChainIDFromParent(layer.parent.chainID, layer.diffID) + } + + if err = storeLayer(tx, layer); err != nil { + return nil, err + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + + if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { + // Set error for cleanup, but do not return the error + err = errors.New("layer already exists") + return existingLayer.getReference(), nil + } + + if err = tx.Commit(layer.chainID); err != nil { + return nil, err + } + + ls.layerMap[layer.chainID] = layer + + return layer.getReference(), nil +} + +func (ls *layerStore) getWithoutLock(layer ChainID) *roLayer { + l, ok := ls.layerMap[layer] + if !ok { + return nil + } + + l.referenceCount++ + + return l +} + +func (ls *layerStore) get(l ChainID) *roLayer { + ls.layerL.Lock() + defer ls.layerL.Unlock() + return
ls.getWithoutLock(l) +} + +func (ls *layerStore) Get(l ChainID) (Layer, error) { + ls.layerL.Lock() + defer ls.layerL.Unlock() + + layer := ls.getWithoutLock(l) + if layer == nil { + return nil, ErrLayerDoesNotExist + } + + return layer.getReference(), nil +} + +func (ls *layerStore) Map() map[ChainID]Layer { + ls.layerL.Lock() + defer ls.layerL.Unlock() + + layers := map[ChainID]Layer{} + + for k, v := range ls.layerMap { + layers[k] = v + } + + return layers +} + +func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error { + err := ls.driver.Remove(layer.cacheID) + if err != nil { + return err + } + err = ls.store.Remove(layer.chainID) + if err != nil { + return err + } + metadata.DiffID = layer.diffID + metadata.ChainID = layer.chainID + metadata.Size, err = layer.Size() + if err != nil { + return err + } + metadata.DiffSize = layer.size + + return nil +} + +func (ls *layerStore) releaseLayer(l *roLayer) ([]Metadata, error) { + depth := 0 + removed := []Metadata{} + for { + if l.referenceCount == 0 { + panic("layer not retained") + } + l.referenceCount-- + if l.referenceCount != 0 { + return removed, nil + } + + if len(removed) == 0 && depth > 0 { + panic("cannot remove layer with child") + } + if l.hasReferences() { + panic("cannot delete referenced layer") + } + var metadata Metadata + if err := ls.deleteLayer(l, &metadata); err != nil { + return nil, err + } + + delete(ls.layerMap, l.chainID) + removed = append(removed, metadata) + + if l.parent == nil { + return removed, nil + } + + depth++ + l = l.parent + } +} + +func (ls *layerStore) Release(l Layer) ([]Metadata, error) { + ls.layerL.Lock() + defer ls.layerL.Unlock() + layer, ok := ls.layerMap[l.ChainID()] + if !ok { + return []Metadata{}, nil + } + if !layer.hasReference(l) { + return nil, ErrLayerNotRetained + } + + layer.deleteReference(l) + + return ls.releaseLayer(layer) +} + +func (ls *layerStore) CreateRWLayer(name string, parent ChainID, opts *CreateRWLayerOpts) (RWLayer, error) { + var ( + storageOpt map[string]string + initFunc MountInit + mountLabel string + ) + + if opts != nil { + mountLabel = opts.MountLabel + storageOpt = opts.StorageOpt + initFunc = opts.InitFunc + } + + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[name] + if ok { + return nil, ErrMountNameConflict + } + + var err error + var pid string + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + pid = p.cacheID + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + m = &mountedLayer{ + name: name, + parent: p, + mountID: ls.mountID(name), + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + if initFunc != nil { + pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc, storageOpt) + if err != nil { + return nil, err + } + m.initID = pid + } + + createOpts := &graphdriver.CreateOpts{ + StorageOpt: storageOpt, + } + + if err = ls.driver.CreateReadWrite(m.mountID, pid, createOpts); err != nil { + return nil, err + } + if err = ls.saveMount(m); err != nil { + return nil, err + } + + return m.getReference(), nil +} + +func (ls *layerStore) GetRWLayer(id string) (RWLayer, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + mount, ok := ls.mounts[id] + if !ok { + return nil, ErrMountDoesNotExist + } + + return mount.getReference(), nil +} + +func (ls *layerStore) GetMountID(id string) (string, error) { + ls.mountL.Lock() + defer 
ls.mountL.Unlock() + mount, ok := ls.mounts[id] + if !ok { + return "", ErrMountDoesNotExist + } + logrus.Debugf("GetMountID id: %s -> mountID: %s", id, mount.mountID) + + return mount.mountID, nil +} + +func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[l.Name()] + if !ok { + return []Metadata{}, nil + } + + if err := m.deleteReference(l); err != nil { + return nil, err + } + + if m.hasReferences() { + return []Metadata{}, nil + } + + if err := ls.driver.Remove(m.mountID); err != nil { + logrus.Errorf("Error removing mounted layer %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + + if m.initID != "" { + if err := ls.driver.Remove(m.initID); err != nil { + logrus.Errorf("Error removing init layer %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + } + + if err := ls.store.RemoveMount(m.name); err != nil { + logrus.Errorf("Error removing mount metadata: %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + + delete(ls.mounts, m.Name()) + + ls.layerL.Lock() + defer ls.layerL.Unlock() + if m.parent != nil { + return ls.releaseLayer(m.parent) + } + + return []Metadata{}, nil +} + +func (ls *layerStore) saveMount(mount *mountedLayer) error { + if err := ls.store.SetMountID(mount.name, mount.mountID); err != nil { + return err + } + + if mount.initID != "" { + if err := ls.store.SetInitID(mount.name, mount.initID); err != nil { + return err + } + } + + if mount.parent != nil { + if err := ls.store.SetMountParent(mount.name, mount.parent.chainID); err != nil { + return err + } + } + + ls.mounts[mount.name] = mount + + return nil +} + +func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit, storageOpt map[string]string) (string, error) { + // Use "-init" to maintain compatibility with graph drivers + // which are expecting this layer with this special name. If all + // graph drivers can be updated to not rely on knowing about this layer + // then the initID should be randomly generated. 
+ initID := fmt.Sprintf("%s-init", graphID) + + createOpts := &graphdriver.CreateOpts{ + MountLabel: mountLabel, + StorageOpt: storageOpt, + } + + if err := ls.driver.CreateReadWrite(initID, parent, createOpts); err != nil { + return "", err + } + p, err := ls.driver.Get(initID, "") + if err != nil { + return "", err + } + + if err := initFunc(p); err != nil { + ls.driver.Put(initID) + return "", err + } + + if err := ls.driver.Put(initID); err != nil { + return "", err + } + + return initID, nil +} + +func (ls *layerStore) getTarStream(rl *roLayer) (io.ReadCloser, error) { + if !ls.useTarSplit { + var parentCacheID string + if rl.parent != nil { + parentCacheID = rl.parent.cacheID + } + + return ls.driver.Diff(rl.cacheID, parentCacheID) + } + + r, err := ls.store.TarSplitReader(rl.chainID) + if err != nil { + return nil, err + } + + pr, pw := io.Pipe() + go func() { + err := ls.assembleTarTo(rl.cacheID, r, nil, pw) + if err != nil { + pw.CloseWithError(err) + } else { + pw.Close() + } + }() + + return pr, nil +} + +func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error { + diffDriver, ok := ls.driver.(graphdriver.DiffGetterDriver) + if !ok { + diffDriver = &naiveDiffPathDriver{ls.driver} + } + + defer metadata.Close() + + // get our relative path to the container + fileGetCloser, err := diffDriver.DiffGetter(graphID) + if err != nil { + return err + } + defer fileGetCloser.Close() + + metaUnpacker := storage.NewJSONUnpacker(metadata) + upackerCounter := &unpackSizeCounter{metaUnpacker, size} + logrus.Debugf("Assembling tar data for %s", graphID) + return asm.WriteOutputTarStream(fileGetCloser, upackerCounter, w) +} + +func (ls *layerStore) Cleanup() error { + return ls.driver.Cleanup() +} + +func (ls *layerStore) DriverStatus() [][2]string { + return ls.driver.Status() +} + +func (ls *layerStore) DriverName() string { + return ls.driver.String() +} + +type naiveDiffPathDriver struct { + graphdriver.Driver +} + +type fileGetPutter struct { + storage.FileGetter + driver graphdriver.Driver + id string +} + +func (w *fileGetPutter) Close() error { + return w.driver.Put(w.id) +} + +func (n *naiveDiffPathDriver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + p, err := n.Driver.Get(id, "") + if err != nil { + return nil, err + } + return &fileGetPutter{storage.NewPathFileGetter(p.Path()), n.Driver, id}, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/layer/layer_store_windows.go b/extender/code/vendor/github.com/docker/docker/layer/layer_store_windows.go new file mode 100644 index 000000000..eca1f6a83 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/layer/layer_store_windows.go @@ -0,0 +1,11 @@ +package layer // import "github.com/docker/docker/layer" + +import ( + "io" + + "github.com/docker/distribution" +) + +func (ls *layerStore) RegisterWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) { + return ls.registerWithDescriptor(ts, parent, descriptor) +} diff --git a/extender/code/vendor/github.com/docker/docker/layer/layer_unix.go b/extender/code/vendor/github.com/docker/docker/layer/layer_unix.go new file mode 100644 index 000000000..002c7ff83 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/layer/layer_unix.go @@ -0,0 +1,9 @@ +// +build linux freebsd darwin openbsd + +package layer // import "github.com/docker/docker/layer" + +import "github.com/docker/docker/pkg/stringid" + +func (ls *layerStore) mountID(name string) string { + 
return stringid.GenerateRandomID() +} diff --git a/extender/code/vendor/github.com/docker/docker/layer/layer_windows.go b/extender/code/vendor/github.com/docker/docker/layer/layer_windows.go new file mode 100644 index 000000000..3d079a9af --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/layer/layer_windows.go @@ -0,0 +1,46 @@ +package layer // import "github.com/docker/docker/layer" + +import ( + "errors" +) + +// Getter is an interface to get the path to a layer on the host. +type Getter interface { + // GetLayerPath gets the path for the layer. This is different from Get() + // since that returns an interface to account for unmountable layers. + GetLayerPath(id string) (string, error) +} + +// GetLayerPath returns the path to a layer +func GetLayerPath(s Store, layer ChainID) (string, error) { + ls, ok := s.(*layerStore) + if !ok { + return "", errors.New("unsupported layer store") + } + ls.layerL.Lock() + defer ls.layerL.Unlock() + + rl, ok := ls.layerMap[layer] + if !ok { + return "", ErrLayerDoesNotExist + } + + if layerGetter, ok := ls.driver.(Getter); ok { + return layerGetter.GetLayerPath(rl.cacheID) + } + path, err := ls.driver.Get(rl.cacheID, "") + if err != nil { + return "", err + } + + if err := ls.driver.Put(rl.cacheID); err != nil { + return "", err + } + + return path.Path(), nil +} + +func (ls *layerStore) mountID(name string) string { + // windows has issues if container ID doesn't match mount ID + return name +} diff --git a/extender/code/vendor/github.com/docker/docker/layer/migration.go b/extender/code/vendor/github.com/docker/docker/layer/migration.go new file mode 100644 index 000000000..2668ea96b --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/layer/migration.go @@ -0,0 +1,252 @@ +package layer // import "github.com/docker/docker/layer" + +import ( + "compress/gzip" + "errors" + "fmt" + "io" + "os" + + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +// CreateRWLayerByGraphID creates a RWLayer in the layer store using +// the provided name with the given graphID. To get the RWLayer +// after migration the layer may be retrieved by the given name. 
+func (ls *layerStore) CreateRWLayerByGraphID(name, graphID string, parent ChainID) (err error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[name] + if ok { + if m.parent.chainID != parent { + return errors.New("name conflict, mismatched parent") + } + if m.mountID != graphID { + return errors.New("mount already exists") + } + + return nil + } + + if !ls.driver.Exists(graphID) { + return fmt.Errorf("graph ID does not exist: %q", graphID) + } + + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return ErrLayerDoesNotExist + } + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + // TODO: Ensure graphID has correct parent + + m = &mountedLayer{ + name: name, + parent: p, + mountID: graphID, + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + // Check for existing init layer + initID := fmt.Sprintf("%s-init", graphID) + if ls.driver.Exists(initID) { + m.initID = initID + } + + return ls.saveMount(m) +} + +func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID DiffID, size int64, err error) { + defer func() { + if err != nil { + logrus.Debugf("could not get checksum for %q with tar-split: %q", id, err) + diffID, size, err = ls.checksumForGraphIDNoTarsplit(id, parent, newTarDataPath) + } + }() + + if oldTarDataPath == "" { + err = errors.New("no tar-split file") + return + } + + tarDataFile, err := os.Open(oldTarDataPath) + if err != nil { + return + } + defer tarDataFile.Close() + uncompressed, err := gzip.NewReader(tarDataFile) + if err != nil { + return + } + + dgst := digest.Canonical.Digester() + err = ls.assembleTarTo(id, uncompressed, &size, dgst.Hash()) + if err != nil { + return + } + + diffID = DiffID(dgst.Digest()) + err = os.RemoveAll(newTarDataPath) + if err != nil { + return + } + err = os.Link(oldTarDataPath, newTarDataPath) + + return +} + +func (ls *layerStore) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID DiffID, size int64, err error) { + rawarchive, err := ls.driver.Diff(id, parent) + if err != nil { + return + } + defer rawarchive.Close() + + f, err := os.Create(newTarDataPath) + if err != nil { + return + } + defer f.Close() + mfz := gzip.NewWriter(f) + defer mfz.Close() + metaPacker := storage.NewJSONPacker(mfz) + + packerCounter := &packSizeCounter{metaPacker, &size} + + archive, err := asm.NewInputTarStream(rawarchive, packerCounter, nil) + if err != nil { + return + } + dgst, err := digest.FromReader(archive) + if err != nil { + return + } + diffID = DiffID(dgst) + return +} + +func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, diffID DiffID, tarDataFile string, size int64) (Layer, error) { + // err is used to hold the error which will always trigger + // cleanup of created sources but may not be an error returned + // to the caller (already exists).
+ var err error + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + // Create new roLayer + layer := &roLayer{ + parent: p, + cacheID: graphID, + referenceCount: 1, + layerStore: ls, + references: map[Layer]struct{}{}, + diffID: diffID, + size: size, + chainID: createChainIDFromParent(parent, diffID), + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + + if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { + // Set error for cleanup, but do not return + err = errors.New("layer already exists") + return existingLayer.getReference(), nil + } + + tx, err := ls.store.StartTransaction() + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + logrus.Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err) + if err := tx.Cancel(); err != nil { + logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + } + } + }() + + tsw, err := tx.TarSplitWriter(false) + if err != nil { + return nil, err + } + defer tsw.Close() + tdf, err := os.Open(tarDataFile) + if err != nil { + return nil, err + } + defer tdf.Close() + _, err = io.Copy(tsw, tdf) + if err != nil { + return nil, err + } + + if err = storeLayer(tx, layer); err != nil { + return nil, err + } + + if err = tx.Commit(layer.chainID); err != nil { + return nil, err + } + + ls.layerMap[layer.chainID] = layer + + return layer.getReference(), nil +} + +type unpackSizeCounter struct { + unpacker storage.Unpacker + size *int64 +} + +func (u *unpackSizeCounter) Next() (*storage.Entry, error) { + e, err := u.unpacker.Next() + if err == nil && u.size != nil { + *u.size += e.Size + } + return e, err +} + +type packSizeCounter struct { + packer storage.Packer + size *int64 +} + +func (p *packSizeCounter) AddEntry(e storage.Entry) (int, error) { + n, err := p.packer.AddEntry(e) + if err == nil && p.size != nil { + *p.size += e.Size + } + return n, err +} diff --git a/extender/code/vendor/github.com/docker/docker/layer/mounted_layer.go b/extender/code/vendor/github.com/docker/docker/layer/mounted_layer.go new file mode 100644 index 000000000..d6858c662 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/layer/mounted_layer.go @@ -0,0 +1,100 @@ +package layer // import "github.com/docker/docker/layer" + +import ( + "io" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" +) + +type mountedLayer struct { + name string + mountID string + initID string + parent *roLayer + path string + layerStore *layerStore + + references map[RWLayer]*referencedRWLayer +} + +func (ml *mountedLayer) cacheParent() string { + if ml.initID != "" { + return ml.initID + } + if ml.parent != nil { + return ml.parent.cacheID + } + return "" +} + +func (ml *mountedLayer) TarStream() (io.ReadCloser, error) { + return ml.layerStore.driver.Diff(ml.mountID, ml.cacheParent()) +} + +func (ml *mountedLayer) Name() string { + return ml.name +} + +func (ml *mountedLayer) Parent() Layer { + if ml.parent != nil { + return ml.parent + } + + // Return a nil interface instead of an interface wrapping a nil + // pointer. 
+ return nil +} + +func (ml *mountedLayer) Size() (int64, error) { + return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent()) +} + +func (ml *mountedLayer) Changes() ([]archive.Change, error) { + return ml.layerStore.driver.Changes(ml.mountID, ml.cacheParent()) +} + +func (ml *mountedLayer) Metadata() (map[string]string, error) { + return ml.layerStore.driver.GetMetadata(ml.mountID) +} + +func (ml *mountedLayer) getReference() RWLayer { + ref := &referencedRWLayer{ + mountedLayer: ml, + } + ml.references[ref] = ref + + return ref +} + +func (ml *mountedLayer) hasReferences() bool { + return len(ml.references) > 0 +} + +func (ml *mountedLayer) deleteReference(ref RWLayer) error { + if _, ok := ml.references[ref]; !ok { + return ErrLayerNotRetained + } + delete(ml.references, ref) + return nil +} + +func (ml *mountedLayer) retakeReference(r RWLayer) { + if ref, ok := r.(*referencedRWLayer); ok { + ml.references[ref] = ref + } +} + +type referencedRWLayer struct { + *mountedLayer +} + +func (rl *referencedRWLayer) Mount(mountLabel string) (containerfs.ContainerFS, error) { + return rl.layerStore.driver.Get(rl.mountedLayer.mountID, mountLabel) +} + +// Unmount decrements the activity count and unmounts the underlying layer +// Callers should only call `Unmount` once per call to `Mount`, even on error. +func (rl *referencedRWLayer) Unmount() error { + return rl.layerStore.driver.Put(rl.mountedLayer.mountID) +} diff --git a/extender/code/vendor/github.com/docker/docker/layer/ro_layer.go b/extender/code/vendor/github.com/docker/docker/layer/ro_layer.go new file mode 100644 index 000000000..3555e8b02 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/layer/ro_layer.go @@ -0,0 +1,182 @@ +package layer // import "github.com/docker/docker/layer" + +import ( + "fmt" + "io" + + "github.com/docker/distribution" + "github.com/opencontainers/go-digest" +) + +type roLayer struct { + chainID ChainID + diffID DiffID + parent *roLayer + cacheID string + size int64 + layerStore *layerStore + descriptor distribution.Descriptor + + referenceCount int + references map[Layer]struct{} +} + +// TarStream for roLayer guarantees that the data that is produced is the exact +// data that the layer was registered with. +func (rl *roLayer) TarStream() (io.ReadCloser, error) { + rc, err := rl.layerStore.getTarStream(rl) + if err != nil { + return nil, err + } + + vrc, err := newVerifiedReadCloser(rc, digest.Digest(rl.diffID)) + if err != nil { + return nil, err + } + return vrc, nil +} + +// TarStreamFrom does not make any guarantees to the correctness of the produced +// data. As such it should not be used when the layer content must be verified +// to be an exact match to the registered layer. 
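+// For illustration (hypothetical chain base -> mid -> top): calling
+// top.TarStreamFrom with mid's ChainID returns the graphdriver's raw diff of
+// top relative to mid, and errors if mid is not among top's ancestors; by
+// contrast, TarStream above is verified against the layer's DiffID on read.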
+func (rl *roLayer) TarStreamFrom(parent ChainID) (io.ReadCloser, error) { + var parentCacheID string + for pl := rl.parent; pl != nil; pl = pl.parent { + if pl.chainID == parent { + parentCacheID = pl.cacheID + break + } + } + + if parent != ChainID("") && parentCacheID == "" { + return nil, fmt.Errorf("layer ID '%s' is not a parent of the specified layer: cannot provide diff to non-parent", parent) + } + return rl.layerStore.driver.Diff(rl.cacheID, parentCacheID) +} + +func (rl *roLayer) CacheID() string { + return rl.cacheID +} + +func (rl *roLayer) ChainID() ChainID { + return rl.chainID +} + +func (rl *roLayer) DiffID() DiffID { + return rl.diffID +} + +func (rl *roLayer) Parent() Layer { + if rl.parent == nil { + return nil + } + return rl.parent +} + +func (rl *roLayer) Size() (size int64, err error) { + if rl.parent != nil { + size, err = rl.parent.Size() + if err != nil { + return + } + } + + return size + rl.size, nil +} + +func (rl *roLayer) DiffSize() (size int64, err error) { + return rl.size, nil +} + +func (rl *roLayer) Metadata() (map[string]string, error) { + return rl.layerStore.driver.GetMetadata(rl.cacheID) +} + +type referencedCacheLayer struct { + *roLayer +} + +func (rl *roLayer) getReference() Layer { + ref := &referencedCacheLayer{ + roLayer: rl, + } + rl.references[ref] = struct{}{} + + return ref +} + +func (rl *roLayer) hasReference(ref Layer) bool { + _, ok := rl.references[ref] + return ok +} + +func (rl *roLayer) hasReferences() bool { + return len(rl.references) > 0 +} + +func (rl *roLayer) deleteReference(ref Layer) { + delete(rl.references, ref) +} + +func (rl *roLayer) depth() int { + if rl.parent == nil { + return 1 + } + return rl.parent.depth() + 1 +} + +func storeLayer(tx *fileMetadataTransaction, layer *roLayer) error { + if err := tx.SetDiffID(layer.diffID); err != nil { + return err + } + if err := tx.SetSize(layer.size); err != nil { + return err + } + if err := tx.SetCacheID(layer.cacheID); err != nil { + return err + } + // Do not store empty descriptors + if layer.descriptor.Digest != "" { + if err := tx.SetDescriptor(layer.descriptor); err != nil { + return err + } + } + if layer.parent != nil { + if err := tx.SetParent(layer.parent.chainID); err != nil { + return err + } + } + return tx.setOS(layer.layerStore.os) +} + +func newVerifiedReadCloser(rc io.ReadCloser, dgst digest.Digest) (io.ReadCloser, error) { + return &verifiedReadCloser{ + rc: rc, + dgst: dgst, + verifier: dgst.Verifier(), + }, nil +} + +type verifiedReadCloser struct { + rc io.ReadCloser + dgst digest.Digest + verifier digest.Verifier +} + +func (vrc *verifiedReadCloser) Read(p []byte) (n int, err error) { + n, err = vrc.rc.Read(p) + if n > 0 { + if n, err := vrc.verifier.Write(p[:n]); err != nil { + return n, err + } + } + if err == io.EOF { + if !vrc.verifier.Verified() { + err = fmt.Errorf("could not verify layer data for: %s. This may be because internal files in the layer store were modified. 
Re-pulling or rebuilding this image may resolve the issue", vrc.dgst) + } + } + return +} +func (vrc *verifiedReadCloser) Close() error { + return vrc.rc.Close() +} diff --git a/extender/code/vendor/github.com/docker/docker/layer/ro_layer_windows.go b/extender/code/vendor/github.com/docker/docker/layer/ro_layer_windows.go new file mode 100644 index 000000000..a4f0c8088 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/layer/ro_layer_windows.go @@ -0,0 +1,9 @@ +package layer // import "github.com/docker/docker/layer" + +import "github.com/docker/distribution" + +var _ distribution.Describable = &roLayer{} + +func (rl *roLayer) Descriptor() distribution.Descriptor { + return rl.descriptor +} diff --git a/extender/code/vendor/github.com/docker/docker/oci/defaults.go b/extender/code/vendor/github.com/docker/docker/oci/defaults.go new file mode 100644 index 000000000..35fbcd1d8 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/oci/defaults.go @@ -0,0 +1,213 @@ +package oci // import "github.com/docker/docker/oci" + +import ( + "os" + "runtime" + + "github.com/opencontainers/runtime-spec/specs-go" +) + +func iPtr(i int64) *int64 { return &i } +func u32Ptr(i int64) *uint32 { u := uint32(i); return &u } +func fmPtr(i int64) *os.FileMode { fm := os.FileMode(i); return &fm } + +// DefaultCapabilities returns a Linux kernel default capabilities +func DefaultCapabilities() []string { + return []string{ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE", + } +} + +// DefaultSpec returns the default spec used by docker for the current Platform +func DefaultSpec() specs.Spec { + return DefaultOSSpec(runtime.GOOS) +} + +// DefaultOSSpec returns the spec for a given OS +func DefaultOSSpec(osName string) specs.Spec { + if osName == "windows" { + return DefaultWindowsSpec() + } + return DefaultLinuxSpec() +} + +// DefaultWindowsSpec create a default spec for running Windows containers +func DefaultWindowsSpec() specs.Spec { + return specs.Spec{ + Version: specs.Version, + Windows: &specs.Windows{}, + Process: &specs.Process{}, + Root: &specs.Root{}, + } +} + +// DefaultLinuxSpec create a default spec for running Linux containers +func DefaultLinuxSpec() specs.Spec { + s := specs.Spec{ + Version: specs.Version, + Process: &specs.Process{ + Capabilities: &specs.LinuxCapabilities{ + Bounding: DefaultCapabilities(), + Permitted: DefaultCapabilities(), + Inheritable: DefaultCapabilities(), + Effective: DefaultCapabilities(), + }, + }, + Root: &specs.Root{}, + } + s.Mounts = []specs.Mount{ + { + Destination: "/proc", + Type: "proc", + Source: "proc", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + { + Destination: "/dev", + Type: "tmpfs", + Source: "tmpfs", + Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"}, + }, + { + Destination: "/dev/pts", + Type: "devpts", + Source: "devpts", + Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"}, + }, + { + Destination: "/sys", + Type: "sysfs", + Source: "sysfs", + Options: []string{"nosuid", "noexec", "nodev", "ro"}, + }, + { + Destination: "/sys/fs/cgroup", + Type: "cgroup", + Source: "cgroup", + Options: []string{"ro", "nosuid", "noexec", "nodev"}, + }, + { + Destination: "/dev/mqueue", + Type: "mqueue", + Source: "mqueue", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + 
{ + Destination: "/dev/shm", + Type: "tmpfs", + Source: "shm", + Options: []string{"nosuid", "noexec", "nodev", "mode=1777"}, + }, + } + + s.Linux = &specs.Linux{ + MaskedPaths: []string{ + "/proc/asound", + "/proc/acpi", + "/proc/kcore", + "/proc/keys", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/proc/scsi", + "/sys/firmware", + }, + ReadonlyPaths: []string{ + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger", + }, + Namespaces: []specs.LinuxNamespace{ + {Type: "mount"}, + {Type: "network"}, + {Type: "uts"}, + {Type: "pid"}, + {Type: "ipc"}, + }, + // Devices implicitly contains the following devices: + // null, zero, full, random, urandom, tty, console, and ptmx. + // ptmx is a bind mount or symlink of the container's ptmx. + // See also: https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#default-devices + Devices: []specs.LinuxDevice{}, + Resources: &specs.LinuxResources{ + Devices: []specs.LinuxDeviceCgroup{ + { + Allow: false, + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(1), + Minor: iPtr(5), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(1), + Minor: iPtr(3), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(1), + Minor: iPtr(9), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(1), + Minor: iPtr(8), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(5), + Minor: iPtr(0), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(5), + Minor: iPtr(1), + Access: "rwm", + }, + { + Allow: false, + Type: "c", + Major: iPtr(10), + Minor: iPtr(229), + Access: "rwm", + }, + }, + }, + } + + // For LCOW support, populate a blank Windows spec + if runtime.GOOS == "windows" { + s.Windows = &specs.Windows{} + } + + return s +} diff --git a/extender/code/vendor/github.com/docker/docker/oci/devices_linux.go b/extender/code/vendor/github.com/docker/docker/oci/devices_linux.go new file mode 100644 index 000000000..46d4e1d32 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/oci/devices_linux.go @@ -0,0 +1,86 @@ +package oci // import "github.com/docker/docker/oci" + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/devices" + "github.com/opencontainers/runtime-spec/specs-go" +) + +// Device transforms a libcontainer configs.Device to a specs.LinuxDevice object. +func Device(d *configs.Device) specs.LinuxDevice { + return specs.LinuxDevice{ + Type: string(d.Type), + Path: d.Path, + Major: d.Major, + Minor: d.Minor, + FileMode: fmPtr(int64(d.FileMode)), + UID: u32Ptr(int64(d.Uid)), + GID: u32Ptr(int64(d.Gid)), + } +} + +func deviceCgroup(d *configs.Device) specs.LinuxDeviceCgroup { + t := string(d.Type) + return specs.LinuxDeviceCgroup{ + Allow: true, + Type: t, + Major: &d.Major, + Minor: &d.Minor, + Access: d.Permissions, + } +} + +// DevicesFromPath computes a list of devices and device permissions from paths (pathOnHost and pathInContainer) and cgroup permissions. 
+func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.LinuxDevice, devPermissions []specs.LinuxDeviceCgroup, err error) { + resolvedPathOnHost := pathOnHost + + // check if it is a symbolic link + if src, e := os.Lstat(pathOnHost); e == nil && src.Mode()&os.ModeSymlink == os.ModeSymlink { + if linkedPathOnHost, e := filepath.EvalSymlinks(pathOnHost); e == nil { + resolvedPathOnHost = linkedPathOnHost + } + } + + device, err := devices.DeviceFromPath(resolvedPathOnHost, cgroupPermissions) + // if there was no error, return the device + if err == nil { + device.Path = pathInContainer + return append(devs, Device(device)), append(devPermissions, deviceCgroup(device)), nil + } + + // if the device is not a device node + // try to see if it's a directory holding many devices + if err == devices.ErrNotADevice { + + // check if it is a directory + if src, e := os.Stat(resolvedPathOnHost); e == nil && src.IsDir() { + + // mount the internal devices recursively + filepath.Walk(resolvedPathOnHost, func(dpath string, f os.FileInfo, e error) error { + childDevice, e := devices.DeviceFromPath(dpath, cgroupPermissions) + if e != nil { + // ignore the device + return nil + } + + // add the device to userSpecified devices + childDevice.Path = strings.Replace(dpath, resolvedPathOnHost, pathInContainer, 1) + devs = append(devs, Device(childDevice)) + devPermissions = append(devPermissions, deviceCgroup(childDevice)) + + return nil + }) + } + } + + if len(devs) > 0 { + return devs, devPermissions, nil + } + + return devs, devPermissions, fmt.Errorf("error gathering device information while adding custom device %q: %s", pathOnHost, err) +} diff --git a/extender/code/vendor/github.com/docker/docker/oci/devices_unsupported.go b/extender/code/vendor/github.com/docker/docker/oci/devices_unsupported.go new file mode 100644 index 000000000..af6dd3bda --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/oci/devices_unsupported.go @@ -0,0 +1,20 @@ +// +build !linux + +package oci // import "github.com/docker/docker/oci" + +import ( + "errors" + + "github.com/opencontainers/runc/libcontainer/configs" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// Device transforms a libcontainer configs.Device to a specs.Device object. +// Not implemented +func Device(d *configs.Device) specs.LinuxDevice { return specs.LinuxDevice{} } + +// DevicesFromPath computes a list of devices and device permissions from paths (pathOnHost and pathInContainer) and cgroup permissions. +// Not implemented +func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.LinuxDevice, devPermissions []specs.LinuxDeviceCgroup, err error) { + return nil, nil, errors.New("oci/devices: unsupported platform") +} diff --git a/extender/code/vendor/github.com/docker/docker/oci/namespaces.go b/extender/code/vendor/github.com/docker/docker/oci/namespaces.go new file mode 100644 index 000000000..5a2d8f208 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/oci/namespaces.go @@ -0,0 +1,13 @@ +package oci // import "github.com/docker/docker/oci" + +import "github.com/opencontainers/runtime-spec/specs-go" + +// RemoveNamespace removes the `nsType` namespace from OCI spec `s` +func RemoveNamespace(s *specs.Spec, nsType specs.LinuxNamespaceType) { + for i, n := range s.Linux.Namespaces { + if n.Type == nsType { + s.Linux.Namespaces = append(s.Linux.Namespaces[:i], s.Linux.Namespaces[i+1:]...) 
+ return + } + } +} diff --git a/extender/code/vendor/github.com/docker/docker/oci/oci.go b/extender/code/vendor/github.com/docker/docker/oci/oci.go new file mode 100644 index 000000000..6c84ba348 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/oci/oci.go @@ -0,0 +1,67 @@ +package oci // import "github.com/docker/docker/oci" + +import ( + "fmt" + "regexp" + "strconv" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// nolint: gosimple +var deviceCgroupRuleRegex = regexp.MustCompile("^([acb]) ([0-9]+|\\*):([0-9]+|\\*) ([rwm]{1,3})$") + +// SetCapabilities sets the provided capabilities on the spec +// For non-root users, effective and permitted capabilities are cleared +func SetCapabilities(s *specs.Spec, caplist []string) error { + s.Process.Capabilities.Effective = caplist + s.Process.Capabilities.Bounding = caplist + s.Process.Capabilities.Permitted = caplist + s.Process.Capabilities.Inheritable = caplist + // setUser has already been executed here + // if non root drop capabilities in the way execve does + if s.Process.User.UID != 0 { + s.Process.Capabilities.Effective = []string{} + s.Process.Capabilities.Permitted = []string{} + } + return nil +} + +// AppendDevicePermissionsFromCgroupRules takes rules for the devices cgroup to append to the default set +func AppendDevicePermissionsFromCgroupRules(devPermissions []specs.LinuxDeviceCgroup, rules []string) ([]specs.LinuxDeviceCgroup, error) { + for _, deviceCgroupRule := range rules { + ss := deviceCgroupRuleRegex.FindAllStringSubmatch(deviceCgroupRule, -1) + if len(ss) == 0 || len(ss[0]) != 5 { + return nil, fmt.Errorf("invalid device cgroup rule format: '%s'", deviceCgroupRule) + } + matches := ss[0] + + dPermissions := specs.LinuxDeviceCgroup{ + Allow: true, + Type: matches[1], + Access: matches[4], + } + if matches[2] == "*" { + major := int64(-1) + dPermissions.Major = &major + } else { + major, err := strconv.ParseInt(matches[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid major value in device cgroup rule format: '%s'", deviceCgroupRule) + } + dPermissions.Major = &major + } + if matches[3] == "*" { + minor := int64(-1) + dPermissions.Minor = &minor + } else { + minor, err := strconv.ParseInt(matches[3], 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid minor value in device cgroup rule format: '%s'", deviceCgroupRule) + } + dPermissions.Minor = &minor + } + devPermissions = append(devPermissions, dPermissions) + } + return devPermissions, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/archive/README.md b/extender/code/vendor/github.com/docker/docker/pkg/archive/README.md new file mode 100644 index 000000000..7307d9694 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/archive/README.md @@ -0,0 +1 @@ +This code provides helper functions for dealing with archive files.
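The AppendDevicePermissionsFromCgroupRules helper above parses device-cgroup rules of the form `<type> <major>:<minor> <access>`, where `*` stands for "any". A minimal, self-contained sketch of calling it (not part of the patch; rule strings are illustrative, only the vendored API shown in this diff is used):

    package main

    import (
    	"fmt"

    	"github.com/docker/docker/oci"
    )

    func main() {
    	// Two hypothetical rules: read/write/mknod on char device 1:3 (/dev/null),
    	// and read-only access to any minor of block major 8 (sd* disks).
    	perms, err := oci.AppendDevicePermissionsFromCgroupRules(nil, []string{
    		"c 1:3 rwm",
    		"b 8:* r",
    	})
    	if err != nil {
    		panic(err)
    	}
    	for _, p := range perms {
    		// A major/minor of -1 encodes the "*" wildcard.
    		fmt.Printf("allow=%v type=%s major=%d minor=%d access=%s\n",
    			p.Allow, p.Type, *p.Major, *p.Minor, p.Access)
    	}
    }

With the `len(ss) == 0` guard in the loop, a malformed rule such as `"foo"` returns the "invalid device cgroup rule format" error instead of panicking on an empty match slice.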
diff --git a/extender/code/vendor/github.com/docker/docker/pkg/archive/archive.go b/extender/code/vendor/github.com/docker/docker/pkg/archive/archive.go new file mode 100644 index 000000000..bb623fa85 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/archive/archive.go @@ -0,0 +1,1284 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/bzip2" + "compress/gzip" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "syscall" + "time" + + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +var unpigzPath string + +func init() { + if path, err := exec.LookPath("unpigz"); err != nil { + logrus.Debug("unpigz binary not found in PATH, falling back to go gzip library") + } else { + logrus.Debugf("Using unpigz binary found at path %s", path) + unpigzPath = path + } +} + +type ( + // Compression is the state represents if compressed or not. + Compression int + // WhiteoutFormat is the format of whiteouts unpacked + WhiteoutFormat int + + // TarOptions wraps the tar options. + TarOptions struct { + IncludeFiles []string + ExcludePatterns []string + Compression Compression + NoLchown bool + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + ChownOpts *idtools.Identity + IncludeSourceDir bool + // WhiteoutFormat is the expected on disk format for whiteout files. + // This format will be converted to the standard format on pack + // and from the standard format on unpack. + WhiteoutFormat WhiteoutFormat + // When unpacking, specifies whether overwriting a directory with a + // non-directory is allowed and vice versa. + NoOverwriteDirNonDir bool + // For each include when creating an archive, the included name will be + // replaced with the matching name from this map. + RebaseNames map[string]string + InUserNS bool + } +) + +// Archiver implements the Archiver interface and allows the reuse of most utility functions of +// this package with a pluggable Untar function. Also, to facilitate the passing of specific id +// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. +type Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + IDMapping *idtools.IdentityMapping +} + +// NewDefaultArchiver returns a new Archiver without any IdentityMapping +func NewDefaultArchiver() *Archiver { + return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}} +} + +// breakoutError is used to differentiate errors related to breaking out +// When testing archive breakout in the unit tests, this error is expected +// in order for the test to pass. +type breakoutError error + +const ( + // Uncompressed represents the uncompressed. + Uncompressed Compression = iota + // Bzip2 is bzip2 compression algorithm. + Bzip2 + // Gzip is gzip compression algorithm. + Gzip + // Xz is xz compression algorithm. + Xz +) + +const ( + // AUFSWhiteoutFormat is the default format for whiteouts + AUFSWhiteoutFormat WhiteoutFormat = iota + // OverlayWhiteoutFormat formats whiteout according to the overlay + // standard. 
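+	// In the overlay representation, a deleted file is a 0,0 character
+	// device and an opaque directory carries the trusted.overlay.opaque
+	// xattr; the converters in archive_linux.go translate between this and
+	// the AUFS-style .wh. prefixed tar entries.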
+ OverlayWhiteoutFormat +) + +const ( + modeISDIR = 040000 // Directory + modeISFIFO = 010000 // FIFO + modeISREG = 0100000 // Regular file + modeISLNK = 0120000 // Symbolic link + modeISBLK = 060000 // Block special file + modeISCHR = 020000 // Character special file + modeISSOCK = 0140000 // Socket +) + +// IsArchivePath checks if the (possibly compressed) file at the given path +// starts with a tar file header. +func IsArchivePath(path string) bool { + file, err := os.Open(path) + if err != nil { + return false + } + defer file.Close() + rdr, err := DecompressStream(file) + if err != nil { + return false + } + defer rdr.Close() + r := tar.NewReader(rdr) + _, err = r.Next() + return err == nil +} + +// DetectCompression detects the compression algorithm of the source. +func DetectCompression(source []byte) Compression { + for compression, m := range map[Compression][]byte{ + Bzip2: {0x42, 0x5A, 0x68}, + Gzip: {0x1F, 0x8B, 0x08}, + Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, + } { + if len(source) < len(m) { + logrus.Debug("Len too short") + continue + } + if bytes.Equal(m, source[:len(m)]) { + return compression + } + } + return Uncompressed +} + +func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { + args := []string{"xz", "-d", "-c", "-q"} + + return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) +} + +func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { + if unpigzPath == "" { + return gzip.NewReader(buf) + } + + disablePigzEnv := os.Getenv("MOBY_DISABLE_PIGZ") + if disablePigzEnv != "" { + if disablePigz, err := strconv.ParseBool(disablePigzEnv); err != nil { + return nil, err + } else if disablePigz { + return gzip.NewReader(buf) + } + } + + return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) +} + +func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { + return ioutils.NewReadCloserWrapper(readBuf, func() error { + cancel() + return readBuf.Close() + }) +} + +// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. +func DecompressStream(archive io.Reader) (io.ReadCloser, error) { + p := pools.BufioReader32KPool + buf := p.Get(archive) + bs, err := buf.Peek(10) + if err != nil && err != io.EOF { + // Note: we'll ignore any io.EOF error because there are some odd + // cases where the layer.tar file will be empty (zero bytes) and + // that results in an io.EOF from the Peek() call. So, in those + // cases we'll just treat it as a non-compressed stream and + // that means just create an empty layer. 
+ // See Issue 18170 + return nil, err + } + + compression := DetectCompression(bs) + switch compression { + case Uncompressed: + readBufWrapper := p.NewReadCloserWrapper(buf, buf) + return readBufWrapper, nil + case Gzip: + ctx, cancel := context.WithCancel(context.Background()) + + gzReader, err := gzDecompress(ctx, buf) + if err != nil { + cancel() + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) + return wrapReadCloser(readBufWrapper, cancel), nil + case Bzip2: + bz2Reader := bzip2.NewReader(buf) + readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) + return readBufWrapper, nil + case Xz: + ctx, cancel := context.WithCancel(context.Background()) + + xzReader, err := xzDecompress(ctx, buf) + if err != nil { + cancel() + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) + return wrapReadCloser(readBufWrapper, cancel), nil + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// CompressStream compresses the dest with specified compression algorithm. +func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { + p := pools.BufioWriter32KPool + buf := p.Get(dest) + switch compression { + case Uncompressed: + writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) + return writeBufWrapper, nil + case Gzip: + gzWriter := gzip.NewWriter(dest) + writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) + return writeBufWrapper, nil + case Bzip2, Xz: + // archive/bzip2 does not support writing, and there is no xz support at all + // However, this is not a problem as docker only currently generates gzipped tars + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to +// modify the contents or header of an entry in the archive. If the file already +// exists in the archive the TarModifierFunc will be called with the Header and +// a reader which will return the files content. If the file does not exist both +// header and content will be nil. +type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) + +// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the +// tar stream are modified if they match any of the keys in mods. 
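+//
+// A sketch (names are hypothetical), overwriting one entry when present:
+//
+//	mods := map[string]TarModifierFunc{
+//		"etc/hostname": func(path string, h *tar.Header, r io.Reader) (*tar.Header, []byte, error) {
+//			if h == nil {
+//				return nil, nil, nil // entry absent: leave the archive unchanged
+//			}
+//			return h, []byte("builder\n"), nil
+//		},
+//	}
+//	rc := ReplaceFileTarWrapper(layerTar, mods)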
+func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + defer inputTarStream.Close() + defer tarWriter.Close() + + modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { + header, data, err := modifier(name, original, tarReader) + switch { + case err != nil: + return err + case header == nil: + return nil + } + + header.Name = name + header.Size = int64(len(data)) + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + if len(data) != 0 { + if _, err := tarWriter.Write(data); err != nil { + return err + } + } + return nil + } + + var err error + var originalHeader *tar.Header + for { + originalHeader, err = tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + modifier, ok := mods[originalHeader.Name] + if !ok { + // No modifiers for this file, copy the header and data + if err := tarWriter.WriteHeader(originalHeader); err != nil { + pipeWriter.CloseWithError(err) + return + } + if _, err := pools.Copy(tarWriter, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + continue + } + delete(mods, originalHeader.Name) + + if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + // Apply the modifiers that haven't matched any files in the archive + for name, modifier := range mods { + if err := modify(name, nil, modifier, nil); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + pipeWriter.Close() + + }() + return pipeReader +} + +// Extension returns the extension of a file that uses the specified compression algorithm. +func (compression *Compression) Extension() string { + switch *compression { + case Uncompressed: + return "tar" + case Bzip2: + return "tar.bz2" + case Gzip: + return "tar.gz" + case Xz: + return "tar.xz" + } + return "" +} + +// FileInfoHeader creates a populated Header from fi. +// Compared to archive pkg this function fills in more information. +// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), +// which have been deleted since Go 1.9 archive/tar. 
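+//
+// A sketch:
+//
+//	fi, err := os.Lstat("/etc/hosts")
+//	if err != nil {
+//		return nil, err
+//	}
+//	hdr, err := FileInfoHeader("etc/hosts", fi, "")
+//	// hdr now uses PAX format with the modeISREG type bit set in hdr.Mode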
+func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return nil, err + } + hdr.Format = tar.FormatPAX + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + hdr.AccessTime = time.Time{} + hdr.ChangeTime = time.Time{} + hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) + hdr.Name = canonicalTarName(name, fi.IsDir()) + if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { + return nil, err + } + return hdr, nil +} + +// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar +// https://github.com/golang/go/commit/66b5a2f +func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { + fm := fi.Mode() + switch { + case fm.IsRegular(): + mode |= modeISREG + case fi.IsDir(): + mode |= modeISDIR + case fm&os.ModeSymlink != 0: + mode |= modeISLNK + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + mode |= modeISCHR + } else { + mode |= modeISBLK + } + case fm&os.ModeNamedPipe != 0: + mode |= modeISFIFO + case fm&os.ModeSocket != 0: + mode |= modeISSOCK + } + return mode +} + +// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem +// to a tar header +func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { + capability, _ := system.Lgetxattr(path, "security.capability") + if capability != nil { + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability) + } + return nil +} + +type tarWhiteoutConverter interface { + ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) + ConvertRead(*tar.Header, string) (bool, error) +} + +type tarAppender struct { + TarWriter *tar.Writer + Buffer *bufio.Writer + + // for hardlink mapping + SeenFiles map[uint64]string + IdentityMapping *idtools.IdentityMapping + ChownOpts *idtools.Identity + + // For packing and unpacking whiteout files in the + // non standard format. The whiteout files defined + // by the AUFS standard are used as the tar whiteout + // standard. + WhiteoutConverter tarWhiteoutConverter +} + +func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { + return &tarAppender{ + SeenFiles: make(map[uint64]string), + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + IdentityMapping: idMapping, + ChownOpts: chownOpts, + } +} + +// canonicalTarName provides a platform-independent and consistent posix-style +//path for files and directories to be archived regardless of the platform. 
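+// For example, on Windows canonicalTarName(`dir\file`, true) yields "dir/file/".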
+func canonicalTarName(name string, isDir bool) string { + name = CanonicalTarNameForPath(name) + + // suffix with '/' for directories + if isDir && !strings.HasSuffix(name, "/") { + name += "/" + } + return name +} + +// addTarFile adds to the tar archive a file from `path` as `name` +func (ta *tarAppender) addTarFile(path, name string) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + + var link string + if fi.Mode()&os.ModeSymlink != 0 { + var err error + link, err = os.Readlink(path) + if err != nil { + return err + } + } + + hdr, err := FileInfoHeader(name, fi, link) + if err != nil { + return err + } + if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { + return err + } + + // if it's not a directory and has more than 1 link, + // it's hard linked, so set the type flag accordingly + if !fi.IsDir() && hasHardlinks(fi) { + inode, err := getInodeFromStat(fi.Sys()) + if err != nil { + return err + } + // a link should have a name that it links too + // and that linked name should be first in the tar archive + if oldpath, ok := ta.SeenFiles[inode]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This Must be here for the writer math to add up! + } else { + ta.SeenFiles[inode] = name + } + } + + //check whether the file is overlayfs whiteout + //if yes, skip re-mapping container ID mappings. + isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 + + //handle re-mapping container ID mappings back to host ID mappings before + //writing tar headers/files. We skip whiteout files because they were written + //by the kernel and already have proper ownership relative to the host + if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { + fileIDPair, err := getFileUIDGID(fi.Sys()) + if err != nil { + return err + } + hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) + if err != nil { + return err + } + } + + // explicitly override with ChownOpts + if ta.ChownOpts != nil { + hdr.Uid = ta.ChownOpts.UID + hdr.Gid = ta.ChownOpts.GID + } + + if ta.WhiteoutConverter != nil { + wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) + if err != nil { + return err + } + + // If a new whiteout file exists, write original hdr, then + // replace hdr with wo to be written after. Whiteouts should + // always be written after the original. Note the original + // hdr may have been updated to be a whiteout with returning + // a whiteout header + if wo != nil { + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + return fmt.Errorf("tar: cannot use whiteout for non-empty file") + } + hdr = wo + } + } + + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + // We use system.OpenSequential to ensure we use sequential file + // access on Windows to avoid depleting the standby list. + // On Linux, this equates to a regular os.Open. 
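+		// The copy below is staged through ta.Buffer (a pooled bufio.Writer),
+		// so many small reads coalesce into larger writes to the tar stream.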
+ file, err := system.OpenSequential(path) + if err != nil { + return err + } + + ta.Buffer.Reset(ta.TarWriter) + defer ta.Buffer.Reset(nil) + _, err = io.Copy(ta.Buffer, file) + file.Close() + if err != nil { + return err + } + err = ta.Buffer.Flush() + if err != nil { + return err + } + } + + return nil +} + +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { + // hdr.Mode is in linux format, which we can use for sycalls, + // but for os.Foo() calls we need the mode converted to os.FileMode, + // so use hdrInfo.Mode() (they differ for e.g. setuid bits) + hdrInfo := hdr.FileInfo() + + switch hdr.Typeflag { + case tar.TypeDir: + // Create directory unless it exists as a directory already. + // In that case we just want to merge the two + if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { + if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { + return err + } + } + + case tar.TypeReg, tar.TypeRegA: + // Source is regular file. We use system.OpenFileSequential to use sequential + // file access to avoid depleting the standby list on Windows. + // On Linux, this equates to a regular os.OpenFile + file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) + if err != nil { + return err + } + if _, err := io.Copy(file, reader); err != nil { + file.Close() + return err + } + file.Close() + + case tar.TypeBlock, tar.TypeChar: + if inUserns { // cannot create devices in a userns + return nil + } + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeFifo: + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeLink: + targetPath := filepath.Join(extractDir, hdr.Linkname) + // check for hardlink breakout + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) + } + if err := os.Link(targetPath, path); err != nil { + return err + } + + case tar.TypeSymlink: + // path -> hdr.Linkname = targetPath + // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) + + // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because + // that symlink would first have to be created, which would be caught earlier, at this very check: + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) + } + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + + case tar.TypeXGlobalHeader: + logrus.Debug("PAX Global Extended Headers found and ignored") + return nil + + default: + return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) + } + + // Lchown is not supported on Windows. + if Lchown && runtime.GOOS != "windows" { + if chownOpts == nil { + chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} + } + if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { + return err + } + } + + var errors []string + for key, value := range hdr.Xattrs { + if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { + if err == syscall.ENOTSUP || err == syscall.EPERM { + // We ignore errors here because not all graphdrivers support + // xattrs *cough* old versions of AUFS *cough*. 
However only + // ENOTSUP should be emitted in that case, otherwise we still + // bail. + // EPERM occurs if modifying xattrs is not allowed. This can + // happen when running in userns with restrictions (ChromeOS). + errors = append(errors, err.Error()) + continue + } + return err + } + + } + + if len(errors) > 0 { + logrus.WithFields(logrus.Fields{ + "errors": errors, + }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") + } + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if err := handleLChmod(hdr, path, hdrInfo); err != nil { + return err + } + + aTime := hdr.AccessTime + if aTime.Before(hdr.ModTime) { + // Last access time should never be before last modified time. + aTime = hdr.ModTime + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. +func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + + // Fix the source path to work with long path names. This is a no-op + // on platforms other than Windows. + srcPath = fixVolumePathPrefix(srcPath) + + pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) + if err != nil { + return nil, err + } + + pipeReader, pipeWriter := io.Pipe() + + compressWriter, err := CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err + } + + go func() { + ta := newTarAppender( + idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), + compressWriter, + options.ChownOpts, + ) + ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) + + defer func() { + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Errorf("Can't close tar writer: %s", err) + } + if err := compressWriter.Close(); err != nil { + logrus.Errorf("Can't close compress writer: %s", err) + } + if err := pipeWriter.Close(); err != nil { + logrus.Errorf("Can't close pipe writer: %s", err) + } + }() + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + + stat, err := os.Lstat(srcPath) + if err != nil { + return + } + + if !stat.IsDir() { + // We can't later join a non-dir with any includes because the + // 'walk' will error if "file/." 
is stat-ed and "file" is not a + // directory. So, we must split the source path and use the + // basename as the include. + if len(options.IncludeFiles) > 0 { + logrus.Warn("Tar: Can't archive a file with includes") + } + + dir, base := SplitPathDirEntry(srcPath) + srcPath = dir + options.IncludeFiles = []string{base} + } + + if len(options.IncludeFiles) == 0 { + options.IncludeFiles = []string{"."} + } + + seen := make(map[string]bool) + + for _, include := range options.IncludeFiles { + rebaseName := options.RebaseNames[include] + + walkRoot := getWalkRoot(srcPath, include) + filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) + return nil + } + + relFilePath, err := filepath.Rel(srcPath, filePath) + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { + // Error getting relative path OR we are looking + // at the source directory path. Skip in both situations. + return nil + } + + if options.IncludeSourceDir && include == "." && relFilePath != "." { + relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) + } + + skip := false + + // If "include" is an exact match for the current file + // then even if there's an "excludePatterns" pattern that + // matches it, don't skip it. IOW, assume an explicit 'include' + // is asking for that file no matter what - which is true + // for some files, like .dockerignore and Dockerfile (sometimes) + if include != relFilePath { + skip, err = pm.Matches(relFilePath) + if err != nil { + logrus.Errorf("Error matching %s: %v", relFilePath, err) + return err + } + } + + if skip { + // If we want to skip this file and its a directory + // then we should first check to see if there's an + // excludes pattern (e.g. !dir/file) that starts with this + // dir. If so then we can't skip this dir. + + // Its not a dir then so we can just return/skip. + if !f.IsDir() { + return nil + } + + // No exceptions (!...) in patterns so just skip dir + if !pm.Exclusions() { + return filepath.SkipDir + } + + dirSlash := relFilePath + string(filepath.Separator) + + for _, pat := range pm.Patterns() { + if !pat.Exclusion() { + continue + } + if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { + // found a match - so can't skip this dir + return nil + } + } + + // No matching exclusion dir so just skip dir + return filepath.SkipDir + } + + if seen[relFilePath] { + return nil + } + seen[relFilePath] = true + + // Rename the base resource. + if rebaseName != "" { + var replacement string + if rebaseName != string(filepath.Separator) { + // Special case the root directory to replace with an + // empty string instead so that we don't end up with + // double slashes in the paths. + replacement = rebaseName + } + + relFilePath = strings.Replace(relFilePath, include, replacement, 1) + } + + if err := ta.addTarFile(filePath, relFilePath); err != nil { + logrus.Errorf("Can't add file %s to tar: %s", filePath, err) + // if pipe is broken, stop writing tar stream to it + if err == io.ErrClosedPipe { + return err + } + } + return nil + }) + } + }() + + return pipeReader, nil +} + +// Unpack unpacks the decompressedArchive to dest with options. 
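+// Entry names are normalized with filepath.Clean and checked with
+// filepath.Rel against dest, so a crafted name such as "../../etc/passwd"
+// fails with a breakoutError instead of escaping dest.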
+func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { + tr := tar.NewReader(decompressedArchive) + trBuf := pools.BufioReader32KPool.Get(nil) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + rootIDs := idMapping.RootPair() + whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) + + // Iterate through the files in the archive. +loop: + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return err + } + + // Normalize name, for safety and for a simple is-root check + // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: + // This keeps "..\" as-is, but normalizes "\..\" to "\". + hdr.Name = filepath.Clean(hdr.Name) + + for _, exclude := range options.ExcludePatterns { + if strings.HasPrefix(hdr.Name, exclude) { + continue loop + } + } + + // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in + // the filepath format for the OS on which the daemon is running. Hence + // the check for a slash-suffix MUST be done in an OS-agnostic way. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs) + if err != nil { + return err + } + } + } + + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return err + } + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + + // If path exits we almost always just want to remove and replace it + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing directory with a non-directory from the archive. + return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) + } + + if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing non-directory with a directory from the archive. + return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) + } + + if fi.IsDir() && hdr.Name == "." 
{ + continue + } + + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return err + } + } + } + trBuf.Reset(tr) + + if err := remapIDs(idMapping, hdr); err != nil { + return err + } + + if whiteoutConverter != nil { + writeFile, err := whiteoutConverter.ConvertRead(hdr, path) + if err != nil { + return err + } + if !writeFile { + continue + } + } + + if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { + return err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return err + } + } + return nil +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +// FIXME: specify behavior when target path exists vs. doesn't exist. +func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. +func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + dest = filepath.Clean(dest) + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + r := tarArchive + if decompress { + decompressedArchive, err := DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return Unpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func (archiver *Archiver) TarUntar(src, dst string) error { + logrus.Debugf("TarUntar(%s %s)", src, dst) + archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) + if err != nil { + return err + } + defer archive.Close() + options := &TarOptions{ + UIDMaps: archiver.IDMapping.UIDs(), + GIDMaps: archiver.IDMapping.GIDs(), + } + return archiver.Untar(archive, dst, options) +} + +// UntarPath untar a file from path to a destination, src is the source tar file path. +func (archiver *Archiver) UntarPath(src, dst string) error { + archive, err := os.Open(src) + if err != nil { + return err + } + defer archive.Close() + options := &TarOptions{ + UIDMaps: archiver.IDMapping.UIDs(), + GIDMaps: archiver.IDMapping.GIDs(), + } + return archiver.Untar(archive, dst, options) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. 
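+//
+// A sketch (hypothetical paths):
+//
+//	a := NewDefaultArchiver()
+//	err := a.CopyWithTar("/layers/app", "/workspace/app")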
+func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := os.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + + // if this Archiver is set up with ID mapping we need to create + // the new destination directory with the remapped root UID/GID pair + // as owner + rootIDs := archiver.IDMapping.RootPair() + // Create dst, copy src's content into it + logrus.Debugf("Creating dest directory: %s", dst) + if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { + return err + } + logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) + return archiver.TarUntar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) + srcSt, err := os.Stat(src) + if err != nil { + return err + } + + if srcSt.IsDir() { + return fmt.Errorf("Can't copy a directory") + } + + // Clean up the trailing slash. This must be done in an operating + // system specific manner. + if dst[len(dst)-1] == os.PathSeparator { + dst = filepath.Join(dst, filepath.Base(src)) + } + // Create the holding directory if necessary + if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil { + return err + } + + r, w := io.Pipe() + errC := make(chan error, 1) + + go func() { + defer close(errC) + + errC <- func() error { + defer w.Close() + + srcF, err := os.Open(src) + if err != nil { + return err + } + defer srcF.Close() + + hdr, err := tar.FileInfoHeader(srcSt, "") + if err != nil { + return err + } + hdr.Format = tar.FormatPAX + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + hdr.AccessTime = time.Time{} + hdr.ChangeTime = time.Time{} + hdr.Name = filepath.Base(dst) + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + if err := remapIDs(archiver.IDMapping, hdr); err != nil { + return err + } + + tw := tar.NewWriter(w) + defer tw.Close() + if err := tw.WriteHeader(hdr); err != nil { + return err + } + if _, err := io.Copy(tw, srcF); err != nil { + return err + } + return nil + }() + }() + defer func() { + if er := <-errC; err == nil && er != nil { + err = er + } + }() + + err = archiver.Untar(r, filepath.Dir(dst), nil) + if err != nil { + r.CloseWithError(err) + } + return err +} + +// IdentityMapping returns the IdentityMapping of the archiver. +func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { + return archiver.IDMapping +} + +func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { + ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) + hdr.Uid, hdr.Gid = ids.UID, ids.GID + return err +} + +// cmdStream executes a command, and returns its stdout as a stream. +// If the command fails to run or doesn't complete successfully, an error +// will be returned, including anything written on stderr. 
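+// The write end of the pipe is closed with an error that includes the
+// command's stderr output when the command exits non-zero, so readers
+// observe the failure reason.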
+func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { + cmd.Stdin = input + pipeR, pipeW := io.Pipe() + cmd.Stdout = pipeW + var errBuf bytes.Buffer + cmd.Stderr = &errBuf + + // Run the command and return the pipe + if err := cmd.Start(); err != nil { + return nil, err + } + + // Copy stdout to the returned pipe + go func() { + if err := cmd.Wait(); err != nil { + pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) + } else { + pipeW.Close() + } + }() + + return pipeR, nil +} + +// NewTempArchive reads the content of src into a temporary file, and returns the contents +// of that file as an archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { + f, err := ioutil.TempFile(dir, "") + if err != nil { + return nil, err + } + if _, err := io.Copy(f, src); err != nil { + return nil, err + } + if _, err := f.Seek(0, 0); err != nil { + return nil, err + } + st, err := f.Stat() + if err != nil { + return nil, err + } + size := st.Size() + return &TempArchive{File: f, Size: size}, nil +} + +// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. +func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.Close() + os.Remove(archive.File.Name()) + } + return n, err +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/extender/code/vendor/github.com/docker/docker/pkg/archive/archive_linux.go new file mode 100644 index 000000000..0601f7b0d --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/archive/archive_linux.go @@ -0,0 +1,261 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/containerd/continuity/fs" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) tarWhiteoutConverter { + if format == OverlayWhiteoutFormat { + return overlayWhiteoutConverter{inUserNS: inUserNS} + } + return nil +} + +type overlayWhiteoutConverter struct { + inUserNS bool +} + +func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { + // convert whiteouts to AUFS format + if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { + // we just rename the file and make it normal + dir, filename := filepath.Split(hdr.Name) + hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) + hdr.Mode = 0600 + hdr.Typeflag = tar.TypeReg + hdr.Size = 0 + } + + if fi.Mode()&os.ModeDir != 0 { + // convert opaque dirs to AUFS format by writing an empty file with the prefix + opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") + if err != nil { + return nil, err + } + if 
len(opaque) == 1 && opaque[0] == 'y' { + if hdr.Xattrs != nil { + delete(hdr.Xattrs, "trusted.overlay.opaque") + } + + // create a header for the whiteout file + // it should inherit some properties from the parent, but be a regular file + wo = &tar.Header{ + Typeflag: tar.TypeReg, + Mode: hdr.Mode & int64(os.ModePerm), + Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), + Size: 0, + Uid: hdr.Uid, + Uname: hdr.Uname, + Gid: hdr.Gid, + Gname: hdr.Gname, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + } + } + } + + return +} + +func (c overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + + // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay + if base == WhiteoutOpaqueDir { + err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) + if err != nil { + if c.inUserNS { + if err = replaceDirWithOverlayOpaque(dir); err != nil { + return false, errors.Wrapf(err, "replaceDirWithOverlayOpaque(%q) failed", dir) + } + } else { + return false, errors.Wrapf(err, "setxattr(%q, trusted.overlay.opaque=y)", dir) + } + } + // don't write the file itself + return false, err + } + + // if a file was deleted and we are using overlay, we need to create a character device + if strings.HasPrefix(base, WhiteoutPrefix) { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + + if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { + if c.inUserNS { + // Ubuntu and a few distros support overlayfs in userns. + // + // Although we can't call mknod directly in userns (at least on bionic kernel 4.15), + // we can still create 0,0 char device using mknodChar0Overlay(). + // + // NOTE: we don't need this hack for the containerd snapshotter+unpack model. + if err := mknodChar0Overlay(originalPath); err != nil { + return false, errors.Wrapf(err, "failed to mknodChar0UserNS(%q)", originalPath) + } + } else { + return false, errors.Wrapf(err, "failed to mknod(%q, S_IFCHR, 0)", originalPath) + } + } + if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { + return false, err + } + + // don't write the file itself + return false, nil + } + + return true, nil +} + +// mknodChar0Overlay creates 0,0 char device by mounting overlayfs and unlinking. +// This function can be used for creating 0,0 char device in userns on Ubuntu. 
+// +// Steps: +// * Mkdir lower,upper,merged,work +// * Create lower/dummy +// * Mount overlayfs +// * Unlink merged/dummy +// * Unmount overlayfs +// * Make sure a 0,0 char device is created as upper/dummy +// * Rename upper/dummy to cleansedOriginalPath +func mknodChar0Overlay(cleansedOriginalPath string) error { + dir := filepath.Dir(cleansedOriginalPath) + tmp, err := ioutil.TempDir(dir, "mc0o") + if err != nil { + return errors.Wrapf(err, "failed to create a tmp directory under %s", dir) + } + defer os.RemoveAll(tmp) + lower := filepath.Join(tmp, "l") + upper := filepath.Join(tmp, "u") + work := filepath.Join(tmp, "w") + merged := filepath.Join(tmp, "m") + for _, s := range []string{lower, upper, work, merged} { + if err := os.MkdirAll(s, 0700); err != nil { + return errors.Wrapf(err, "failed to mkdir %s", s) + } + } + dummyBase := "d" + lowerDummy := filepath.Join(lower, dummyBase) + if err := ioutil.WriteFile(lowerDummy, []byte{}, 0600); err != nil { + return errors.Wrapf(err, "failed to create a dummy lower file %s", lowerDummy) + } + mOpts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower, upper, work) + // docker/pkg/mount.Mount() requires procfs to be mounted. So we use syscall.Mount() directly instead. + if err := syscall.Mount("overlay", merged, "overlay", uintptr(0), mOpts); err != nil { + return errors.Wrapf(err, "failed to mount overlay (%s) on %s", mOpts, merged) + } + mergedDummy := filepath.Join(merged, dummyBase) + if err := os.Remove(mergedDummy); err != nil { + syscall.Unmount(merged, 0) + return errors.Wrapf(err, "failed to unlink %s", mergedDummy) + } + if err := syscall.Unmount(merged, 0); err != nil { + return errors.Wrapf(err, "failed to unmount %s", merged) + } + upperDummy := filepath.Join(upper, dummyBase) + if err := isChar0(upperDummy); err != nil { + return err + } + if err := os.Rename(upperDummy, cleansedOriginalPath); err != nil { + return errors.Wrapf(err, "failed to rename %s to %s", upperDummy, cleansedOriginalPath) + } + return nil +} + +func isChar0(path string) error { + osStat, err := os.Stat(path) + if err != nil { + return errors.Wrapf(err, "failed to stat %s", path) + } + st, ok := osStat.Sys().(*syscall.Stat_t) + if !ok { + return errors.Errorf("got unsupported stat for %s", path) + } + if os.FileMode(st.Mode)&syscall.S_IFMT != syscall.S_IFCHR { + return errors.Errorf("%s is not a character device, got mode=%d", path, st.Mode) + } + if st.Rdev != 0 { + return errors.Errorf("%s is not a 0,0 character device, got Rdev=%d", path, st.Rdev) + } + return nil +} + +// replaceDirWithOverlayOpaque replaces path with a new directory with trusted.overlay.opaque +// xattr. The contents of the directory are preserved. +func replaceDirWithOverlayOpaque(path string) error { + if path == "/" { + return errors.New("replaceDirWithOverlayOpaque: path must not be \"/\"") + } + dir := filepath.Dir(path) + tmp, err := ioutil.TempDir(dir, "rdwoo") + if err != nil { + return errors.Wrapf(err, "failed to create a tmp directory under %s", dir) + } + defer os.RemoveAll(tmp) + // newPath is a new empty directory crafted with trusted.overlay.opaque xattr. + // we copy the content of path into newPath, remove path, and rename newPath to path. 
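+	// NOTE: tmp is created under the same parent directory as path, which
+	// keeps the final os.Rename below on a single filesystem (rename(2)
+	// cannot cross mount points).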
+ newPath, err := createDirWithOverlayOpaque(tmp) + if err != nil { + return errors.Wrapf(err, "createDirWithOverlayOpaque(%q) failed", tmp) + } + if err := fs.CopyDir(newPath, path); err != nil { + return errors.Wrapf(err, "CopyDir(%q, %q) failed", newPath, path) + } + if err := os.RemoveAll(path); err != nil { + return err + } + return os.Rename(newPath, path) +} + +// createDirWithOverlayOpaque creates a directory with trusted.overlay.opaque xattr, +// without calling setxattr, so as to allow creating opaque dir in userns on Ubuntu. +func createDirWithOverlayOpaque(tmp string) (string, error) { + lower := filepath.Join(tmp, "l") + upper := filepath.Join(tmp, "u") + work := filepath.Join(tmp, "w") + merged := filepath.Join(tmp, "m") + for _, s := range []string{lower, upper, work, merged} { + if err := os.MkdirAll(s, 0700); err != nil { + return "", errors.Wrapf(err, "failed to mkdir %s", s) + } + } + dummyBase := "d" + lowerDummy := filepath.Join(lower, dummyBase) + if err := os.MkdirAll(lowerDummy, 0700); err != nil { + return "", errors.Wrapf(err, "failed to create a dummy lower directory %s", lowerDummy) + } + mOpts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower, upper, work) + // docker/pkg/mount.Mount() requires procfs to be mounted. So we use syscall.Mount() directly instead. + if err := syscall.Mount("overlay", merged, "overlay", uintptr(0), mOpts); err != nil { + return "", errors.Wrapf(err, "failed to mount overlay (%s) on %s", mOpts, merged) + } + mergedDummy := filepath.Join(merged, dummyBase) + if err := os.Remove(mergedDummy); err != nil { + syscall.Unmount(merged, 0) + return "", errors.Wrapf(err, "failed to rmdir %s", mergedDummy) + } + // upperDummy becomes a 0,0-char device file here + if err := os.Mkdir(mergedDummy, 0700); err != nil { + syscall.Unmount(merged, 0) + return "", errors.Wrapf(err, "failed to mkdir %s", mergedDummy) + } + // upperDummy becomes a directory with trusted.overlay.opaque xattr + // (but can't be verified in userns) + if err := syscall.Unmount(merged, 0); err != nil { + return "", errors.Wrapf(err, "failed to unmount %s", merged) + } + upperDummy := filepath.Join(upper, dummyBase) + return upperDummy, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/extender/code/vendor/github.com/docker/docker/pkg/archive/archive_other.go new file mode 100644 index 000000000..65a73354c --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/archive/archive_other.go @@ -0,0 +1,7 @@ +// +build !linux + +package archive // import "github.com/docker/docker/pkg/archive" + +func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) tarWhiteoutConverter { + return nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/archive/archive_unix.go new file mode 100644 index 000000000..1eec912b7 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/archive/archive_unix.go @@ -0,0 +1,114 @@ +// +build !windows + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "errors" + "os" + "path/filepath" + "syscall" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" + "golang.org/x/sys/unix" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. 
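+// On Linux and other Unix platforms the path is returned unchanged.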
+func fixVolumePathPrefix(srcPath string) string { + return srcPath +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. On Linux, we +// can't use filepath.Join(srcPath,include) because this will clean away +// a trailing "." or "/" which may be important. +func getWalkRoot(srcPath string, include string) string { + return srcPath + string(filepath.Separator) + include +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) string { + return p // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. + +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + // Currently go does not fill in the major/minors + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert + hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert + } + } + + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + inode = s.Ino + } + + return +} + +func getFileUIDGID(stat interface{}) (idtools.Identity, error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + return idtools.Identity{}, errors.New("cannot convert stat value to syscall.Stat_t") + } + return idtools.Identity{UID: int(s.Uid), GID: int(s.Gid)}, nil +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + if rsystem.RunningInUserNS() { + // cannot create a device if running in user namespace + return nil + } + + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= unix.S_IFBLK + case tar.TypeChar: + mode |= unix.S_IFCHR + case tar.TypeFifo: + mode |= unix.S_IFIFO + } + + return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + return nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/archive/archive_windows.go new file mode 100644 index 000000000..ae6b89fd7 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/archive/archive_windows.go @@ -0,0 +1,67 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/longpath" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. 
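+// On Windows this prepends the \\?\ long-path prefix via the longpath
+// package, so operations work on paths longer than MAX_PATH.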
+func fixVolumePathPrefix(srcPath string) string { + return longpath.AddPrefix(srcPath) +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. +func getWalkRoot(srcPath string, include string) string { + return filepath.Join(srcPath, include) +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) string { + return filepath.ToSlash(p) +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) + permPart := perm & os.ModePerm + noPermPart := perm &^ os.ModePerm + // Add the x bit: make everything +x from windows + permPart |= 0111 + permPart &= 0755 + + return noPermPart | permPart +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + // do nothing. no notion of Rdev, Nlink in stat on Windows + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + // do nothing. no notion of Inode in stat on Windows + return +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + return nil +} + +func getFileUIDGID(stat interface{}) (idtools.Identity, error) { + // no notion of file ownership mapping yet on Windows + return idtools.Identity{UID: 0, GID: 0}, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/archive/changes.go b/extender/code/vendor/github.com/docker/docker/pkg/archive/changes.go new file mode 100644 index 000000000..aedb91b03 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/archive/changes.go @@ -0,0 +1,445 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "syscall" + "time" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +// ChangeType represents the change type. +type ChangeType int + +const ( + // ChangeModify represents the modify operation. + ChangeModify = iota + // ChangeAdd represents the add operation. + ChangeAdd + // ChangeDelete represents the delete operation. + ChangeDelete +) + +func (c ChangeType) String() string { + switch c { + case ChangeModify: + return "C" + case ChangeAdd: + return "A" + case ChangeDelete: + return "D" + } + return "" +} + +// Change represents a change, it wraps the change type and path. +// It describes changes of the files in the path respect to the +// parent layers. The change could be modify, add, delete. +// This is used for layer diff. 
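+// For example, Change{Path: "/etc/app.conf", Kind: ChangeAdd} prints as
+// "A /etc/app.conf" via its String method.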
+type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + return fmt.Sprintf("%s %s", change.Kind, change.Path) +} + +// for sort.Sort +type changesByPath []Change + +func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } +func (c changesByPath) Len() int { return len(c) } +func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } + +// Gnu tar doesn't have sub-second mtime precision. The go tar +// writer (1.10+) does when using PAX format, but we round times to seconds +// to ensure archives have the same hashes for backwards compatibility. +// See https://github.com/moby/moby/pull/35739/commits/fb170206ba12752214630b269a40ac7be6115ed4. +// +// Non-sub-second is problematic when we apply changes via tar +// files. We handle this by comparing for exact times, *or* same +// second count and either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a.Equal(b) || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} + +func sameFsTimeSpec(a, b syscall.Timespec) bool { + return a.Sec == b.Sec && + (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) +} + +// Changes walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func Changes(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip) +} + +func aufsMetadataSkip(path string) (skip bool, err error) { + skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) + if err != nil { + skip = true + } + return +} + +func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { + f := filepath.Base(path) + + // If there is a whiteout, then the file was removed + if strings.HasPrefix(f, WhiteoutPrefix) { + originalFile := f[len(WhiteoutPrefix):] + return filepath.Join(filepath.Dir(path), originalFile), nil + } + + return "", nil +} + +type skipChange func(string) (bool, error) +type deleteChange func(string, string, os.FileInfo) (string, error) + +func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) { + var ( + changes []Change + changedDirs = make(map[string]struct{}) + ) + + err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(rw, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + if sc != nil { + if skip, err := sc(path); skip { + return err + } + } + + change := Change{ + Path: path, + } + + deletedFile, err := dc(rw, path, f) + if err != nil { + return err + } + + // Find out what kind of modification happened + if deletedFile != "" { + change.Path = deletedFile + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. 
+ // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. + if f.IsDir() { + changedDirs[path] = struct{}{} + } + if change.Kind == ChangeAdd || change.Kind == ChangeDelete { + parent := filepath.Dir(path) + if _, ok := changedDirs[parent]; !ok && parent != "/" { + changes = append(changes, Change{Path: parent, Kind: ChangeModify}) + changedDirs[parent] = struct{}{} + } + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +// FileInfo describes the information of a file. +type FileInfo struct { + parent *FileInfo + name string + stat *system.StatT + children map[string]*FileInfo + capability []byte + added bool +} + +// LookUp looks up the file information of a file. +func (info *FileInfo) LookUp(path string) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + parent := info + if path == string(os.PathSeparator) { + return info + } + + pathElements := strings.Split(path, string(os.PathSeparator)) + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + // As this runs on the daemon side, file paths are OS specific. + return string(os.PathSeparator) + } + return filepath.Join(info.parent.path(), info.name) +} + +func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { + + sizeAtEntry := len(*changes) + + if oldInfo == nil { + // add + change := Change{ + Path: info.path(), + Kind: ChangeAdd, + } + *changes = append(*changes, change) + info.added = true + } + + // We make a copy so we can modify it to detect additions + // also, we only recurse on the old dir if the new info is a directory + // otherwise any previous delete/change is considered recursive + oldChildren := make(map[string]*FileInfo) + if oldInfo != nil && info.isDir() { + for k, v := range oldInfo.children { + oldChildren[k] = v + } + } + + for name, newChild := range info.children { + oldChild := oldChildren[name] + if oldChild != nil { + // change? + oldStat := oldChild.stat + newStat := newChild.stat + // Note: We can't compare inode or ctime or blocksize here, because these change + // when copying a file into a container. However, that is not generally a problem + // because any content change will change mtime, and any status change should + // be visible when actually comparing the stat fields. 
The only time this + // breaks down is if some code intentionally hides a change by setting + // back mtime + if statDifferent(oldStat, newStat) || + !bytes.Equal(oldChild.capability, newChild.capability) { + change := Change{ + Path: newChild.path(), + Kind: ChangeModify, + } + *changes = append(*changes, change) + newChild.added = true + } + + // Remove from copy so we can detect deletions + delete(oldChildren, name) + } + + newChild.addChanges(oldChild, changes) + } + for _, oldChild := range oldChildren { + // delete + change := Change{ + Path: oldChild.path(), + Kind: ChangeDelete, + } + *changes = append(*changes, change) + } + + // If there were changes inside this directory, we need to add it, even if the directory + // itself wasn't changed. This is needed to properly save and restore filesystem permissions. + // As this runs on the daemon side, file paths are OS specific. + if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { + change := Change{ + Path: info.path(), + Kind: ChangeModify, + } + // Let's insert the directory entry before the recently added entries located inside this dir + *changes = append(*changes, change) // just to resize the slice, will be overwritten + copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) + (*changes)[sizeAtEntry] = change + } + +} + +// Changes add changes to file information. +func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { + var changes []Change + + info.addChanges(oldInfo, &changes) + + return changes +} + +func newRootFileInfo() *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + root := &FileInfo{ + name: string(os.PathSeparator), + children: make(map[string]*FileInfo), + } + return root +} + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// If oldDir is "", then all files in newDir will be Add-Changes. +func ChangesDirs(newDir, oldDir string) ([]Change, error) { + var ( + oldRoot, newRoot *FileInfo + ) + if oldDir == "" { + emptyDir, err := ioutil.TempDir("", "empty") + if err != nil { + return nil, err + } + defer os.Remove(emptyDir) + oldDir = emptyDir + } + oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) + if err != nil { + return nil, err + } + + return newRoot.Changes(oldRoot), nil +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. +func ChangesSize(newDir string, changes []Change) int64 { + var ( + size int64 + sf = make(map[uint64]struct{}) + ) + for _, change := range changes { + if change.Kind == ChangeModify || change.Kind == ChangeAdd { + file := filepath.Join(newDir, change.Path) + fileInfo, err := os.Lstat(file) + if err != nil { + logrus.Errorf("Can not stat %q: %s", file, err) + continue + } + + if fileInfo != nil && !fileInfo.IsDir() { + if hasHardlinks(fileInfo) { + inode := getIno(fileInfo) + if _, ok := sf[inode]; !ok { + size += fileInfo.Size() + sf[inode] = struct{}{} + } + } else { + size += fileInfo.Size() + } + } + } + } + return size +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. 
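ChangesDirs and ExportChanges (below) compose into the full directory-to-layer pipeline that the extender relies on. A hedged sketch, with a hypothetical helper name; passing nil ID maps means UIDs/GIDs are written through unmapped:

func exampleDirsToLayer(oldDir, newDir, outTar string) error {
	cs, err := ChangesDirs(newDir, oldDir) // diff newDir against oldDir
	if err != nil {
		return err
	}
	rc, err := ExportChanges(newDir, cs, nil, nil) // nil maps: no UID/GID remapping
	if err != nil {
		return err
	}
	defer rc.Close()
	f, err := os.Create(outTar)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.Copy(f, rc) // deletions become empty ".wh."-prefixed entries
	return err
}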
+func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { + reader, writer := io.Pipe() + go func() { + ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil) + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + sort.Sort(changesByPath(changes)) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + for _, change := range changes { + if change.Kind == ChangeDelete { + whiteOutDir := filepath.Dir(change.Path) + whiteOutBase := filepath.Base(change.Path) + whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) + timestamp := time.Now() + hdr := &tar.Header{ + Name: whiteOut[1:], + Size: 0, + ModTime: timestamp, + AccessTime: timestamp, + ChangeTime: timestamp, + } + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + logrus.Debugf("Can't write whiteout header: %s", err) + } + } else { + path := filepath.Join(dir, change.Path) + if err := ta.addTarFile(path, change.Path[1:]); err != nil { + logrus.Debugf("Can't add file %s to tar: %s", path, err) + } + } + } + + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Debugf("Can't close layer: %s", err) + } + if err := writer.Close(); err != nil { + logrus.Debugf("failed to close Changes writer: %s", err) + } + }() + return reader, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/extender/code/vendor/github.com/docker/docker/pkg/archive/changes_linux.go new file mode 100644 index 000000000..f8792b3d4 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/archive/changes_linux.go @@ -0,0 +1,286 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "sort" + "syscall" + "unsafe" + + "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" +) + +// walker is used to implement collectFileInfoForChanges on linux. Where this +// method in general returns the entire contents of two directory trees, we +// optimize some FS calls out on linux. In particular, we take advantage of the +// fact that getdents(2) returns the inode of each file in the directory being +// walked, which, when walking two trees in parallel to generate a list of +// changes, can be used to prune subtrees without ever having to lstat(2) them +// directly. Eliminating stat calls in this way can save seconds on large +// images. +type walker struct { + dir1 string + dir2 string + root1 *FileInfo + root2 *FileInfo +} + +// collectFileInfoForChanges returns a complete representation of the trees +// rooted at dir1 and dir2, with one important exception: any subtree or +// leaf where the inode and device numbers are an exact match between dir1 +// and dir2 will be pruned from the results. This method is *only* to be used +// for generating a list of changes between the two directories, as it does not +// reflect the full contents.
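In sketch form, the two-phase diff this file implements (and that ChangesDirs in changes.go drives) is: collect both trees, with device-and-inode-identical subtrees pruned by the walker, then compare the in-memory FileInfo graphs. Hypothetical helper name:

func exampleTreeDiff(oldDir, newDir string) ([]Change, error) {
	oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
	if err != nil {
		return nil, err
	}
	// Pruned (identical) subtrees never surface in the comparison below.
	return newRoot.Changes(oldRoot), nil
}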
+func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { + w := &walker{ + dir1: dir1, + dir2: dir2, + root1: newRootFileInfo(), + root2: newRootFileInfo(), + } + + i1, err := os.Lstat(w.dir1) + if err != nil { + return nil, nil, err + } + i2, err := os.Lstat(w.dir2) + if err != nil { + return nil, nil, err + } + + if err := w.walk("/", i1, i2); err != nil { + return nil, nil, err + } + + return w.root1, w.root2, nil +} + +// Given a FileInfo, its path info, and a reference to the root of the tree +// being constructed, register this file with the tree. +func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { + if fi == nil { + return nil + } + parent := root.LookUp(filepath.Dir(path)) + if parent == nil { + return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path) + } + info := &FileInfo{ + name: filepath.Base(path), + children: make(map[string]*FileInfo), + parent: parent, + } + cpath := filepath.Join(dir, path) + stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) + if err != nil { + return err + } + info.stat = stat + info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access + parent.children[info.name] = info + return nil +} + +// Walk a subtree rooted at the same path in both trees being iterated. For +// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d +func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { + // Register these nodes with the return trees, unless we're still at the + // (already-created) roots: + if path != "/" { + if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { + return err + } + if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { + return err + } + } + + is1Dir := i1 != nil && i1.IsDir() + is2Dir := i2 != nil && i2.IsDir() + + sameDevice := false + if i1 != nil && i2 != nil { + si1 := i1.Sys().(*syscall.Stat_t) + si2 := i2.Sys().(*syscall.Stat_t) + if si1.Dev == si2.Dev { + sameDevice = true + } + } + + // If these files are both non-existent, or leaves (non-dirs), we are done. + if !is1Dir && !is2Dir { + return nil + } + + // Fetch the names of all the files contained in both directories being walked: + var names1, names2 []nameIno + if is1Dir { + names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access + if err != nil { + return err + } + } + if is2Dir { + names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access + if err != nil { + return err + } + } + + // We have lists of the files contained in both parallel directories, sorted + // in the same order. Walk them in parallel, generating a unique merged list + // of all items present in either or both directories. 
+ var names []string + ix1 := 0 + ix2 := 0 + + for { + if ix1 >= len(names1) { + break + } + if ix2 >= len(names2) { + break + } + + ni1 := names1[ix1] + ni2 := names2[ix2] + + switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { + case -1: // ni1 < ni2 -- advance ni1 + // we will not encounter ni1 in names2 + names = append(names, ni1.name) + ix1++ + case 0: // ni1 == ni2 + if ni1.ino != ni2.ino || !sameDevice { + names = append(names, ni1.name) + } + ix1++ + ix2++ + case 1: // ni1 > ni2 -- advance ni2 + // we will not encounter ni2 in names1 + names = append(names, ni2.name) + ix2++ + } + } + for ix1 < len(names1) { + names = append(names, names1[ix1].name) + ix1++ + } + for ix2 < len(names2) { + names = append(names, names2[ix2].name) + ix2++ + } + + // For each of the names present in either or both of the directories being + // iterated, stat the name under each root, and recurse the pair of them: + for _, name := range names { + fname := filepath.Join(path, name) + var cInfo1, cInfo2 os.FileInfo + if is1Dir { + cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if is2Dir { + cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if err = w.walk(fname, cInfo1, cInfo2); err != nil { + return err + } + } + return nil +} + +// {name,inode} pairs used to support the early-pruning logic of the walker type +type nameIno struct { + name string + ino uint64 +} + +type nameInoSlice []nameIno + +func (s nameInoSlice) Len() int { return len(s) } +func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } + +// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode +// numbers further up the stack when reading directory contents. Unlike +// os.Readdirnames, which returns a list of filenames, this function returns a +// list of {filename,inode} pairs. +func readdirnames(dirname string) (names []nameIno, err error) { + var ( + size = 100 + buf = make([]byte, 4096) + nbuf int + bufp int + nb int + ) + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + names = make([]nameIno, 0, size) // Empty with room to grow. + for { + // Refill the buffer if necessary + if bufp >= nbuf { + bufp = 0 + nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux + if nbuf < 0 { + nbuf = 0 + } + if err != nil { + return nil, os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + nb, names = parseDirent(buf[bufp:nbuf], names) + bufp += nb + } + + sl := nameInoSlice(names) + sort.Sort(sl) + return sl, nil +} + +// parseDirent is a minor modification of unix.ParseDirent (linux version) +// which returns {name,inode} pairs instead of just names. +func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { + origlen := len(buf) + for len(buf) > 0 { + dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) + var name = string(bytes[0:clen(bytes[:])]) + if name == "." || name == ".." 
{ // Useless names + continue + } + names = append(names, nameIno{name, dirent.Ino}) + } + return origlen - len(buf), names +} + +func clen(n []byte) int { + for i := 0; i < len(n); i++ { + if n[i] == 0 { + return i + } + } + return len(n) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/extender/code/vendor/github.com/docker/docker/pkg/archive/changes_other.go new file mode 100644 index 000000000..ba744741c --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/archive/changes_other.go @@ -0,0 +1,97 @@ +// +build !linux + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/pkg/system" +) + +func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir) + errs <- err2 + }() + + // block until both routines have returned + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + return nil, nil, err + } + } + + return oldRoot, newRoot, nil +} + +func collectFileInfo(sourceDir string) (*FileInfo, error) { + root := newRootFileInfo() + + err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + relPath = filepath.Join(string(os.PathSeparator), relPath) + + // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. + // Temporary workaround. If the returned path starts with two backslashes, + // trim it down to a single backslash. Only relevant on Windows. + if runtime.GOOS == "windows" { + if strings.HasPrefix(relPath, `\\`) { + relPath = relPath[1:] + } + } + + if relPath == string(os.PathSeparator) { + return nil + } + + parent := root.LookUp(filepath.Dir(relPath)) + if parent == nil { + return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) + } + + info := &FileInfo{ + name: filepath.Base(relPath), + children: make(map[string]*FileInfo), + parent: parent, + } + + s, err := system.Lstat(path) + if err != nil { + return err + } + info.stat = s + + info.capability, _ = system.Lgetxattr(path, "security.capability") + + parent.children[info.name] = info + + return nil + }) + if err != nil { + return nil, err + } + return root, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/archive/changes_unix.go new file mode 100644 index 000000000..06217b716 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/archive/changes_unix.go @@ -0,0 +1,43 @@ +// +build !windows + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "os" + "syscall" + + "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" +) + +func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { + // Don't look at size for dirs, its not a good measure of change + if oldStat.Mode() != newStat.Mode() || + oldStat.UID() != newStat.UID() || + oldStat.GID() != newStat.GID() || + oldStat.Rdev() != newStat.Rdev() || + // Don't look at size or modification time for dirs, its not a good + // measure of change. 
See https://github.com/moby/moby/issues/9874 + // for a description of the issue with modification time, and + // https://github.com/moby/moby/pull/11422 for the change. + // (Note that in the Windows implementation of this function, + // modification time IS taken as a change). See + // https://github.com/moby/moby/pull/37982 for more information. + (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR && + (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0 +} + +func getIno(fi os.FileInfo) uint64 { + return fi.Sys().(*syscall.Stat_t).Ino +} + +func hasHardlinks(fi os.FileInfo) bool { + return fi.Sys().(*syscall.Stat_t).Nlink > 1 +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/archive/changes_windows.go new file mode 100644 index 000000000..9906685e4 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/archive/changes_windows.go @@ -0,0 +1,34 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "os" + + "github.com/docker/docker/pkg/system" +) + +func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { + // Note there is slight difference between the Linux and Windows + // implementations here. Due to https://github.com/moby/moby/issues/9874, + // and the fix at https://github.com/moby/moby/pull/11422, Linux does not + // consider a change to the directory time as a change. Windows on NTFS + // does. See https://github.com/moby/moby/pull/37982 for more information. + + if !sameFsTime(oldStat.Mtim(), newStat.Mtim()) || + oldStat.Mode() != newStat.Mode() || + oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode().IsDir() +} + +func getIno(fi os.FileInfo) (inode uint64) { + return +} + +func hasHardlinks(fi os.FileInfo) bool { + return false +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/archive/copy.go b/extender/code/vendor/github.com/docker/docker/pkg/archive/copy.go new file mode 100644 index 000000000..57fddac07 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/archive/copy.go @@ -0,0 +1,480 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "errors" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +// Errors used or returned by this file. +var ( + ErrNotDirectory = errors.New("not a directory") + ErrDirNotExists = errors.New("no such directory") + ErrCannotCopyDir = errors.New("cannot copy directory") + ErrInvalidCopySource = errors.New("invalid copy source content") +) + +// PreserveTrailingDotOrSeparator returns the given cleaned path (after +// processing using any utility functions from the path or filepath stdlib +// packages) and appends a trailing `/.` or `/` if its corresponding original +// path (from before being processed by utility functions from the path or +// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned +// path already ends in a `.` path segment, then another is not added. If the +// clean path already ends in the separator, then another is not added. 
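The contract above is easiest to see with a concrete value: filepath.Clean strips the trailing "/." that distinguishes "copy the directory's contents" from "copy the directory itself", and this function restores it. A hedged sketch assuming a unix separator:

func examplePreserveDot() string {
	orig := "/data/src/."
	cleaned := filepath.Clean(orig) // "/data/src": the "/." marker is gone
	return PreserveTrailingDotOrSeparator(cleaned, orig, '/') // "/data/src/."
}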
+func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string { + // Ensure paths are in platform semantics + cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1) + originalPath = strings.Replace(originalPath, "/", string(sep), -1) + + if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { + if !hasTrailingPathSeparator(cleanedPath, sep) { + // Add a separator if it doesn't already end with one (a cleaned + // path would only end in a separator if it is the root). + cleanedPath += string(sep) + } + cleanedPath += "." + } + + if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) { + cleanedPath += string(sep) + } + + return cleanedPath +} + +// assertsDirectory returns whether the given path is +// asserted to be a directory, i.e., the path ends with +// a trailing '/' or `/.`, assuming a path separator of `/`. +func assertsDirectory(path string, sep byte) bool { + return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path) +} + +// hasTrailingPathSeparator returns whether the given +// path ends with the system's path separator character. +func hasTrailingPathSeparator(path string, sep byte) bool { + return len(path) > 0 && path[len(path)-1] == sep +} + +// specifiesCurrentDir returns whether the given path specifies +// a "current directory", i.e., the last path segment is `.`. +func specifiesCurrentDir(path string) bool { + return filepath.Base(path) == "." +} + +// SplitPathDirEntry splits the given path between its directory name and its +// basename by first cleaning the path but preserves a trailing "." if the +// original path specified the current directory. +func SplitPathDirEntry(path string) (dir, base string) { + cleanedPath := filepath.Clean(filepath.FromSlash(path)) + + if specifiesCurrentDir(path) { + cleanedPath += string(os.PathSeparator) + "." + } + + return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) +} + +// TarResource archives the resource described by the given CopyInfo to a Tar +// archive. A non-nil error is returned if sourcePath does not exist or is +// asserted to be a directory but exists as another type of file. +// +// This function acts as a convenient wrapper around TarWithOptions, which +// requires a directory as the source path. TarResource accepts either a +// directory or a file path and correctly sets the Tar options. +func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { + return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) +} + +// TarResourceRebase is like TarResource but renames the first path element of +// items in the resulting tar archive to match the given rebaseName if not "". +func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) { + sourcePath = normalizePath(sourcePath) + if _, err = os.Lstat(sourcePath); err != nil { + // Catches the case where the source does not exist or is not a + // directory if asserted to be a directory, as this also causes an + // error. + return + } + + // Separate the source path between its directory and + // the entry in that directory which we are archiving. 
+ sourceDir, sourceBase := SplitPathDirEntry(sourcePath) + opts := TarResourceRebaseOpts(sourceBase, rebaseName) + + logrus.Debugf("copying %q from %q", sourceBase, sourceDir) + return TarWithOptions(sourceDir, opts) +} + +// TarResourceRebaseOpts does not perform the Tar, but instead just creates the rebase +// parameters to be sent to TarWithOptions (the TarOptions struct) +func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions { + filter := []string{sourceBase} + return &TarOptions{ + Compression: Uncompressed, + IncludeFiles: filter, + IncludeSourceDir: true, + RebaseNames: map[string]string{ + sourceBase: rebaseName, + }, + } +} + +// CopyInfo holds basic info about the source +// or destination path of a copy operation. +type CopyInfo struct { + Path string + Exists bool + IsDir bool + RebaseName string +} + +// CopyInfoSourcePath stats the given path to create a CopyInfo +// struct representing that resource for the source of an archive copy +// operation. The given path should be an absolute local path. A source path +// has all symlinks evaluated that appear before the last path separator ("/" +// on Unix). As it is to be a copy source, the path must exist. +func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { + // normalize the file path and then evaluate the symbolic link + // we will use the target file instead of the symbolic link if + // followLink is set + path = normalizePath(path) + + resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) + if err != nil { + return CopyInfo{}, err + } + + stat, err := os.Lstat(resolvedPath) + if err != nil { + return CopyInfo{}, err + } + + return CopyInfo{ + Path: resolvedPath, + Exists: true, + IsDir: stat.IsDir(), + RebaseName: rebaseName, + }, nil +} + +// CopyInfoDestinationPath stats the given path to create a CopyInfo +// struct representing that resource for the destination of an archive copy +// operation. The given path should be an absolute local path. +func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { + maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot. + path = normalizePath(path) + originalPath := path + + stat, err := os.Lstat(path) + + if err == nil && stat.Mode()&os.ModeSymlink == 0 { + // The path exists and is not a symlink. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil + } + + // While the path is a symlink. + for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { + if n > maxSymlinkIter { + // Don't follow symlinks more than this arbitrary number of times. + return CopyInfo{}, errors.New("too many symlinks in " + originalPath) + } + + // The path is a symbolic link. We need to evaluate it so that the + // destination of the copy operation is the link target and not the + // link itself. This is notably different than CopyInfoSourcePath which + // only evaluates symlinks before the last appearing path separator. + // Also note that it is okay if the last path element is a broken + // symlink as the copy operation should create the target. + var linkTarget string + + linkTarget, err = os.Readlink(path) + if err != nil { + return CopyInfo{}, err + } + + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + dstParent, _ := SplitPathDirEntry(path) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + path = linkTarget + stat, err = os.Lstat(path) + } + + if err != nil { + // It's okay if the destination path doesn't exist.
We can still + // continue the copy operation if the parent directory exists. + if !os.IsNotExist(err) { + return CopyInfo{}, err + } + + // Ensure destination parent dir exists. + dstParent, _ := SplitPathDirEntry(path) + + parentDirStat, err := os.Stat(dstParent) + if err != nil { + return CopyInfo{}, err + } + if !parentDirStat.IsDir() { + return CopyInfo{}, ErrNotDirectory + } + + return CopyInfo{Path: path}, nil + } + + // The path exists after resolving symlinks. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil +} + +// PrepareArchiveCopy prepares the given srcContent archive, which should +// contain the archived resource described by srcInfo, to the destination +// described by dstInfo. Returns the possibly modified content archive along +// with the path to the destination directory which it should be extracted to. +func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { + // Ensure in platform semantics + srcInfo.Path = normalizePath(srcInfo.Path) + dstInfo.Path = normalizePath(dstInfo.Path) + + // Separate the destination path between its directory and base + // components in case the source archive contents need to be rebased. + dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) + _, srcBase := SplitPathDirEntry(srcInfo.Path) + + switch { + case dstInfo.Exists && dstInfo.IsDir: + // The destination exists as a directory. No alteration + // to srcContent is needed as its contents can be + // simply extracted to the destination directory. + return dstInfo.Path, ioutil.NopCloser(srcContent), nil + case dstInfo.Exists && srcInfo.IsDir: + // The destination exists as some type of file and the source + // content is a directory. This is an error condition since + // you cannot copy a directory to an existing file location. + return "", nil, ErrCannotCopyDir + case dstInfo.Exists: + // The destination exists as some type of file and the source content + // is also a file. The source content entry will have to be renamed to + // have a basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case srcInfo.IsDir: + // The destination does not exist and the source content is an archive + // of a directory. The archive should be extracted to the parent of + // the destination path instead, and when it is, the directory that is + // created as a result should take the name of the destination path. + // The source content entries will have to be renamed to have a + // basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case assertsDirectory(dstInfo.Path, os.PathSeparator): + // The destination does not exist and is asserted to be created as a + // directory, but the source content is not a directory. This is an + // error condition since you cannot create a directory from a file + // source. + return "", nil, ErrDirNotExists + default: + // The last remaining case is when the destination does not exist, is + // not asserted to be a directory, and the source content is not an + // archive of a directory. 
In this case, the destination file will need + // to be created when the archive is extracted and the source content + // entry will have to be renamed to have a basename which matches the + // destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + } + +} + +// RebaseArchiveEntries rewrites the given srcContent archive replacing +// an occurrence of oldBase with newBase at the beginning of entry names. +func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser { + if oldBase == string(os.PathSeparator) { + // If oldBase specifies the root directory, use an empty string as + // oldBase instead so that newBase doesn't replace the path separator + // that all paths will start with. + oldBase = "" + } + + rebased, w := io.Pipe() + + go func() { + srcTar := tar.NewReader(srcContent) + rebasedTar := tar.NewWriter(w) + + for { + hdr, err := srcTar.Next() + if err == io.EOF { + // Signals end of archive. + rebasedTar.Close() + w.Close() + return + } + if err != nil { + w.CloseWithError(err) + return + } + + // srcContent tar stream, as served by TarWithOptions(), is + // definitely in PAX format, but tar.Next() mistakenly guesses it + // as USTAR, which creates a problem: if the newBase is >100 + // characters long, WriteHeader() returns an error like + // "archive/tar: cannot encode header: Format specifies USTAR; and USTAR cannot encode Name=...". + // + // To fix, set the format to PAX here. See docker/for-linux issue #484. + hdr.Format = tar.FormatPAX + hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) + if hdr.Typeflag == tar.TypeLink { + hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1) + } + + if err = rebasedTar.WriteHeader(hdr); err != nil { + w.CloseWithError(err) + return + } + + if _, err = io.Copy(rebasedTar, srcTar); err != nil { + w.CloseWithError(err) + return + } + } + }() + + return rebased +} + +// TODO @gupta-ak. These might have to be changed in the future to be +// continuity driver aware as well to support LCOW. + +// CopyResource performs an archive copy from the given source path to the +// given destination path. The source path MUST exist and the destination +// path's parent directory must exist. +func CopyResource(srcPath, dstPath string, followLink bool) error { + var ( + srcInfo CopyInfo + err error + ) + + // Ensure in platform semantics + srcPath = normalizePath(srcPath) + dstPath = normalizePath(dstPath) + + // Clean the source and destination paths. + srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator) + dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator) + + if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { + return err + } + + content, err := TarResource(srcInfo) + if err != nil { + return err + } + defer content.Close() + + return CopyTo(content, srcInfo, dstPath) +} + +// CopyTo handles extracting the given content whose +// entries should be sourced from srcInfo to dstPath. +func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error { + // The destination path need not exist, but CopyInfoDestinationPath will + // ensure that at least the parent directory exists.
+ dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath)) + if err != nil { + return err + } + + dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) + if err != nil { + return err + } + defer copyArchive.Close() + + options := &TarOptions{ + NoLchown: true, + NoOverwriteDirNonDir: true, + } + + return Untar(copyArchive, dstDir, options) +} + +// ResolveHostSourcePath decides real path need to be copied with parameters such as +// whether to follow symbol link or not, if followLink is true, resolvedPath will return +// link target of any symbol link file, else it will only resolve symlink of directory +// but return symbol link file itself without resolving. +func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) { + if followLink { + resolvedPath, err = filepath.EvalSymlinks(path) + if err != nil { + return + } + + resolvedPath, rebaseName = GetRebaseName(path, resolvedPath) + } else { + dirPath, basePath := filepath.Split(path) + + // if not follow symbol link, then resolve symbol link of parent dir + var resolvedDirPath string + resolvedDirPath, err = filepath.EvalSymlinks(dirPath) + if err != nil { + return + } + // resolvedDirPath will have been cleaned (no trailing path separators) so + // we can manually join it with the base path element. + resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath + if hasTrailingPathSeparator(path, os.PathSeparator) && + filepath.Base(path) != filepath.Base(resolvedPath) { + rebaseName = filepath.Base(path) + } + } + return resolvedPath, rebaseName, nil +} + +// GetRebaseName normalizes and compares path and resolvedPath, +// return completed resolved path and rebased file name +func GetRebaseName(path, resolvedPath string) (string, string) { + // linkTarget will have been cleaned (no trailing path separators and dot) so + // we can manually join it with them + var rebaseName string + if specifiesCurrentDir(path) && + !specifiesCurrentDir(resolvedPath) { + resolvedPath += string(filepath.Separator) + "." + } + + if hasTrailingPathSeparator(path, os.PathSeparator) && + !hasTrailingPathSeparator(resolvedPath, os.PathSeparator) { + resolvedPath += string(filepath.Separator) + } + + if filepath.Base(path) != filepath.Base(resolvedPath) { + // In the case where the path had a trailing separator and a symlink + // evaluation has changed the last path component, we will need to + // rebase the name in the archive that is being copied to match the + // originally requested name. 
+ rebaseName = filepath.Base(path) + } + return resolvedPath, rebaseName +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/archive/copy_unix.go new file mode 100644 index 000000000..3958364f5 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/archive/copy_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.ToSlash(path) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/archive/copy_windows.go new file mode 100644 index 000000000..a878d1bac --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/archive/copy_windows.go @@ -0,0 +1,9 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.FromSlash(path) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/archive/diff.go b/extender/code/vendor/github.com/docker/docker/pkg/archive/diff.go new file mode 100644 index 000000000..146e21fe1 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/archive/diff.go @@ -0,0 +1,260 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { + tr := tar.NewReader(layer) + trBuf := pools.BufioReader32KPool.Get(tr) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + unpackedPaths := make(map[string]struct{}) + + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + + aufsTempdir := "" + aufsHardlinks := make(map[string]*tar.Header) + + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return 0, err + } + + size += hdr.Size + + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) + + // Windows does not support filenames with colons in them. Ignore + // these files. This is not a problem though (although it might + // appear that it is). Let's suppose a client is running docker pull. + // The daemon it points to is Windows. Would it make sense for the + // client to be doing a docker pull Ubuntu for example (which has files + // with colons in the name under /usr/share/man/man3)? No, absolutely + // not as it would really only make sense that they were pulling a + // Windows image. However, for development, it is necessary to be able + // to pull Linux images which are in the repository. + // + // TODO Windows. 
Once the registry is aware of what images are Windows- + // specific or Linux-specific, this warning should be changed to an error + // to cater for the situation where someone does manage to upload a Linux + // image but have it tagged as Windows inadvertently. + if runtime.GOOS == "windows" { + if strings.Contains(hdr.Name, ":") { + logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) + continue + } + } + + // Note: as these operations are platform specific, so must the slash be. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists. + // This happened in some tests where an image had a tarfile without any + // parent directories. + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = system.MkdirAll(parentPath, 0600, "") + if err != nil { + return 0, err + } + } + } + + // Skip AUFS metadata dirs + if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) { + // Regular files inside /.wh..wh.plnk can be used as hardlink targets + // We don't want this directory, but we need the files in them so that + // such hardlinks can be resolved. + if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { + basename := filepath.Base(hdr.Name) + aufsHardlinks[basename] = hdr + if aufsTempdir == "" { + if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { + return 0, err + } + defer os.RemoveAll(aufsTempdir) + } + if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil { + return 0, err + } + } + + if hdr.Name != WhiteoutOpaqueDir { + continue + } + } + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return 0, err + } + + // Note: as these operations are platform specific, so must the slash be. + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + base := filepath.Base(path) + + if strings.HasPrefix(base, WhiteoutPrefix) { + dir := filepath.Dir(path) + if base == WhiteoutOpaqueDir { + _, err := os.Lstat(dir) + if err != nil { + return 0, err + } + err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + if os.IsNotExist(err) { + err = nil // parent was deleted + } + return err + } + if path == dir { + return nil + } + if _, exists := unpackedPaths[path]; !exists { + err := os.RemoveAll(path) + return err + } + return nil + }) + if err != nil { + return 0, err + } + } else { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + if err := os.RemoveAll(originalPath); err != nil { + return 0, err + } + } + } else { + // If the path exists we almost always just want to remove and replace it. + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer).
+ if fi, err := os.Lstat(path); err == nil { + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return 0, err + } + } + } + + trBuf.Reset(tr) + srcData := io.Reader(trBuf) + srcHdr := hdr + + // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so + // we manually retarget these into the temporary files we extracted them into + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { + linkBasename := filepath.Base(hdr.Linkname) + srcHdr = aufsHardlinks[linkBasename] + if srcHdr == nil { + return 0, fmt.Errorf("Invalid aufs hardlink") + } + tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) + if err != nil { + return 0, err + } + defer tmpFile.Close() + srcData = tmpFile + } + + if err := remapIDs(idMapping, srcHdr); err != nil { + return 0, err + } + + if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil { + return 0, err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + unpackedPaths[path] = struct{}{} + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return 0, err + } + } + + return size, nil +} + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (int64, error) { + return applyLayerHandler(dest, layer, &TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. 
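ApplyLayer and ApplyUncompressedLayer (below) are the consuming half of the diff/apply pair: they replay adds, modifies, and ".wh." whiteout deletes from a layer tar onto a root directory. A minimal sketch; the tarball path is hypothetical:

func exampleApplyLayer(dest, layerTar string) (int64, error) {
	f, err := os.Open(layerTar) // may be gzip/bzip2/xz; ApplyLayer decompresses
	if err != nil {
		return 0, err
	}
	defer f.Close()
	return ApplyLayer(dest, f) // returns the unpacked size in bytes
}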
+func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} + +// do the bulk load of ApplyLayer, but allow for not calling DecompressStream +func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { + dest = filepath.Clean(dest) + + // We need to be able to set any perms + if runtime.GOOS != "windows" { + oldmask, err := system.Umask(0) + if err != nil { + return 0, err + } + defer system.Umask(oldmask) + } + + if decompress { + decompLayer, err := DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompLayer.Close() + layer = decompLayer + } + return UnpackLayer(dest, layer, options) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/archive/time_linux.go b/extender/code/vendor/github.com/docker/docker/pkg/archive/time_linux.go new file mode 100644 index 000000000..797143ee8 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/archive/time_linux.go @@ -0,0 +1,16 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = (1 << 30) - 2 + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/extender/code/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go new file mode 100644 index 000000000..f58bf227f --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/extender/code/vendor/github.com/docker/docker/pkg/archive/whiteouts.go new file mode 100644 index 000000000..4c072a87e --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/archive/whiteouts.go @@ -0,0 +1,23 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +// Whiteouts are files with a special meaning for the layered filesystem. +// Docker uses AUFS whiteout files inside exported archives. In other +// filesystems these files are generated/handled on tar creation/extraction. + +// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a +// filename this means that file has been removed from the base layer. +const WhiteoutPrefix = ".wh." + +// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not +// for removing an actual file. Normally these files are excluded from exported +// archives. +const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix + +// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other +// layers. Normally these should not go into exported archives and all changed +// hardlinks should be copied to the top layer. +const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" + +// WhiteoutOpaqueDir file means directory has been made opaque - meaning +// readdir calls to this directory do not follow to lower layers. 
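Consumers that walk a layer tar by hand usually classify each entry against these constants before touching the filesystem, and the order of the checks matters because the meta prefix is itself a doubled WhiteoutPrefix. A hedged sketch (hypothetical helper; needs path/filepath and strings):

func exampleClassify(name string) string {
	base := filepath.Base(name)
	switch {
	case base == WhiteoutOpaqueDir:
		return "opaque directory marker"
	case strings.HasPrefix(base, WhiteoutMetaPrefix):
		return "aufs metadata, normally skipped"
	case strings.HasPrefix(base, WhiteoutPrefix):
		// .wh.foo means "foo was deleted in this layer"
		return "delete " + filepath.Join(filepath.Dir(name), strings.TrimPrefix(base, WhiteoutPrefix))
	default:
		return "create or modify " + name
	}
}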
+const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/extender/code/vendor/github.com/docker/docker/pkg/archive/wrap.go b/extender/code/vendor/github.com/docker/docker/pkg/archive/wrap.go new file mode 100644 index 000000000..85435694c --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/archive/wrap.go @@ -0,0 +1,59 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "bytes" + "io" +) + +// Generate generates a new archive from the content provided +// as input. +// +// `files` is a sequence of path/content pairs. A new file is +// added to the archive for each pair. +// If the last pair is incomplete, the file is created with +// empty content. For example: +// +// Generate("foo.txt", "hello world", "emptyfile") +// +// The above call will return an archive with 2 files: +// * ./foo.txt with content "hello world" +// * ./emptyfile with empty content + +// FIXME: stream content instead of buffering +// FIXME: specify permissions and other archive metadata +func Generate(input ...string) (io.Reader, error) { + files := parseStringPairs(input...) + buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, file := range files { + name, content := file[0], file[1] + hdr := &tar.Header{ + Name: name, + Size: int64(len(content)), + } + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + if _, err := tw.Write([]byte(content)); err != nil { + return nil, err + } + } + if err := tw.Close(); err != nil { + return nil, err + } + return buf, nil +} + +func parseStringPairs(input ...string) (output [][2]string) { + output = make([][2]string, 0, len(input)/2+1) + for i := 0; i < len(input); i += 2 { + var pair [2]string + pair[0] = input[i] + if i+1 < len(input) { + pair[1] = input[i+1] + } + output = append(output, pair) + } + return +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go b/extender/code/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go new file mode 100644 index 000000000..6bb285123 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go @@ -0,0 +1,49 @@ +package broadcaster // import "github.com/docker/docker/pkg/broadcaster" + +import ( + "io" + "sync" +) + +// Unbuffered broadcasts writes to multiple io.WriteClosers without buffering. +type Unbuffered struct { + mu sync.Mutex + writers []io.WriteCloser +} + +// Add adds a new io.WriteCloser. +func (w *Unbuffered) Add(writer io.WriteCloser) { + w.mu.Lock() + w.writers = append(w.writers, writer) + w.mu.Unlock() +} + +// Write writes bytes to all writers. Failed writers will be evicted during +// this call. +func (w *Unbuffered) Write(p []byte) (n int, err error) { + w.mu.Lock() + var evict []int + for i, sw := range w.writers { + if n, err := sw.Write(p); err != nil || n != len(p) { + // On error, evict the writer + evict = append(evict, i) + } + } + for n, i := range evict { + w.writers = append(w.writers[:i-n], w.writers[i-n+1:]...) + } + w.mu.Unlock() + return len(p), nil +} + +// Clean closes and removes all writers.
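This is the kind of fan-out the daemon uses for attach streams: one Write is mirrored to every registered client, and a failing client is evicted rather than failing the broadcast. Hedged usage sketch (hypothetical function; *os.File satisfies io.WriteCloser):

func exampleBroadcast(sinks ...io.WriteCloser) error {
	var b Unbuffered
	for _, s := range sinks {
		b.Add(s)
	}
	// Failed sinks are evicted during the call; Write still returns len(p), nil.
	if _, err := b.Write([]byte("hello\n")); err != nil {
		return err
	}
	return b.Clean() // closes and drops every attached writer
}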
+func (w *Unbuffered) Clean() error { + w.mu.Lock() + for _, sw := range w.writers { + sw.Close() + } + w.writers = nil + w.mu.Unlock() + return nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go b/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go new file mode 100644 index 000000000..2d9d66283 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go @@ -0,0 +1,73 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" +) + +// NewArchiver returns a new Archiver which uses chrootarchive.Untar +func NewArchiver(idMapping *idtools.IdentityMapping) *archive.Archiver { + if idMapping == nil { + idMapping = &idtools.IdentityMapping{} + } + return &archive.Archiver{ + Untar: Untar, + IDMapping: idMapping, + } +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. +func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + if options == nil { + options = &archive.TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + rootIDs := idMapping.RootPair() + + dest = filepath.Clean(dest) + if _, err := os.Stat(dest); os.IsNotExist(err) { + if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil { + return err + } + } + + r := ioutil.NopCloser(tarArchive) + if decompress { + decompressedArchive, err := archive.DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return invokeUnpack(r, dest, options) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go new file mode 100644 index 000000000..5df8afd66 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go @@ -0,0 +1,88 @@ +// +build !windows + +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +// untar is the entry-point for docker-untar on re-exec. This is not used on +// Windows as it does not support chroot, hence no point sandboxing through +// chroot and rexec. 
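The docker-untar entrypoint below only runs when the binary re-executes itself; the registration half lives in this package's init (not part of this hunk) and in upstream docker looks roughly like the sketch here, with reexec.Init() called at the top of main() so a re-exec'd child dispatches into untar instead of the normal program:

func init() {
	// Maps the re-exec name used by reexec.Command("docker-untar", ...) to
	// its entrypoint; reexec.Init() performs the dispatch in the child.
	reexec.Register("docker-untar", untar)
}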
+func untar() { + runtime.LockOSThread() + flag.Parse() + + var options *archive.TarOptions + + //read the options from the pipe "ExtraFiles" + if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { + fatal(err) + } + + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + + if err := archive.Unpack(os.Stdin, "/", options); err != nil { + fatal(err) + } + // fully consume stdin in case it is zero padded + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error { + + // We can't pass a potentially large exclude list directly via cmd line + // because we easily overrun the kernel's max argument/environment size + // when the full image list is passed (e.g. when this is used by + // `docker load`). We will marshall the options via a pipe to the + // child + r, w, err := os.Pipe() + if err != nil { + return fmt.Errorf("Untar pipe failure: %v", err) + } + + cmd := reexec.Command("docker-untar", dest) + cmd.Stdin = decompressedArchive + + cmd.ExtraFiles = append(cmd.ExtraFiles, r) + output := bytes.NewBuffer(nil) + cmd.Stdout = output + cmd.Stderr = output + + if err := cmd.Start(); err != nil { + w.Close() + return fmt.Errorf("Untar error on re-exec cmd: %v", err) + } + //write the options to the pipe for the untar exec to read + if err := json.NewEncoder(w).Encode(options); err != nil { + w.Close() + return fmt.Errorf("Untar json encode to pipe failed: %v", err) + } + w.Close() + + if err := cmd.Wait(); err != nil { + // when `xz -d -c -q | docker-untar ...` failed on docker-untar side, + // we need to exhaust `xz`'s output, otherwise the `xz` side will be + // pending on write pipe forever + io.Copy(ioutil.Discard, decompressedArchive) + + return fmt.Errorf("Error processing tar file(%v): %s", err, output) + } + return nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go new file mode 100644 index 000000000..f2973132a --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go @@ -0,0 +1,22 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "io" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/longpath" +) + +// chroot is not supported by Windows +func chroot(path string) error { + return nil +} + +func invokeUnpack(decompressedArchive io.ReadCloser, + dest string, + options *archive.TarOptions) error { + // Windows is different to Linux here because Windows does not support + // chroot. Hence there is no point sandboxing a chrooted process to + // do the unpack. We call inline instead within the daemon process. 
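+ // longpath.AddPrefix turns a path like C:\dest into \\?\C:\dest so that + // paths longer than the legacy MAX_PATH limit still work.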
+ return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go b/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go new file mode 100644 index 000000000..9802fad51 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go @@ -0,0 +1,113 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/mount" + rsystem "github.com/opencontainers/runc/libcontainer/system" + "golang.org/x/sys/unix" +) + +// chroot on linux uses pivot_root instead of chroot +// pivot_root takes a new root and an old root. +// Old root must be a sub-dir of new root, it is where the current rootfs will reside after the call to pivot_root. +// New root is where the new rootfs is set to. +// Old root is removed after the call to pivot_root so it is no longer available under the new root. +// This is similar to how libcontainer sets up a container's rootfs +func chroot(path string) (err error) { + // if the engine is running in a user namespace we need to use actual chroot + if rsystem.RunningInUserNS() { + return realChroot(path) + } + if err := unix.Unshare(unix.CLONE_NEWNS); err != nil { + return fmt.Errorf("Error creating mount namespace before pivot: %v", err) + } + + // Make everything in new ns slave. + // Don't use `private` here as this could race where the mountns gets a + // reference to a mount and an unmount from the host does not propagate, + // which could potentially cause transient errors for other operations, + // even though this should be relatively small window here `slave` should + // not cause any problems. 
+ if err := mount.MakeRSlave("/"); err != nil { + return err + } + + if mounted, _ := mount.Mounted(path); !mounted { + if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil { + return realChroot(path) + } + } + + // setup oldRoot for pivot_root + pivotDir, err := ioutil.TempDir(path, ".pivot_root") + if err != nil { + return fmt.Errorf("Error setting up pivot dir: %v", err) + } + + var mounted bool + defer func() { + if mounted { + // make sure pivotDir is not mounted before we try to remove it + if errCleanup := unix.Unmount(pivotDir, unix.MNT_DETACH); errCleanup != nil { + if err == nil { + err = errCleanup + } + return + } + } + + errCleanup := os.Remove(pivotDir) + // pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful + // because we already cleaned it up on failed pivot_root + if errCleanup != nil && !os.IsNotExist(errCleanup) { + errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup) + if err == nil { + err = errCleanup + } + } + }() + + if err := unix.PivotRoot(path, pivotDir); err != nil { + // If pivot fails, fall back to the normal chroot after cleaning up temp dir + if err := os.Remove(pivotDir); err != nil { + return fmt.Errorf("Error cleaning up after failed pivot: %v", err) + } + return realChroot(path) + } + mounted = true + + // This is the new path for where the old root (prior to the pivot) has been moved to + // This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction + pivotDir = filepath.Join("/", filepath.Base(pivotDir)) + + if err := unix.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root: %v", err) + } + + // Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host + if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil { + return fmt.Errorf("Error making old root private after pivot: %v", err) + } + + // Now unmount the old root so it's no longer visible from the new root + if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil { + return fmt.Errorf("Error while unmounting old root after pivot: %v", err) + } + mounted = false + + return nil +} + +func realChroot(path string) error { + if err := unix.Chroot(path); err != nil { + return fmt.Errorf("Error after fallback to chroot: %v", err) + } + if err := unix.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root after chroot: %v", err) + } + return nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go new file mode 100644 index 000000000..9a1ee5875 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go @@ -0,0 +1,12 @@ +// +build !windows,!linux + +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import "golang.org/x/sys/unix" + +func chroot(path string) error { + if err := unix.Chroot(path); err != nil { + return err + } + return unix.Chdir("/") +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go b/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go new file mode 100644 index 000000000..7712cc17c --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go @@ -0,0 +1,23 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "io" + + 
"github.com/docker/docker/pkg/archive" +) + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can only be +// uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (size int64, err error) { + return applyLayerHandler(dest, layer, &archive.TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go new file mode 100644 index 000000000..d96a09f8f --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go @@ -0,0 +1,130 @@ +//+build !windows + +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +type applyLayerResponse struct { + LayerSize int64 `json:"layerSize"` +} + +// applyLayer is the entry-point for docker-applylayer on re-exec. This is not +// used on Windows as it does not support chroot, hence no point sandboxing +// through chroot and rexec. +func applyLayer() { + + var ( + tmpDir string + err error + options *archive.TarOptions + ) + runtime.LockOSThread() + flag.Parse() + + inUserns := rsystem.RunningInUserNS() + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + defer system.Umask(oldmask) + if err != nil { + fatal(err) + } + + if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil { + fatal(err) + } + + if inUserns { + options.InUserNS = true + } + + if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil { + fatal(err) + } + + os.Setenv("TMPDIR", tmpDir) + size, err := archive.UnpackLayer("/", os.Stdin, options) + os.RemoveAll(tmpDir) + if err != nil { + fatal(err) + } + + encoder := json.NewEncoder(os.Stdout) + if err := encoder.Encode(applyLayerResponse{size}); err != nil { + fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err)) + } + + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. 
+func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + if options == nil { + options = &archive.TarOptions{} + if rsystem.RunningInUserNS() { + options.InUserNS = true + } + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + data, err := json.Marshal(options) + if err != nil { + return 0, fmt.Errorf("ApplyLayer json encode: %v", err) + } + + cmd := reexec.Command("docker-applyLayer", dest) + cmd.Stdin = layer + cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) + + outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) + cmd.Stdout, cmd.Stderr = outBuf, errBuf + + if err = cmd.Run(); err != nil { + return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) + } + + // Stdout should be a valid JSON struct representing an applyLayerResponse. + response := applyLayerResponse{} + decoder := json.NewDecoder(outBuf) + if err = decoder.Decode(&response); err != nil { + return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) + } + + return response.LayerSize, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go new file mode 100644 index 000000000..8f3f3a4a8 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go @@ -0,0 +1,45 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/longpath" +) + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. +func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + + // Ensure it is a Windows-style volume path + dest = longpath.AddPrefix(dest) + + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + + tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract") + if err != nil { + return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. 
%s", dest, err) + } + + s, err := archive.UnpackLayer(dest, layer, nil) + os.RemoveAll(tmpDir) + if err != nil { + return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err) + } + + return s, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go new file mode 100644 index 000000000..a15e4bb83 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go @@ -0,0 +1,28 @@ +// +build !windows + +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Register("docker-applyLayer", applyLayer) + reexec.Register("docker-untar", untar) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +// flush consumes all the bytes from the reader discarding +// any errors +func flush(r io.Reader) (bytes int64, err error) { + return io.Copy(ioutil.Discard, r) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go new file mode 100644 index 000000000..15ed874e7 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go @@ -0,0 +1,4 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +func init() { +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/containerfs/archiver.go b/extender/code/vendor/github.com/docker/docker/pkg/containerfs/archiver.go new file mode 100644 index 000000000..fed0a07d7 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/containerfs/archiver.go @@ -0,0 +1,205 @@ +package containerfs // import "github.com/docker/docker/pkg/containerfs" + +import ( + "archive/tar" + "fmt" + "io" + "os" + "path/filepath" + "time" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +// TarFunc provides a function definition for a custom Tar function +type TarFunc func(string, *archive.TarOptions) (io.ReadCloser, error) + +// UntarFunc provides a function definition for a custom Untar function +type UntarFunc func(io.Reader, string, *archive.TarOptions) error + +// Archiver provides a similar implementation of the archive.Archiver package with the rootfs abstraction +type Archiver struct { + SrcDriver Driver + DstDriver Driver + Tar TarFunc + Untar UntarFunc + IDMapping *idtools.IdentityMapping +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func (archiver *Archiver) TarUntar(src, dst string) error { + logrus.Debugf("TarUntar(%s %s)", src, dst) + tarArchive, err := archiver.Tar(src, &archive.TarOptions{Compression: archive.Uncompressed}) + if err != nil { + return err + } + defer tarArchive.Close() + options := &archive.TarOptions{ + UIDMaps: archiver.IDMapping.UIDs(), + GIDMaps: archiver.IDMapping.GIDs(), + } + return archiver.Untar(tarArchive, dst, options) +} + +// UntarPath untar a file from path to a destination, src is the source tar file path. 
+func (archiver *Archiver) UntarPath(src, dst string) error { + tarArchive, err := archiver.SrcDriver.Open(src) + if err != nil { + return err + } + defer tarArchive.Close() + options := &archive.TarOptions{ + UIDMaps: archiver.IDMapping.UIDs(), + GIDMaps: archiver.IDMapping.GIDs(), + } + return archiver.Untar(tarArchive, dst, options) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := archiver.SrcDriver.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + + // if this archiver is set up with ID mapping we need to create + // the new destination directory with the remapped root UID/GID pair + // as owner + + identity := idtools.Identity{UID: archiver.IDMapping.RootPair().UID, GID: archiver.IDMapping.RootPair().GID} + + // Create dst, copy src's content into it + if err := idtools.MkdirAllAndChownNew(dst, 0755, identity); err != nil { + return err + } + logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) + return archiver.TarUntar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) + srcDriver := archiver.SrcDriver + dstDriver := archiver.DstDriver + + srcSt, err := srcDriver.Stat(src) + if err != nil { + return err + } + + if srcSt.IsDir() { + return fmt.Errorf("Can't copy a directory") + } + + // Clean up the trailing slash. This must be done in an operating + // system specific manner. + if dst[len(dst)-1] == dstDriver.Separator() { + dst = dstDriver.Join(dst, srcDriver.Base(src)) + } + + // The original call was system.MkdirAll, which is just + // os.MkdirAll on not-Windows and changed for Windows. 
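+ // (On non-Windows the two are identical, so the branch below only matters + // for Windows path semantics.)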
+ if dstDriver.OS() == "windows" { + // Now we are WCOW + if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil { + return err + } + } else { + // We can just use the driver.MkdirAll function + if err := dstDriver.MkdirAll(dstDriver.Dir(dst), 0700); err != nil { + return err + } + } + + r, w := io.Pipe() + errC := make(chan error, 1) + + go func() { + defer close(errC) + errC <- func() error { + defer w.Close() + + srcF, err := srcDriver.Open(src) + if err != nil { + return err + } + defer srcF.Close() + + hdr, err := tar.FileInfoHeader(srcSt, "") + if err != nil { + return err + } + hdr.Format = tar.FormatPAX + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + hdr.AccessTime = time.Time{} + hdr.ChangeTime = time.Time{} + hdr.Name = dstDriver.Base(dst) + if dstDriver.OS() == "windows" { + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + } else { + hdr.Mode = int64(os.FileMode(hdr.Mode)) + } + + if err := remapIDs(archiver.IDMapping, hdr); err != nil { + return err + } + + tw := tar.NewWriter(w) + defer tw.Close() + if err := tw.WriteHeader(hdr); err != nil { + return err + } + if _, err := io.Copy(tw, srcF); err != nil { + return err + } + return nil + }() + }() + defer func() { + if er := <-errC; err == nil && er != nil { + err = er + } + }() + + err = archiver.Untar(r, dstDriver.Dir(dst), nil) + if err != nil { + r.CloseWithError(err) + } + return err +} + +// IdentityMapping returns the IdentityMapping of the archiver. +func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { + return archiver.IDMapping +} + +func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { + ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) + hdr.Uid, hdr.Gid = ids.UID, ids.GID + return err +} + +// chmodTarEntry is used to adjust the file permissions used in the tar header +// based on the platform where the archiving is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) + permPart := perm & os.ModePerm + noPermPart := perm &^ os.ModePerm + // Add the x bit: make everything +x from windows + permPart |= 0111 + permPart &= 0755 + + return noPermPart | permPart +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/containerfs/containerfs.go b/extender/code/vendor/github.com/docker/docker/pkg/containerfs/containerfs.go new file mode 100644 index 000000000..7bb1d8c36 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/containerfs/containerfs.go @@ -0,0 +1,87 @@ +package containerfs // import "github.com/docker/docker/pkg/containerfs" + +import ( + "path/filepath" + "runtime" + + "github.com/containerd/continuity/driver" + "github.com/containerd/continuity/pathdriver" + "github.com/docker/docker/pkg/symlink" +) + +// ContainerFS is an interface that represents a root file system +type ContainerFS interface { + // Path returns the path to the root. Note that this may not exist + // on the local system, so the continuity operations must be used + Path() string + + // ResolveScopedPath evaluates the given path scoped to the root. + // For example, if root=/a, and path=/b/c, then this function would return /a/b/c. + // If rawPath is true, then the function will not perform any modifications + // before path resolution. Otherwise, the function will clean the given path + // by making it an absolute path.
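+ // Symlinks are resolved inside the scope as well: with root=/a, a symlink + // /a/b pointing at /etc resolves to /a/etc instead of escaping the root + // (see symlink.FollowSymlinkInScope).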
+ ResolveScopedPath(path string, rawPath bool) (string, error) + + Driver +} + +// Driver combines both continuity's Driver and PathDriver interfaces with +// OS and Architecture accessors for determining the platform. +type Driver interface { + // OS returns the OS where the rootfs is located. Essentially, + // runtime.GOOS for everything aside from LCOW, which is "linux" + OS() string + + // Architecture returns the hardware architecture where the + // container is located. + Architecture() string + + // Driver & PathDriver provide methods to manipulate files & paths + driver.Driver + pathdriver.PathDriver +} + +// NewLocalContainerFS is a helper function to implement daemon's Mount interface +// when the graphdriver mount point is a local path on the machine. +func NewLocalContainerFS(path string) ContainerFS { + return &local{ + path: path, + Driver: driver.LocalDriver, + PathDriver: pathdriver.LocalPathDriver, + } +} + +// NewLocalDriver provides file and path drivers for a local file system. They are +// essentially a wrapper around the `os` and `filepath` functions. +func NewLocalDriver() Driver { + return &local{ + Driver: driver.LocalDriver, + PathDriver: pathdriver.LocalPathDriver, + } +} + +type local struct { + path string + driver.Driver + pathdriver.PathDriver +} + +func (l *local) Path() string { + return l.path +} + +func (l *local) ResolveScopedPath(path string, rawPath bool) (string, error) { + cleanedPath := path + if !rawPath { + cleanedPath = cleanScopedPath(path) + } + return symlink.FollowSymlinkInScope(filepath.Join(l.path, cleanedPath), l.path) +} + +func (l *local) OS() string { + return runtime.GOOS +} + +func (l *local) Architecture() string { + return runtime.GOARCH +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/containerfs/containerfs_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/containerfs/containerfs_unix.go new file mode 100644 index 000000000..6a9945951 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/containerfs/containerfs_unix.go @@ -0,0 +1,10 @@ +// +build !windows + +package containerfs // import "github.com/docker/docker/pkg/containerfs" + +import "path/filepath" + +// cleanScopedPath prepends the path separator so the result can be combined +// with a mnt path.
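+// For example, cleanScopedPath("b/c") and cleanScopedPath("/foo/../b/c") both +// yield "/b/c".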
+func cleanScopedPath(path string) string { + return filepath.Join(string(filepath.Separator), path) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/containerfs/containerfs_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/containerfs/containerfs_windows.go new file mode 100644 index 000000000..9fb708462 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/containerfs/containerfs_windows.go @@ -0,0 +1,15 @@ +package containerfs // import "github.com/docker/docker/pkg/containerfs" + +import "path/filepath" + +// cleanScopedPath removes the C:\ syntax, and prepares to combine +// with a volume path +func cleanScopedPath(path string) string { + if len(path) >= 2 { + c := path[0] + if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') { + path = path[2:] + } + } + return filepath.Join(string(filepath.Separator), path) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/directory/directory.go b/extender/code/vendor/github.com/docker/docker/pkg/directory/directory.go new file mode 100644 index 000000000..51d4a6ea2 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/directory/directory.go @@ -0,0 +1,26 @@ +package directory // import "github.com/docker/docker/pkg/directory" + +import ( + "io/ioutil" + "os" + "path/filepath" +) + +// MoveToSubdir moves all contents of a directory to a subdirectory underneath the original path +func MoveToSubdir(oldpath, subdir string) error { + + infos, err := ioutil.ReadDir(oldpath) + if err != nil { + return err + } + for _, info := range infos { + if info.Name() != subdir { + oldName := filepath.Join(oldpath, info.Name()) + newName := filepath.Join(oldpath, subdir, info.Name()) + if err := os.Rename(oldName, newName); err != nil { + return err + } + } + } + return nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/directory/directory_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/directory/directory_unix.go new file mode 100644 index 000000000..f56dd7a8f --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/directory/directory_unix.go @@ -0,0 +1,54 @@ +// +build linux freebsd darwin + +package directory // import "github.com/docker/docker/pkg/directory" + +import ( + "context" + "os" + "path/filepath" + "syscall" +) + +// Size walks a directory tree and returns its total size in bytes. +func Size(ctx context.Context, dir string) (size int64, err error) { + data := make(map[uint64]struct{}) + err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + if err != nil { + // if dir does not exist, Size() returns the error. + // if dir/x disappeared while walking, Size() ignores dir/x. + if os.IsNotExist(err) && d != dir { + return nil + } + return err + } + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + // Ignore directory sizes + if fileInfo == nil { + return nil + } + + s := fileInfo.Size() + if fileInfo.IsDir() || s == 0 { + return nil + } + + // Check inode to handle hard links correctly + inode := fileInfo.Sys().(*syscall.Stat_t).Ino + // inode is not a uint64 on all platforms. Cast it to avoid issues. + if _, exists := data[inode]; exists { + return nil + } + // inode is not a uint64 on all platforms. Cast it to avoid issues. 
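+ // Recording the inode means a file reachable through several hard links + // only contributes its size once.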
+ data[inode] = struct{}{} + + size += s + + return nil + }) + return +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/directory/directory_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/directory/directory_windows.go new file mode 100644 index 000000000..f07f24188 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/directory/directory_windows.go @@ -0,0 +1,42 @@ +package directory // import "github.com/docker/docker/pkg/directory" + +import ( + "context" + "os" + "path/filepath" +) + +// Size walks a directory tree and returns its total size in bytes. +func Size(ctx context.Context, dir string) (size int64, err error) { + err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + if err != nil { + // if dir does not exist, Size() returns the error. + // if dir/x disappeared while walking, Size() ignores dir/x. + if os.IsNotExist(err) && d != dir { + return nil + } + return err + } + + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + // Ignore directory sizes + if fileInfo == nil { + return nil + } + + s := fileInfo.Size() + if fileInfo.IsDir() || s == 0 { + return nil + } + + size += s + + return nil + }) + return +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go b/extender/code/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go new file mode 100644 index 000000000..8b6cb56f1 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go @@ -0,0 +1,40 @@ +// Package filenotify provides a mechanism for watching file(s) for changes. +// Generally leans on fsnotify, but provides a poll-based notifier which fsnotify does not support. +// These are wrapped up in a common interface so that either can be used interchangeably in your code. 
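+// +// A minimal usage sketch (error handling elided): +// +// w, _ := filenotify.New() // fs-event watcher, or poller on failure +// _ = w.Add("/tmp/watched") +// select { +// case ev := <-w.Events(): +// fmt.Println(ev) +// case err := <-w.Errors(): +// fmt.Println(err) +// }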
+package filenotify // import "github.com/docker/docker/pkg/filenotify" + +import "github.com/fsnotify/fsnotify" + +// FileWatcher is an interface for implementing file notification watchers +type FileWatcher interface { + Events() <-chan fsnotify.Event + Errors() <-chan error + Add(name string) error + Remove(name string) error + Close() error +} + +// New tries to use an fs-event watcher, and falls back to the poller if there is an error +func New() (FileWatcher, error) { + if watcher, err := NewEventWatcher(); err == nil { + return watcher, nil + } + return NewPollingWatcher(), nil +} + +// NewPollingWatcher returns a poll-based file watcher +func NewPollingWatcher() FileWatcher { + return &filePoller{ + events: make(chan fsnotify.Event), + errors: make(chan error), + } +} + +// NewEventWatcher returns an fs-event based file watcher +func NewEventWatcher() (FileWatcher, error) { + watcher, err := fsnotify.NewWatcher() + if err != nil { + return nil, err + } + return &fsNotifyWatcher{watcher}, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go b/extender/code/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go new file mode 100644 index 000000000..5a737d653 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go @@ -0,0 +1,18 @@ +package filenotify // import "github.com/docker/docker/pkg/filenotify" + +import "github.com/fsnotify/fsnotify" + +// fsNotifyWatcher wraps the fsnotify package to satisfy the FileNotifier interface +type fsNotifyWatcher struct { + *fsnotify.Watcher +} + +// Events returns the fsnotify event channel receiver +func (w *fsNotifyWatcher) Events() <-chan fsnotify.Event { + return w.Watcher.Events +} + +// Errors returns the fsnotify error channel receiver +func (w *fsNotifyWatcher) Errors() <-chan error { + return w.Watcher.Errors +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/filenotify/poller.go b/extender/code/vendor/github.com/docker/docker/pkg/filenotify/poller.go new file mode 100644 index 000000000..6161d4ab7 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/filenotify/poller.go @@ -0,0 +1,204 @@ +package filenotify // import "github.com/docker/docker/pkg/filenotify" + +import ( + "errors" + "fmt" + "os" + "sync" + "time" + + "github.com/sirupsen/logrus" + + "github.com/fsnotify/fsnotify" +) + +var ( + // errPollerClosed is returned when the poller is closed + errPollerClosed = errors.New("poller is closed") + // errNoSuchWatch is returned when trying to remove a watch that doesn't exist + errNoSuchWatch = errors.New("watch does not exist") +) + +// watchWaitTime is the time to wait between file poll loops +const watchWaitTime = 200 * time.Millisecond + +// filePoller is used to poll files for changes, especially in cases where fsnotify +// can't be run (e.g. 
when inotify handles are exhausted) +// filePoller satisfies the FileWatcher interface +type filePoller struct { + // watches is the list of files currently being polled, close the associated channel to stop the watch + watches map[string]chan struct{} + // events is the channel to listen to for watch events + events chan fsnotify.Event + // errors is the channel to listen to for watch errors + errors chan error + // mu locks the poller for modification + mu sync.Mutex + // closed is used to specify when the poller has already closed + closed bool +} + +// Add adds a filename to the list of watches +// once added the file is polled for changes in a separate goroutine +func (w *filePoller) Add(name string) error { + w.mu.Lock() + defer w.mu.Unlock() + + if w.closed { + return errPollerClosed + } + + f, err := os.Open(name) + if err != nil { + return err + } + fi, err := os.Stat(name) + if err != nil { + f.Close() + return err + } + + if w.watches == nil { + w.watches = make(map[string]chan struct{}) + } + if _, exists := w.watches[name]; exists { + f.Close() + return fmt.Errorf("watch exists") + } + chClose := make(chan struct{}) + w.watches[name] = chClose + + go w.watch(f, fi, chClose) + return nil +} + +// Remove stops and removes watch with the specified name +func (w *filePoller) Remove(name string) error { + w.mu.Lock() + defer w.mu.Unlock() + return w.remove(name) +} + +func (w *filePoller) remove(name string) error { + if w.closed { + return errPollerClosed + } + + chClose, exists := w.watches[name] + if !exists { + return errNoSuchWatch + } + close(chClose) + delete(w.watches, name) + return nil +} + +// Events returns the event channel +// This is used for notifications on events about watched files +func (w *filePoller) Events() <-chan fsnotify.Event { + return w.events +} + +// Errors returns the errors channel +// This is used for notifications about errors on watched files +func (w *filePoller) Errors() <-chan error { + return w.errors +} + +// Close closes the poller +// All watches are stopped, removed, and the poller cannot be added to +func (w *filePoller) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + + if w.closed { + return nil + } + + for name := range w.watches { + w.remove(name) + } + w.closed = true + return nil +} + +// sendEvent publishes the specified event to the events channel +func (w *filePoller) sendEvent(e fsnotify.Event, chClose <-chan struct{}) error { + select { + case w.events <- e: + case <-chClose: + return fmt.Errorf("closed") + } + return nil +} + +// sendErr publishes the specified error to the errors channel +func (w *filePoller) sendErr(e error, chClose <-chan struct{}) error { + select { + case w.errors <- e: + case <-chClose: + return fmt.Errorf("closed") + } + return nil +} + +// watch is responsible for polling the specified file for changes +// upon finding changes to a file or errors, sendEvent/sendErr is called +func (w *filePoller) watch(f *os.File, lastFi os.FileInfo, chClose chan struct{}) { + defer f.Close() + for { + select { + case <-time.After(watchWaitTime): + case <-chClose: + logrus.Debugf("watch for %s closed", f.Name()) + return + } + + fi, err := os.Stat(f.Name()) + if err != nil { + // if we got an error here and lastFi is not set, we can presume that nothing has changed + // This should be safe since before `watch()` is called, a stat is performed, there is any error `watch` is not called + if lastFi == nil { + continue + } + // If it doesn't exist at this point, it must have been removed + // no need to send the 
error here since this is a valid operation + if os.IsNotExist(err) { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Remove, Name: f.Name()}, chClose); err != nil { + return + } + lastFi = nil + continue + } + // at this point, send the error + if err := w.sendErr(err, chClose); err != nil { + return + } + continue + } + + if lastFi == nil { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Create, Name: fi.Name()}, chClose); err != nil { + return + } + lastFi = fi + continue + } + + if fi.Mode() != lastFi.Mode() { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Chmod, Name: fi.Name()}, chClose); err != nil { + return + } + lastFi = fi + continue + } + + if fi.ModTime() != lastFi.ModTime() || fi.Size() != lastFi.Size() { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Write, Name: fi.Name()}, chClose); err != nil { + return + } + lastFi = fi + continue + } + } +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go b/extender/code/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go new file mode 100644 index 000000000..34f1c726f --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go @@ -0,0 +1,298 @@ +package fileutils // import "github.com/docker/docker/pkg/fileutils" + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "strings" + "text/scanner" + + "github.com/sirupsen/logrus" +) + +// PatternMatcher allows checking paths against a list of patterns +type PatternMatcher struct { + patterns []*Pattern + exclusions bool +} + +// NewPatternMatcher creates a new matcher object for specific patterns that can +// be used later to match against patterns against paths +func NewPatternMatcher(patterns []string) (*PatternMatcher, error) { + pm := &PatternMatcher{ + patterns: make([]*Pattern, 0, len(patterns)), + } + for _, p := range patterns { + // Eliminate leading and trailing whitespace. + p = strings.TrimSpace(p) + if p == "" { + continue + } + p = filepath.Clean(p) + newp := &Pattern{} + if p[0] == '!' { + if len(p) == 1 { + return nil, errors.New("illegal exclusion pattern: \"!\"") + } + newp.exclusion = true + p = p[1:] + pm.exclusions = true + } + // Do some syntax checking on the pattern. + // filepath's Match() has some really weird rules that are inconsistent + // so instead of trying to dup their logic, just call Match() for its + // error state and if there is an error in the pattern return it. + // If this becomes an issue we can remove this since its really only + // needed in the error (syntax) case - which isn't really critical. + if _, err := filepath.Match(p, "."); err != nil { + return nil, err + } + newp.cleanedPattern = p + newp.dirs = strings.Split(p, string(os.PathSeparator)) + pm.patterns = append(pm.patterns, newp) + } + return pm, nil +} + +// Matches matches path against all the patterns. Matches is not safe to be +// called concurrently +func (pm *PatternMatcher) Matches(file string) (bool, error) { + matched := false + file = filepath.FromSlash(file) + parentPath := filepath.Dir(file) + parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) + + for _, pattern := range pm.patterns { + negative := false + + if pattern.exclusion { + negative = true + } + + match, err := pattern.match(file) + if err != nil { + return false, err + } + + if !match && parentPath != "." { + // Check to see if the pattern matches one of our parent dirs. 
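+ // e.g. pattern "a/b" has two path components, so for file "a/b/c/d" + // (parent "a/b/c") only the first two parent components, "a/b", are + // joined and tested against the pattern.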
+ if len(pattern.dirs) <= len(parentPathDirs) { + match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator))) + } + } + + if match { + matched = !negative + } + } + + if matched { + logrus.Debugf("Skipping excluded path: %s", file) + } + + return matched, nil +} + +// Exclusions returns true if any of the patterns define exclusions +func (pm *PatternMatcher) Exclusions() bool { + return pm.exclusions +} + +// Patterns returns array of active patterns +func (pm *PatternMatcher) Patterns() []*Pattern { + return pm.patterns +} + +// Pattern defines a single regexp used to filter file paths. +type Pattern struct { + cleanedPattern string + dirs []string + regexp *regexp.Regexp + exclusion bool +} + +func (p *Pattern) String() string { + return p.cleanedPattern +} + +// Exclusion returns true if this pattern defines exclusion +func (p *Pattern) Exclusion() bool { + return p.exclusion +} + +func (p *Pattern) match(path string) (bool, error) { + + if p.regexp == nil { + if err := p.compile(); err != nil { + return false, filepath.ErrBadPattern + } + } + + b := p.regexp.MatchString(path) + + return b, nil +} + +func (p *Pattern) compile() error { + regStr := "^" + pattern := p.cleanedPattern + // Go through the pattern and convert it to a regexp. + // We use a scanner so we can support utf-8 chars. + var scan scanner.Scanner + scan.Init(strings.NewReader(pattern)) + + sl := string(os.PathSeparator) + escSL := sl + if sl == `\` { + escSL += `\` + } + + for scan.Peek() != scanner.EOF { + ch := scan.Next() + + if ch == '*' { + if scan.Peek() == '*' { + // is some flavor of "**" + scan.Next() + + // Treat **/ as ** so eat the "/" + if string(scan.Peek()) == sl { + scan.Next() + } + + if scan.Peek() == scanner.EOF { + // is "**EOF" - to align with .gitignore just accept all + regStr += ".*" + } else { + // is "**" + // Note that this allows for any # of /'s (even 0) because + // the .* will eat everything, even /'s + regStr += "(.*" + escSL + ")?" + } + } else { + // is "*" so map it to anything but "/" + regStr += "[^" + escSL + "]*" + } + } else if ch == '?' { + // "?" is any char except "/" + regStr += "[^" + escSL + "]" + } else if ch == '.' || ch == '$' { + // Escape some regexp special chars that have no meaning + // in golang's filepath.Match + regStr += `\` + string(ch) + } else if ch == '\\' { + // escape next char. Note that a trailing \ in the pattern + // will be left alone (but need to escape it) + if sl == `\` { + // On windows map "\" to "\\", meaning an escaped backslash, + // and then just continue because filepath.Match on + // Windows doesn't allow escaping at all + regStr += escSL + continue + } + if scan.Peek() != scanner.EOF { + regStr += `\` + string(scan.Next()) + } else { + regStr += `\` + } + } else { + regStr += string(ch) + } + } + + regStr += "$" + + re, err := regexp.Compile(regStr) + if err != nil { + return err + } + + p.regexp = re + return nil +} + +// Matches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +func Matches(file string, patterns []string) (bool, error) { + pm, err := NewPatternMatcher(patterns) + if err != nil { + return false, err + } + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil + } + + return pm.Matches(file) +} + +// CopyFile copies from src to dst until either EOF is reached +// on src or an error occurs. It verifies src exists and removes +// the dst if it exists. 
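+// +// Illustrative call: +// +// n, err := CopyFile("/tmp/src.txt", "/tmp/dst.txt") // n is bytes copied +// +// Copying a path onto itself is a no-op that returns (0, nil).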
+func CopyFile(src, dst string) (int64, error) { + cleanSrc := filepath.Clean(src) + cleanDst := filepath.Clean(dst) + if cleanSrc == cleanDst { + return 0, nil + } + sf, err := os.Open(cleanSrc) + if err != nil { + return 0, err + } + defer sf.Close() + if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { + return 0, err + } + df, err := os.Create(cleanDst) + if err != nil { + return 0, err + } + defer df.Close() + return io.Copy(df, sf) +} + +// ReadSymlinkedDirectory returns the target directory of a symlink. +// The target of the symbolic link may not be a file. +func ReadSymlinkedDirectory(path string) (string, error) { + var realPath string + var err error + if realPath, err = filepath.Abs(path); err != nil { + return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) + } + if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) + } + realPathInfo, err := os.Stat(realPath) + if err != nil { + return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) + } + if !realPathInfo.Mode().IsDir() { + return "", fmt.Errorf("canonical path points to a file '%s'", realPath) + } + return realPath, nil +} + +// CreateIfNotExists creates a file or a directory only if it does not already exist. +func CreateIfNotExists(path string, isDir bool) error { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + if isDir { + return os.MkdirAll(path, 0755) + } + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + f, err := os.OpenFile(path, os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + } + } + return nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go b/extender/code/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go new file mode 100644 index 000000000..e40cc271b --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go @@ -0,0 +1,27 @@ +package fileutils // import "github.com/docker/docker/pkg/fileutils" + +import ( + "os" + "os/exec" + "strconv" + "strings" +) + +// GetTotalUsedFds returns the number of used File Descriptors by +// executing `lsof -p PID` +func GetTotalUsedFds() int { + pid := os.Getpid() + + cmd := exec.Command("lsof", "-p", strconv.Itoa(pid)) + + output, err := cmd.CombinedOutput() + if err != nil { + return -1 + } + + outputStr := strings.TrimSpace(string(output)) + + fds := strings.Split(outputStr, "\n") + + return len(fds) - 1 +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go new file mode 100644 index 000000000..565396f1c --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go @@ -0,0 +1,22 @@ +// +build linux freebsd + +package fileutils // import "github.com/docker/docker/pkg/fileutils" + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/sirupsen/logrus" +) + +// GetTotalUsedFds Returns the number of used File Descriptors by +// reading it via /proc filesystem. 
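+// (Roughly "ls /proc/<pid>/fd | wc -l" in shell terms.) Returns -1 when the +// /proc entry cannot be read.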
+func GetTotalUsedFds() int { + if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) + } else { + return len(fds) + } + return -1 +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go new file mode 100644 index 000000000..3f1ebb656 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go @@ -0,0 +1,7 @@ +package fileutils // import "github.com/docker/docker/pkg/fileutils" + +// GetTotalUsedFds Returns the number of used File Descriptors. Not supported +// on Windows. +func GetTotalUsedFds() int { + return -1 +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/extender/code/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go new file mode 100644 index 000000000..47ecd0c09 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go @@ -0,0 +1,109 @@ +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "errors" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/idtools" +) + +// GetStatic returns the home directory for the current user without calling +// os/user.Current(). This is useful for static-linked binary on glibc-based +// system, because a call to os/user.Current() in a static binary leads to +// segfault due to a glibc issue that won't be fixed in a short term. +// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341) +func GetStatic() (string, error) { + uid := os.Getuid() + usr, err := idtools.LookupUID(uid) + if err != nil { + return "", err + } + return usr.Home, nil +} + +// GetRuntimeDir returns XDG_RUNTIME_DIR. +// XDG_RUNTIME_DIR is typically configured via pam_systemd. +// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetRuntimeDir() (string, error) { + if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" { + return xdgRuntimeDir, nil + } + return "", errors.New("could not get XDG_RUNTIME_DIR") +} + +// StickRuntimeDirContents sets the sticky bit on files that are under +// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system. +// +// StickyRuntimeDir returns slice of sticked files. +// StickyRuntimeDir returns nil error if XDG_RUNTIME_DIR is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func StickRuntimeDirContents(files []string) ([]string, error) { + runtimeDir, err := GetRuntimeDir() + if err != nil { + // ignore error if runtimeDir is empty + return nil, nil + } + runtimeDir, err = filepath.Abs(runtimeDir) + if err != nil { + return nil, err + } + var sticked []string + for _, f := range files { + f, err = filepath.Abs(f) + if err != nil { + return sticked, err + } + if strings.HasPrefix(f, runtimeDir+"/") { + if err = stick(f); err != nil { + return sticked, err + } + sticked = append(sticked, f) + } + } + return sticked, nil +} + +func stick(f string) error { + st, err := os.Stat(f) + if err != nil { + return err + } + m := st.Mode() + m |= os.ModeSticky + return os.Chmod(f, m) +} + +// GetDataHome returns XDG_DATA_HOME. +// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. 
+// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetDataHome() (string, error) { + if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { + return xdgDataHome, nil + } + home := os.Getenv("HOME") + if home == "" { + return "", errors.New("could not get either XDG_DATA_HOME or HOME") + } + return filepath.Join(home, ".local", "share"), nil +} + +// GetConfigHome returns XDG_CONFIG_HOME. +// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetConfigHome() (string, error) { + if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { + return xdgConfigHome, nil + } + home := os.Getenv("HOME") + if home == "" { + return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") + } + return filepath.Join(home, ".config"), nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/extender/code/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go new file mode 100644 index 000000000..f0a363ded --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go @@ -0,0 +1,33 @@ +// +build !linux + +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "errors" +) + +// GetStatic is not needed for non-linux systems. +// (Precisely, it is needed only for glibc-based linux systems.) +func GetStatic() (string, error) { + return "", errors.New("homedir.GetStatic() is not supported on this system") +} + +// GetRuntimeDir is unsupported on non-linux system. +func GetRuntimeDir() (string, error) { + return "", errors.New("homedir.GetRuntimeDir() is not supported on this system") +} + +// StickRuntimeDirContents is unsupported on non-linux system. +func StickRuntimeDirContents(files []string) ([]string, error) { + return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system") +} + +// GetDataHome is unsupported on non-linux system. +func GetDataHome() (string, error) { + return "", errors.New("homedir.GetDataHome() is not supported on this system") +} + +// GetConfigHome is unsupported on non-linux system. +func GetConfigHome() (string, error) { + return "", errors.New("homedir.GetConfigHome() is not supported on this system") +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go new file mode 100644 index 000000000..d85e12448 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go @@ -0,0 +1,34 @@ +// +build !windows + +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "os" + + "github.com/opencontainers/runc/libcontainer/user" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "HOME" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + home := os.Getenv(Key()) + if home == "" { + if u, err := user.CurrentUser(); err == nil { + return u.Home + } + } + return home +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. 
+func GetShortcutString() string { + return "~" +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go new file mode 100644 index 000000000..2f81813b2 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go @@ -0,0 +1,24 @@ +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "os" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "USERPROFILE" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + return os.Getenv(Key()) +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + return "%USERPROFILE%" // be careful while using in format functions +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/extender/code/vendor/github.com/docker/docker/pkg/idtools/idtools.go new file mode 100644 index 000000000..230422eac --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/idtools/idtools.go @@ -0,0 +1,267 @@ +package idtools // import "github.com/docker/docker/pkg/idtools" + +import ( + "bufio" + "fmt" + "os" + "sort" + "strconv" + "strings" +) + +// IDMap contains a single entry for user namespace range remapping. An array +// of IDMap entries represents the structure that will be provided to the Linux +// kernel for creating a user namespace. +type IDMap struct { + ContainerID int `json:"container_id"` + HostID int `json:"host_id"` + Size int `json:"size"` +} + +type subIDRange struct { + Start int + Length int +} + +type ranges []subIDRange + +func (e ranges) Len() int { return len(e) } +func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } +func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } + +const ( + subuidFileName = "/etc/subuid" + subgidFileName = "/etc/subgid" +) + +// MkdirAllAndChown creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. If the directory already exists, this +// function will still change ownership to the requested uid/gid pair. +func MkdirAllAndChown(path string, mode os.FileMode, owner Identity) error { + return mkdirAs(path, mode, owner, true, true) +} + +// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. +// If the directory already exists, this function still changes ownership. +// Note that unlike os.Mkdir(), this function does not return IsExist error +// in case path already exists. +func MkdirAndChown(path string, mode os.FileMode, owner Identity) error { + return mkdirAs(path, mode, owner, false, true) +} + +// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies +// ownership ONLY of newly created directories to the requested uid/gid. If the +// directories along the path exist, no change of ownership will be performed +func MkdirAllAndChownNew(path string, mode os.FileMode, owner Identity) error { + return mkdirAs(path, mode, owner, true, false) +} + +// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. 
+// If the maps are empty, then the root uid/gid will default to "real" 0/0 +func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { + uid, err := toHost(0, uidMap) + if err != nil { + return -1, -1, err + } + gid, err := toHost(0, gidMap) + if err != nil { + return -1, -1, err + } + return uid, gid, nil +} + +// toContainer takes an id mapping, and uses it to translate a +// host ID to the remapped ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id +func toContainer(hostID int, idMap []IDMap) (int, error) { + if idMap == nil { + return hostID, nil + } + for _, m := range idMap { + if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { + contID := m.ContainerID + (hostID - m.HostID) + return contID, nil + } + } + return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) +} + +// toHost takes an id mapping and a remapped ID, and translates the +// ID to the mapped host ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id # +func toHost(contID int, idMap []IDMap) (int, error) { + if idMap == nil { + return contID, nil + } + for _, m := range idMap { + if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { + hostID := m.HostID + (contID - m.ContainerID) + return hostID, nil + } + } + return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) +} + +// Identity is either a UID and GID pair or a SID (but not both) +type Identity struct { + UID int + GID int + SID string +} + +// IdentityMapping contains a mappings of UIDs and GIDs +type IdentityMapping struct { + uids []IDMap + gids []IDMap +} + +// NewIdentityMapping takes a requested user and group name and +// using the data from /etc/sub{uid,gid} ranges, creates the +// proper uid and gid remapping ranges for that user/group pair +func NewIdentityMapping(username, groupname string) (*IdentityMapping, error) { + subuidRanges, err := parseSubuid(username) + if err != nil { + return nil, err + } + subgidRanges, err := parseSubgid(groupname) + if err != nil { + return nil, err + } + if len(subuidRanges) == 0 { + return nil, fmt.Errorf("No subuid ranges found for user %q", username) + } + if len(subgidRanges) == 0 { + return nil, fmt.Errorf("No subgid ranges found for group %q", groupname) + } + + return &IdentityMapping{ + uids: createIDMap(subuidRanges), + gids: createIDMap(subgidRanges), + }, nil +} + +// NewIDMappingsFromMaps creates a new mapping from two slices +// Deprecated: this is a temporary shim while transitioning to IDMapping +func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IdentityMapping { + return &IdentityMapping{uids: uids, gids: gids} +} + +// RootPair returns a uid and gid pair for the root user. The error is ignored +// because a root user always exists, and the defaults are correct when the uid +// and gid maps are empty. +func (i *IdentityMapping) RootPair() Identity { + uid, gid, _ := GetRootUIDGID(i.uids, i.gids) + return Identity{UID: uid, GID: gid} +} + +// ToHost returns the host UID and GID for the container uid, gid. 
+// Remapping is only performed if the ids aren't already the remapped root ids +func (i *IdentityMapping) ToHost(pair Identity) (Identity, error) { + var err error + target := i.RootPair() + + if pair.UID != target.UID { + target.UID, err = toHost(pair.UID, i.uids) + if err != nil { + return target, err + } + } + + if pair.GID != target.GID { + target.GID, err = toHost(pair.GID, i.gids) + } + return target, err +} + +// ToContainer returns the container UID and GID for the host uid and gid +func (i *IdentityMapping) ToContainer(pair Identity) (int, int, error) { + uid, err := toContainer(pair.UID, i.uids) + if err != nil { + return -1, -1, err + } + gid, err := toContainer(pair.GID, i.gids) + return uid, gid, err +} + +// Empty returns true if there are no id mappings +func (i *IdentityMapping) Empty() bool { + return len(i.uids) == 0 && len(i.gids) == 0 +} + +// UIDs return the UID mapping +// TODO: remove this once everything has been refactored to use pairs +func (i *IdentityMapping) UIDs() []IDMap { + return i.uids +} + +// GIDs return the UID mapping +// TODO: remove this once everything has been refactored to use pairs +func (i *IdentityMapping) GIDs() []IDMap { + return i.gids +} + +func createIDMap(subidRanges ranges) []IDMap { + idMap := []IDMap{} + + // sort the ranges by lowest ID first + sort.Sort(subidRanges) + containerID := 0 + for _, idrange := range subidRanges { + idMap = append(idMap, IDMap{ + ContainerID: containerID, + HostID: idrange.Start, + Size: idrange.Length, + }) + containerID = containerID + idrange.Length + } + return idMap +} + +func parseSubuid(username string) (ranges, error) { + return parseSubidFile(subuidFileName, username) +} + +func parseSubgid(username string) (ranges, error) { + return parseSubidFile(subgidFileName, username) +} + +// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid) +// and return all found ranges for a specified username. 
If the special value +// "ALL" is supplied for username, then all ranges in the file will be returned +func parseSubidFile(path, username string) (ranges, error) { + var rangeList ranges + + subidFile, err := os.Open(path) + if err != nil { + return rangeList, err + } + defer subidFile.Close() + + s := bufio.NewScanner(subidFile) + for s.Scan() { + if err := s.Err(); err != nil { + return rangeList, err + } + + text := strings.TrimSpace(s.Text()) + if text == "" || strings.HasPrefix(text, "#") { + continue + } + parts := strings.Split(text, ":") + if len(parts) != 3 { + return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) + } + if parts[0] == username || username == "ALL" { + startid, err := strconv.Atoi(parts[1]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + length, err := strconv.Atoi(parts[2]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + rangeList = append(rangeList, subIDRange{startid, length}) + } + } + return rangeList, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go new file mode 100644 index 000000000..fb239743a --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -0,0 +1,231 @@ +// +build !windows + +package idtools // import "github.com/docker/docker/pkg/idtools" + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "syscall" + + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/runc/libcontainer/user" +) + +var ( + entOnce sync.Once + getentCmd string +) + +func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { + // make an array containing the original path asked for, plus (for mkAll == true) + // all path components leading up to the complete path that don't exist before we MkdirAll + // so that we can chown all of them properly at the end. 
If chownExisting is false, we won't + // chown the full directory path if it exists + + var paths []string + + stat, err := system.Stat(path) + if err == nil { + if !stat.IsDir() { + return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} + } + if !chownExisting { + return nil + } + + // short-circuit--we were called with an existing directory and chown was requested + return lazyChown(path, owner.UID, owner.GID, stat) + } + + if os.IsNotExist(err) { + paths = []string{path} + } + + if mkAll { + // walk back to "/" looking for directories which do not exist + // and add them to the paths array for chown after creation + dirPath := path + for { + dirPath = filepath.Dir(dirPath) + if dirPath == "/" { + break + } + if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { + paths = append(paths, dirPath) + } + } + if err := system.MkdirAll(path, mode, ""); err != nil { + return err + } + } else { + if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { + return err + } + } + // even if it existed, we will chown the requested path + any subpaths that + // didn't exist when we called MkdirAll + for _, pathComponent := range paths { + if err := lazyChown(pathComponent, owner.UID, owner.GID, nil); err != nil { + return err + } + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +func CanAccess(path string, pair Identity) bool { + statInfo, err := system.Stat(path) + if err != nil { + return false + } + fileMode := os.FileMode(statInfo.Mode()) + permBits := fileMode.Perm() + return accessible(statInfo.UID() == uint32(pair.UID), + statInfo.GID() == uint32(pair.GID), permBits) +} + +func accessible(isOwner, isGroup bool, perms os.FileMode) bool { + if isOwner && (perms&0100 == 0100) { + return true + } + if isGroup && (perms&0010 == 0010) { + return true + } + if perms&0001 == 0001 { + return true + } + return false +} + +// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUser(username string) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUser(username) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username)) + if err != nil { + return user.User{}, err + } + return usr, nil +} + +// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUID(uid int) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUid(uid) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + return getentUser(fmt.Sprintf("%s %d", "passwd", uid)) +} + +func getentUser(args string) (user.User, error) { + reader, err := callGetent(args) + if err != nil { + return user.User{}, err + } + users, err := user.ParsePasswd(reader) + if err != nil { + return user.User{}, err + } + if len(users) == 0 { + return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1]) + } + return users[0], nil +} + +// LookupGroup 
uses traditional local system files lookup (from libcontainer/user) on a group name, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGroup(groupname string) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGroup(groupname) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(fmt.Sprintf("%s %s", "group", groupname)) +} + +// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGID(gid int) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGid(gid) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(fmt.Sprintf("%s %d", "group", gid)) +} + +func getentGroup(args string) (user.Group, error) { + reader, err := callGetent(args) + if err != nil { + return user.Group{}, err + } + groups, err := user.ParseGroup(reader) + if err != nil { + return user.Group{}, err + } + if len(groups) == 0 { + return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1]) + } + return groups[0], nil +} + +func callGetent(args string) (io.Reader, error) { + entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) + // if no `getent` command on host, can't do anything else + if getentCmd == "" { + return nil, fmt.Errorf("") + } + out, err := execCmd(getentCmd, args) + if err != nil { + exitCode, errC := system.GetExitCode(err) + if errC != nil { + return nil, err + } + switch exitCode { + case 1: + return nil, fmt.Errorf("getent reported invalid parameters/database unknown") + case 2: + terms := strings.Split(args, " ") + return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0]) + case 3: + return nil, fmt.Errorf("getent database doesn't support enumeration") + default: + return nil, err + } + + } + return bytes.NewReader(out), nil +} + +// lazyChown performs a chown only if the uid/gid don't match what's requested +// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the +// dir is on an NFS share, so don't call chown unless we absolutely must. +func lazyChown(p string, uid, gid int, stat *system.StatT) error { + if stat == nil { + var err error + stat, err = system.Stat(p) + if err != nil { + return err + } + } + if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { + return nil + } + return os.Chown(p, uid, gid) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go new file mode 100644 index 000000000..4ae38a1b1 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go @@ -0,0 +1,25 @@ +package idtools // import "github.com/docker/docker/pkg/idtools" + +import ( + "os" + + "github.com/docker/docker/pkg/system" +) + +// This is currently a wrapper around MkdirAll, however, since currently +// permissions aren't set through this path, the identity isn't utilized. +// Ownership is handled elsewhere, but in the future could be support here +// too. 
+func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { + if err := system.MkdirAll(path, mode, ""); err != nil { + return err + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +// Windows does not require/support this function, so always return true +func CanAccess(path string, identity Identity) bool { + return true +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/extender/code/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go new file mode 100644 index 000000000..6272c5a40 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go @@ -0,0 +1,164 @@ +package idtools // import "github.com/docker/docker/pkg/idtools" + +import ( + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "sync" +) + +// add a user and/or group to Linux /etc/passwd, /etc/group using standard +// Linux distribution commands: +// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group +// useradd -r -s /bin/false + +var ( + once sync.Once + userCommand string + + cmdTemplates = map[string]string{ + "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s", + "useradd": "-r -s /bin/false %s", + "usermod": "-%s %d-%d %s", + } + + idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) + // default length for a UID/GID subordinate range + defaultRangeLen = 65536 + defaultRangeStart = 100000 + userMod = "usermod" +) + +// AddNamespaceRangesUser takes a username and uses the standard system +// utility to create a system user/group pair used to hold the +// /etc/sub{uid,gid} ranges which will be used for user namespace +// mapping ranges in containers. 
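+// A minimal usage sketch (illustrative; "dockremap" is just a conventional
+// name, not something this package requires):
+//
+//    uid, gid, err := AddNamespaceRangesUser("dockremap")
+//    if err == nil {
+//        // uid/gid now have ranges registered in /etc/subuid and /etc/subgid
+//    }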
+func AddNamespaceRangesUser(name string) (int, int, error) { + if err := addUser(name); err != nil { + return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) + } + + // Query the system for the created uid and gid pair + out, err := execCmd("id", name) + if err != nil { + return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err) + } + matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) + if len(matches) != 3 { + return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out)) + } + uid, err := strconv.Atoi(matches[1]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err) + } + gid, err := strconv.Atoi(matches[2]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) + } + + // Now we need to create the subuid/subgid ranges for our new user/group (system users + // do not get auto-created ranges in subuid/subgid) + + if err := createSubordinateRanges(name); err != nil { + return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err) + } + return uid, gid, nil +} + +func addUser(userName string) error { + once.Do(func() { + // set up which commands are used for adding users/groups dependent on distro + if _, err := resolveBinary("adduser"); err == nil { + userCommand = "adduser" + } else if _, err := resolveBinary("useradd"); err == nil { + userCommand = "useradd" + } + }) + if userCommand == "" { + return fmt.Errorf("Cannot add user; no useradd/adduser binary found") + } + args := fmt.Sprintf(cmdTemplates[userCommand], userName) + out, err := execCmd(userCommand, args) + if err != nil { + return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out)) + } + return nil +} + +func createSubordinateRanges(name string) error { + + // first, we should verify that ranges weren't automatically created + // by the distro tooling + ranges, err := parseSubuid(name) + if err != nil { + return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no UID ranges; let's create one + startID, err := findNextUIDRange() + if err != nil { + return fmt.Errorf("Can't find available subuid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) + } + } + + ranges, err = parseSubgid(name) + if err != nil { + return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no GID ranges; let's create one + startID, err := findNextGIDRange() + if err != nil { + return fmt.Errorf("Can't find available subgid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) + } + } + return nil +} + +func findNextUIDRange() (int, error) { + ranges, err := parseSubuid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextGIDRange() (int, error) { + ranges, err := parseSubgid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err) + } + 
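+	// sort ascending so a single pass in findNextRangeStart can step past
+	// each overlapping range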
sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextRangeStart(rangeList ranges) (int, error) { + startID := defaultRangeStart + for _, arange := range rangeList { + if wouldOverlap(arange, startID) { + startID = arange.Start + arange.Length + } + } + return startID, nil +} + +func wouldOverlap(arange subIDRange, ID int) bool { + low := ID + high := ID + defaultRangeLen + if (low >= arange.Start && low <= arange.Start+arange.Length) || + (high <= arange.Start+arange.Length && high >= arange.Start) { + return true + } + return false +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/extender/code/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go new file mode 100644 index 000000000..e7c4d6311 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package idtools // import "github.com/docker/docker/pkg/idtools" + +import "fmt" + +// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair +// and calls the appropriate helper function to add the group and then +// the user to the group in /etc/group and /etc/passwd respectively. +func AddNamespaceRangesUser(name string) (int, int, error) { + return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go new file mode 100644 index 000000000..903ac4501 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go @@ -0,0 +1,32 @@ +// +build !windows + +package idtools // import "github.com/docker/docker/pkg/idtools" + +import ( + "fmt" + "os/exec" + "path/filepath" + "strings" +) + +func resolveBinary(binname string) (string, error) { + binaryPath, err := exec.LookPath(binname) + if err != nil { + return "", err + } + resolvedPath, err := filepath.EvalSymlinks(binaryPath) + if err != nil { + return "", err + } + //only return no error if the final resolved binary basename + //matches what was searched for + if filepath.Base(resolvedPath) == binname { + return resolvedPath, nil + } + return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) +} + +func execCmd(cmd, args string) ([]byte, error) { + execCmd := exec.Command(cmd, strings.Split(args, " ")...) 
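+	// CombinedOutput interleaves stdout and stderr, which callers parse or
+	// surface in their error messages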
+ return execCmd.CombinedOutput() +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/ioutils/buffer.go b/extender/code/vendor/github.com/docker/docker/pkg/ioutils/buffer.go new file mode 100644 index 000000000..466f79294 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/ioutils/buffer.go @@ -0,0 +1,51 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "errors" + "io" +) + +var errBufferFull = errors.New("buffer is full") + +type fixedBuffer struct { + buf []byte + pos int + lastRead int +} + +func (b *fixedBuffer) Write(p []byte) (int, error) { + n := copy(b.buf[b.pos:cap(b.buf)], p) + b.pos += n + + if n < len(p) { + if b.pos == cap(b.buf) { + return n, errBufferFull + } + return n, io.ErrShortWrite + } + return n, nil +} + +func (b *fixedBuffer) Read(p []byte) (int, error) { + n := copy(p, b.buf[b.lastRead:b.pos]) + b.lastRead += n + return n, nil +} + +func (b *fixedBuffer) Len() int { + return b.pos - b.lastRead +} + +func (b *fixedBuffer) Cap() int { + return cap(b.buf) +} + +func (b *fixedBuffer) Reset() { + b.pos = 0 + b.lastRead = 0 + b.buf = b.buf[:0] +} + +func (b *fixedBuffer) String() string { + return string(b.buf[b.lastRead:b.pos]) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/extender/code/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go new file mode 100644 index 000000000..d4bbf3c9d --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go @@ -0,0 +1,186 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "errors" + "io" + "sync" +) + +// maxCap is the highest capacity to use in byte slices that buffer data. +const maxCap = 1e6 + +// minCap is the lowest capacity to use in byte slices that buffer data +const minCap = 64 + +// blockThreshold is the minimum number of bytes in the buffer which will cause +// a write to BytesPipe to block when allocating a new slice. +const blockThreshold = 1e6 + +var ( + // ErrClosed is returned when Write is called on a closed BytesPipe. + ErrClosed = errors.New("write to closed BytesPipe") + + bufPools = make(map[int]*sync.Pool) + bufPoolsLock sync.Mutex +) + +// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). +// All written data may be read at most once. Also, BytesPipe allocates +// and releases new byte slices to adjust to current needs, so the buffer +// won't be overgrown after peak loads. +type BytesPipe struct { + mu sync.Mutex + wait *sync.Cond + buf []*fixedBuffer + bufLen int + closeErr error // error to return from next Read. set to nil if not closed. +} + +// NewBytesPipe creates new BytesPipe, initialized by specified slice. +// If buf is nil, then it will be initialized with slice which cap is 64. +// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). +func NewBytesPipe() *BytesPipe { + bp := &BytesPipe{} + bp.buf = append(bp.buf, getBuffer(minCap)) + bp.wait = sync.NewCond(&bp.mu) + return bp +} + +// Write writes p to BytesPipe. +// It can allocate new []byte slices in a process of writing. 
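+// A minimal round-trip sketch (illustrative only):
+//
+//    bp := NewBytesPipe()
+//    go func() {
+//        bp.Write([]byte("hello"))
+//        bp.Close()
+//    }()
+//    data, _ := ioutil.ReadAll(bp) // "hello", then the close error (io.EOF)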
+func (bp *BytesPipe) Write(p []byte) (int, error) { + bp.mu.Lock() + + written := 0 +loop0: + for { + if bp.closeErr != nil { + bp.mu.Unlock() + return written, ErrClosed + } + + if len(bp.buf) == 0 { + bp.buf = append(bp.buf, getBuffer(64)) + } + // get the last buffer + b := bp.buf[len(bp.buf)-1] + + n, err := b.Write(p) + written += n + bp.bufLen += n + + // errBufferFull is an error we expect to get if the buffer is full + if err != nil && err != errBufferFull { + bp.wait.Broadcast() + bp.mu.Unlock() + return written, err + } + + // if there was enough room to write all then break + if len(p) == n { + break + } + + // more data: write to the next slice + p = p[n:] + + // make sure the buffer doesn't grow too big from this write + for bp.bufLen >= blockThreshold { + bp.wait.Wait() + if bp.closeErr != nil { + continue loop0 + } + } + + // add new byte slice to the buffers slice and continue writing + nextCap := b.Cap() * 2 + if nextCap > maxCap { + nextCap = maxCap + } + bp.buf = append(bp.buf, getBuffer(nextCap)) + } + bp.wait.Broadcast() + bp.mu.Unlock() + return written, nil +} + +// CloseWithError causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) CloseWithError(err error) error { + bp.mu.Lock() + if err != nil { + bp.closeErr = err + } else { + bp.closeErr = io.EOF + } + bp.wait.Broadcast() + bp.mu.Unlock() + return nil +} + +// Close causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) Close() error { + return bp.CloseWithError(nil) +} + +// Read reads bytes from BytesPipe. +// Data could be read only once. +func (bp *BytesPipe) Read(p []byte) (n int, err error) { + bp.mu.Lock() + if bp.bufLen == 0 { + if bp.closeErr != nil { + bp.mu.Unlock() + return 0, bp.closeErr + } + bp.wait.Wait() + if bp.bufLen == 0 && bp.closeErr != nil { + err := bp.closeErr + bp.mu.Unlock() + return 0, err + } + } + + for bp.bufLen > 0 { + b := bp.buf[0] + read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error + n += read + bp.bufLen -= read + + if b.Len() == 0 { + // it's empty so return it to the pool and move to the next one + returnBuffer(b) + bp.buf[0] = nil + bp.buf = bp.buf[1:] + } + + if len(p) == read { + break + } + + p = p[read:] + } + + bp.wait.Broadcast() + bp.mu.Unlock() + return +} + +func returnBuffer(b *fixedBuffer) { + b.Reset() + bufPoolsLock.Lock() + pool := bufPools[b.Cap()] + bufPoolsLock.Unlock() + if pool != nil { + pool.Put(b) + } +} + +func getBuffer(size int) *fixedBuffer { + bufPoolsLock.Lock() + pool, ok := bufPools[size] + if !ok { + pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} + bufPools[size] = pool + } + bufPoolsLock.Unlock() + return pool.Get().(*fixedBuffer) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/extender/code/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go new file mode 100644 index 000000000..534d66ac2 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go @@ -0,0 +1,162 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" +) + +// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a +// temporary file and closing it atomically changes the temporary file to +// destination path. Writing and closing concurrently is not allowed. 
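+// A minimal usage sketch (illustrative only; the path is hypothetical):
+//
+//    w, err := NewAtomicFileWriter("/tmp/example.conf", 0644)
+//    if err != nil { ... }
+//    w.Write(data)
+//    err = w.Close() // sync, chmod and rename to the final path happen here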
+func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { + f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) + if err != nil { + return nil, err + } + + abspath, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + return &atomicFileWriter{ + f: f, + fn: abspath, + perm: perm, + }, nil +} + +// AtomicWriteFile atomically writes data to a file named by filename. +func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := NewAtomicFileWriter(filename, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + f.(*atomicFileWriter).writeErr = err + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type atomicFileWriter struct { + f *os.File + fn string + writeErr error + perm os.FileMode +} + +func (w *atomicFileWriter) Write(dt []byte) (int, error) { + n, err := w.f.Write(dt) + if err != nil { + w.writeErr = err + } + return n, err +} + +func (w *atomicFileWriter) Close() (retErr error) { + defer func() { + if retErr != nil || w.writeErr != nil { + os.Remove(w.f.Name()) + } + }() + if err := w.f.Sync(); err != nil { + w.f.Close() + return err + } + if err := w.f.Close(); err != nil { + return err + } + if err := os.Chmod(w.f.Name(), w.perm); err != nil { + return err + } + if w.writeErr == nil { + return os.Rename(w.f.Name(), w.fn) + } + return nil +} + +// AtomicWriteSet is used to atomically write a set +// of files and ensure they are visible at the same time. +// Must be committed to a new directory. +type AtomicWriteSet struct { + root string +} + +// NewAtomicWriteSet creates a new atomic write set to +// atomically create a set of files. The given directory +// is used as the base directory for storing files before +// commit. If no temporary directory is given the system +// default is used. +func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { + td, err := ioutil.TempDir(tmpDir, "write-set-") + if err != nil { + return nil, err + } + + return &AtomicWriteSet{ + root: td, + }, nil +} + +// WriteFile writes a file to the set, guaranteeing the file +// has been synced. +func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type syncFileCloser struct { + *os.File +} + +func (w syncFileCloser) Close() error { + err := w.File.Sync() + if err1 := w.File.Close(); err == nil { + err = err1 + } + return err +} + +// FileWriter opens a file writer inside the set. The file +// should be synced and closed before calling commit. +func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) { + f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm) + if err != nil { + return nil, err + } + return syncFileCloser{f}, nil +} + +// Cancel cancels the set and removes all temporary data +// created in the set. +func (ws *AtomicWriteSet) Cancel() error { + return os.RemoveAll(ws.root) +} + +// Commit moves all created files to the target directory. The +// target directory must not exist and the parent of the target +// directory must exist. 
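+// A minimal usage sketch (illustrative only; paths are hypothetical):
+//
+//    ws, _ := NewAtomicWriteSet("")
+//    ws.WriteFile("config.json", cfg, 0644)
+//    ws.WriteFile("state.json", state, 0644)
+//    err := ws.Commit("/var/lib/example/bundle") // target must not exist yet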
+func (ws *AtomicWriteSet) Commit(target string) error { + return os.Rename(ws.root, target) +} + +// String returns the location the set is writing to. +func (ws *AtomicWriteSet) String() string { + return ws.root +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/extender/code/vendor/github.com/docker/docker/pkg/ioutils/readers.go new file mode 100644 index 000000000..1f657bd3d --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/ioutils/readers.go @@ -0,0 +1,157 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "io" +) + +// ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser +// It calls the given callback function when closed. It should be constructed +// with NewReadCloserWrapper +type ReadCloserWrapper struct { + io.Reader + closer func() error +} + +// Close calls back the passed closer function +func (r *ReadCloserWrapper) Close() error { + return r.closer() +} + +// NewReadCloserWrapper returns a new io.ReadCloser. +func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { + return &ReadCloserWrapper{ + Reader: r, + closer: closer, + } +} + +type readerErrWrapper struct { + reader io.Reader + closer func() +} + +func (r *readerErrWrapper) Read(p []byte) (int, error) { + n, err := r.reader.Read(p) + if err != nil { + r.closer() + } + return n, err +} + +// NewReaderErrWrapper returns a new io.Reader. +func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { + return &readerErrWrapper{ + reader: r, + closer: closer, + } +} + +// HashData returns the sha256 sum of src. +func HashData(src io.Reader) (string, error) { + h := sha256.New() + if _, err := io.Copy(h, src); err != nil { + return "", err + } + return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil +} + +// OnEOFReader wraps an io.ReadCloser and a function +// the function will run at the end of file or close the file. +type OnEOFReader struct { + Rc io.ReadCloser + Fn func() +} + +func (r *OnEOFReader) Read(p []byte) (n int, err error) { + n, err = r.Rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +// Close closes the file and run the function. +func (r *OnEOFReader) Close() error { + err := r.Rc.Close() + r.runFunc() + return err +} + +func (r *OnEOFReader) runFunc() { + if fn := r.Fn; fn != nil { + fn() + r.Fn = nil + } +} + +// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read +// operations. +type cancelReadCloser struct { + cancel func() + pR *io.PipeReader // Stream to read from + pW *io.PipeWriter +} + +// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the +// context is cancelled. The returned io.ReadCloser must be closed when it is +// no longer needed. +func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { + pR, pW := io.Pipe() + + // Create a context used to signal when the pipe is closed + doneCtx, cancel := context.WithCancel(context.Background()) + + p := &cancelReadCloser{ + cancel: cancel, + pR: pR, + pW: pW, + } + + go func() { + _, err := io.Copy(pW, in) + select { + case <-ctx.Done(): + // If the context was closed, p.closeWithError + // was already called. Calling it again would + // change the error that Read returns. 
+ default: + p.closeWithError(err) + } + in.Close() + }() + go func() { + for { + select { + case <-ctx.Done(): + p.closeWithError(ctx.Err()) + case <-doneCtx.Done(): + return + } + } + }() + + return p +} + +// Read wraps the Read method of the pipe that provides data from the wrapped +// ReadCloser. +func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { + return p.pR.Read(buf) +} + +// closeWithError closes the wrapper and its underlying reader. It will +// cause future calls to Read to return err. +func (p *cancelReadCloser) closeWithError(err error) { + p.pW.CloseWithError(err) + p.cancel() +} + +// Close closes the wrapper its underlying reader. It will cause +// future calls to Read to return io.EOF. +func (p *cancelReadCloser) Close() error { + p.closeWithError(io.EOF) + return nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go new file mode 100644 index 000000000..dc894f913 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go @@ -0,0 +1,10 @@ +// +build !windows + +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import "io/ioutil" + +// TempDir on Unix systems is equivalent to ioutil.TempDir. +func TempDir(dir, prefix string) (string, error) { + return ioutil.TempDir(dir, prefix) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go new file mode 100644 index 000000000..ecaba2e36 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go @@ -0,0 +1,16 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "io/ioutil" + + "github.com/docker/docker/pkg/longpath" +) + +// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. +func TempDir(dir, prefix string) (string, error) { + tempDir, err := ioutil.TempDir(dir, prefix) + if err != nil { + return "", err + } + return longpath.AddPrefix(tempDir), nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/extender/code/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go new file mode 100644 index 000000000..91b8d1826 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go @@ -0,0 +1,92 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "io" + "sync" +) + +// WriteFlusher wraps the Write and Flush operation ensuring that every write +// is a flush. In addition, the Close method can be called to intercept +// Read/Write calls if the targets lifecycle has already ended. +type WriteFlusher struct { + w io.Writer + flusher flusher + flushed chan struct{} + flushedOnce sync.Once + closed chan struct{} + closeLock sync.Mutex +} + +type flusher interface { + Flush() +} + +var errWriteFlusherClosed = io.EOF + +func (wf *WriteFlusher) Write(b []byte) (n int, err error) { + select { + case <-wf.closed: + return 0, errWriteFlusherClosed + default: + } + + n, err = wf.w.Write(b) + wf.Flush() // every write is a flush. + return n, err +} + +// Flush the stream immediately. +func (wf *WriteFlusher) Flush() { + select { + case <-wf.closed: + return + default: + } + + wf.flushedOnce.Do(func() { + close(wf.flushed) + }) + wf.flusher.Flush() +} + +// Flushed returns the state of flushed. +// If it's flushed, return true, or else it return false. 
+func (wf *WriteFlusher) Flushed() bool { + // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to + // be used to detect whether or a response code has been issued or not. + // Another hook should be used instead. + var flushed bool + select { + case <-wf.flushed: + flushed = true + default: + } + return flushed +} + +// Close closes the write flusher, disallowing any further writes to the +// target. After the flusher is closed, all calls to write or flush will +// result in an error. +func (wf *WriteFlusher) Close() error { + wf.closeLock.Lock() + defer wf.closeLock.Unlock() + + select { + case <-wf.closed: + return errWriteFlusherClosed + default: + close(wf.closed) + } + return nil +} + +// NewWriteFlusher returns a new WriteFlusher. +func NewWriteFlusher(w io.Writer) *WriteFlusher { + var fl flusher + if f, ok := w.(flusher); ok { + fl = f + } else { + fl = &NopFlusher{} + } + return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})} +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/extender/code/vendor/github.com/docker/docker/pkg/ioutils/writers.go new file mode 100644 index 000000000..61c679497 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/ioutils/writers.go @@ -0,0 +1,66 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import "io" + +// NopWriter represents a type which write operation is nop. +type NopWriter struct{} + +func (*NopWriter) Write(buf []byte) (int, error) { + return len(buf), nil +} + +type nopWriteCloser struct { + io.Writer +} + +func (w *nopWriteCloser) Close() error { return nil } + +// NopWriteCloser returns a nopWriteCloser. +func NopWriteCloser(w io.Writer) io.WriteCloser { + return &nopWriteCloser{w} +} + +// NopFlusher represents a type which flush operation is nop. +type NopFlusher struct{} + +// Flush is a nop operation. +func (f *NopFlusher) Flush() {} + +type writeCloserWrapper struct { + io.Writer + closer func() error +} + +func (r *writeCloserWrapper) Close() error { + return r.closer() +} + +// NewWriteCloserWrapper returns a new io.WriteCloser. +func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { + return &writeCloserWrapper{ + Writer: r, + closer: closer, + } +} + +// WriteCounter wraps a concrete io.Writer and hold a count of the number +// of bytes written to the writer during a "session". +// This can be convenient when write return is masked +// (e.g., json.Encoder.Encode()) +type WriteCounter struct { + Count int64 + Writer io.Writer +} + +// NewWriteCounter returns a new WriteCounter. 
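+// A minimal usage sketch (illustrative; dst is any io.Writer and v any
+// encodable value):
+//
+//    wc := NewWriteCounter(dst)
+//    json.NewEncoder(wc).Encode(v) // Encode masks the byte count...
+//    n := wc.Count                 // ...but the counter still has it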
+func NewWriteCounter(w io.Writer) *WriteCounter { + return &WriteCounter{ + Writer: w, + } +} + +func (wc *WriteCounter) Write(p []byte) (count int, err error) { + count, err = wc.Writer.Write(p) + wc.Count += int64(count) + return +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/extender/code/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go new file mode 100644 index 000000000..a68b566ce --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go @@ -0,0 +1,283 @@ +package jsonmessage // import "github.com/docker/docker/pkg/jsonmessage" + +import ( + "encoding/json" + "fmt" + "io" + "strings" + "time" + + "github.com/docker/docker/pkg/term" + "github.com/docker/go-units" + "github.com/morikuni/aec" +) + +// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to +// ensure the formatted time isalways the same number of characters. +const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" + +// JSONError wraps a concrete Code and Message, `Code` is +// is an integer error code, `Message` is the error message. +type JSONError struct { + Code int `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (e *JSONError) Error() string { + return e.Message +} + +// JSONProgress describes a Progress. terminalFd is the fd of the current terminal, +// Start is the initial value for the operation. Current is the current status and +// value of the progress made towards Total. Total is the end value describing when +// we made 100% progress for an operation. +type JSONProgress struct { + terminalFd uintptr + Current int64 `json:"current,omitempty"` + Total int64 `json:"total,omitempty"` + Start int64 `json:"start,omitempty"` + // If true, don't show xB/yB + HideCounts bool `json:"hidecounts,omitempty"` + Units string `json:"units,omitempty"` + nowFunc func() time.Time + winSize int +} + +func (p *JSONProgress) String() string { + var ( + width = p.width() + pbBox string + numbersBox string + timeLeftBox string + ) + if p.Current <= 0 && p.Total <= 0 { + return "" + } + if p.Total <= 0 { + switch p.Units { + case "": + current := units.HumanSize(float64(p.Current)) + return fmt.Sprintf("%8v", current) + default: + return fmt.Sprintf("%d %s", p.Current, p.Units) + } + } + + percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 + if percentage > 50 { + percentage = 50 + } + if width > 110 { + // this number can't be negative gh#7136 + numSpaces := 0 + if 50-percentage > 0 { + numSpaces = 50 - percentage + } + pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) + } + + switch { + case p.HideCounts: + case p.Units == "": // no units, use bytes + current := units.HumanSize(float64(p.Current)) + total := units.HumanSize(float64(p.Total)) + + numbersBox = fmt.Sprintf("%8v/%v", current, total) + + if p.Current > p.Total { + // remove total display if the reported current is wonky. + numbersBox = fmt.Sprintf("%8v", current) + } + default: + numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units) + + if p.Current > p.Total { + // remove total display if the reported current is wonky. 
+ numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units) + } + } + + if p.Current > 0 && p.Start > 0 && percentage < 50 { + fromStart := p.now().Sub(time.Unix(p.Start, 0)) + perEntry := fromStart / time.Duration(p.Current) + left := time.Duration(p.Total-p.Current) * perEntry + left = (left / time.Second) * time.Second + + if width > 50 { + timeLeftBox = " " + left.String() + } + } + return pbBox + numbersBox + timeLeftBox +} + +// shim for testing +func (p *JSONProgress) now() time.Time { + if p.nowFunc == nil { + p.nowFunc = func() time.Time { + return time.Now().UTC() + } + } + return p.nowFunc() +} + +// shim for testing +func (p *JSONProgress) width() int { + if p.winSize != 0 { + return p.winSize + } + ws, err := term.GetWinsize(p.terminalFd) + if err == nil { + return int(ws.Width) + } + return 200 +} + +// JSONMessage defines a message struct. It describes +// the created time, where it from, status, ID of the +// message. It's used for docker events. +type JSONMessage struct { + Stream string `json:"stream,omitempty"` + Status string `json:"status,omitempty"` + Progress *JSONProgress `json:"progressDetail,omitempty"` + ProgressMessage string `json:"progress,omitempty"` //deprecated + ID string `json:"id,omitempty"` + From string `json:"from,omitempty"` + Time int64 `json:"time,omitempty"` + TimeNano int64 `json:"timeNano,omitempty"` + Error *JSONError `json:"errorDetail,omitempty"` + ErrorMessage string `json:"error,omitempty"` //deprecated + // Aux contains out-of-band data, such as digests for push signing and image id after building. + Aux *json.RawMessage `json:"aux,omitempty"` +} + +func clearLine(out io.Writer) { + eraseMode := aec.EraseModes.All + cl := aec.EraseLine(eraseMode) + fmt.Fprint(out, cl) +} + +func cursorUp(out io.Writer, l uint) { + fmt.Fprint(out, aec.Up(l)) +} + +func cursorDown(out io.Writer, l uint) { + fmt.Fprint(out, aec.Down(l)) +} + +// Display displays the JSONMessage to `out`. If `isTerminal` is true, it will erase the +// entire current line when displaying the progressbar. +func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { + if jm.Error != nil { + if jm.Error.Code == 401 { + return fmt.Errorf("authentication is required") + } + return jm.Error + } + var endl string + if isTerminal && jm.Stream == "" && jm.Progress != nil { + clearLine(out) + endl = "\r" + fmt.Fprintf(out, endl) + } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal + return nil + } + if jm.TimeNano != 0 { + fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed)) + } else if jm.Time != 0 { + fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed)) + } + if jm.ID != "" { + fmt.Fprintf(out, "%s: ", jm.ID) + } + if jm.From != "" { + fmt.Fprintf(out, "(from %s) ", jm.From) + } + if jm.Progress != nil && isTerminal { + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) + } else if jm.ProgressMessage != "" { //deprecated + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) + } else if jm.Stream != "" { + fmt.Fprintf(out, "%s%s", jm.Stream, endl) + } else { + fmt.Fprintf(out, "%s%s\n", jm.Status, endl) + } + return nil +} + +// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` +// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of +// each line and move the cursor while displaying. 
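+// A minimal usage sketch (illustrative; body is assumed to be a Docker API
+// response stream, such as the one returned by an image pull):
+//
+//    fd, isTerm := term.GetFdInfo(os.Stdout)
+//    err := DisplayJSONMessagesStream(body, os.Stdout, fd, isTerm, nil)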
+func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(JSONMessage)) error { + var ( + dec = json.NewDecoder(in) + ids = make(map[string]uint) + ) + + for { + var diff uint + var jm JSONMessage + if err := dec.Decode(&jm); err != nil { + if err == io.EOF { + break + } + return err + } + + if jm.Aux != nil { + if auxCallback != nil { + auxCallback(jm) + } + continue + } + + if jm.Progress != nil { + jm.Progress.terminalFd = terminalFd + } + if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { + line, ok := ids[jm.ID] + if !ok { + // NOTE: This approach of using len(id) to + // figure out the number of lines of history + // only works as long as we clear the history + // when we output something that's not + // accounted for in the map, such as a line + // with no ID. + line = uint(len(ids)) + ids[jm.ID] = line + if isTerminal { + fmt.Fprintf(out, "\n") + } + } + diff = uint(len(ids)) - line + if isTerminal { + cursorUp(out, diff) + } + } else { + // When outputting something that isn't progress + // output, clear the history of previous lines. We + // don't want progress entries from some previous + // operation to be updated (for example, pull -a + // with multiple tags). + ids = make(map[string]uint) + } + err := jm.Display(out, isTerminal) + if jm.ID != "" && isTerminal { + cursorDown(out, diff) + } + if err != nil { + return err + } + } + return nil +} + +type stream interface { + io.Writer + FD() uintptr + IsTerminal() bool +} + +// DisplayJSONMessagesToStream prints json messages to the output stream +func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(JSONMessage)) error { + return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/longpath/longpath.go b/extender/code/vendor/github.com/docker/docker/pkg/longpath/longpath.go new file mode 100644 index 000000000..4177affba --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/longpath/longpath.go @@ -0,0 +1,26 @@ +// longpath introduces some constants and helper functions for handling long paths +// in Windows, which are expected to be prepended with `\\?\` and followed by either +// a drive letter, a UNC server\share, or a volume identifier. + +package longpath // import "github.com/docker/docker/pkg/longpath" + +import ( + "strings" +) + +// Prefix is the longpath prefix for Windows file paths. +const Prefix = `\\?\` + +// AddPrefix will add the Windows long path prefix to the path provided if +// it does not already have it. +func AddPrefix(path string) string { + if !strings.HasPrefix(path, Prefix) { + if strings.HasPrefix(path, `\\`) { + // This is a UNC path, so we need to add 'UNC' to the path as well. 
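+			// e.g. \\server\share\file becomes \\?\UNC\server\share\file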
+ path = Prefix + `UNC` + path[1:] + } else { + path = Prefix + path + } + } + return path +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/mount/flags.go b/extender/code/vendor/github.com/docker/docker/pkg/mount/flags.go new file mode 100644 index 000000000..ffd473311 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/mount/flags.go @@ -0,0 +1,137 @@ +package mount // import "github.com/docker/docker/pkg/mount" + +import ( + "fmt" + "strings" +) + +var flags = map[string]struct { + clear bool + flag int +}{ + "defaults": {false, 0}, + "ro": {false, RDONLY}, + "rw": {true, RDONLY}, + "suid": {true, NOSUID}, + "nosuid": {false, NOSUID}, + "dev": {true, NODEV}, + "nodev": {false, NODEV}, + "exec": {true, NOEXEC}, + "noexec": {false, NOEXEC}, + "sync": {false, SYNCHRONOUS}, + "async": {true, SYNCHRONOUS}, + "dirsync": {false, DIRSYNC}, + "remount": {false, REMOUNT}, + "mand": {false, MANDLOCK}, + "nomand": {true, MANDLOCK}, + "atime": {true, NOATIME}, + "noatime": {false, NOATIME}, + "diratime": {true, NODIRATIME}, + "nodiratime": {false, NODIRATIME}, + "bind": {false, BIND}, + "rbind": {false, RBIND}, + "unbindable": {false, UNBINDABLE}, + "runbindable": {false, RUNBINDABLE}, + "private": {false, PRIVATE}, + "rprivate": {false, RPRIVATE}, + "shared": {false, SHARED}, + "rshared": {false, RSHARED}, + "slave": {false, SLAVE}, + "rslave": {false, RSLAVE}, + "relatime": {false, RELATIME}, + "norelatime": {true, RELATIME}, + "strictatime": {false, STRICTATIME}, + "nostrictatime": {true, STRICTATIME}, +} + +var validFlags = map[string]bool{ + "": true, + "size": true, + "mode": true, + "uid": true, + "gid": true, + "nr_inodes": true, + "nr_blocks": true, + "mpol": true, +} + +var propagationFlags = map[string]bool{ + "bind": true, + "rbind": true, + "unbindable": true, + "runbindable": true, + "private": true, + "rprivate": true, + "shared": true, + "rshared": true, + "slave": true, + "rslave": true, +} + +// MergeTmpfsOptions merge mount options to make sure there is no duplicate. +func MergeTmpfsOptions(options []string) ([]string, error) { + // We use collisions maps to remove duplicates. + // For flag, the key is the flag value (the key for propagation flag is -1) + // For data=value, the key is the data + flagCollisions := map[int]bool{} + dataCollisions := map[string]bool{} + + var newOptions []string + // We process in reverse order + for i := len(options) - 1; i >= 0; i-- { + option := options[i] + if option == "defaults" { + continue + } + if f, ok := flags[option]; ok && f.flag != 0 { + // There is only one propagation mode + key := f.flag + if propagationFlags[option] { + key = -1 + } + // Check to see if there is collision for flag + if !flagCollisions[key] { + // We prepend the option and add to collision map + newOptions = append([]string{option}, newOptions...) + flagCollisions[key] = true + } + continue + } + opt := strings.SplitN(option, "=", 2) + if len(opt) != 2 || !validFlags[opt[0]] { + return nil, fmt.Errorf("Invalid tmpfs option %q", opt) + } + if !dataCollisions[opt[0]] { + // We prepend the option and add to collision map + newOptions = append([]string{option}, newOptions...) 
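+			// mark the key as seen; since we scan in reverse, any earlier
+			// duplicate encountered later in the loop is discarded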
+ dataCollisions[opt[0]] = true + } + } + + return newOptions, nil +} + +// Parse fstab type mount options into mount() flags +// and device specific data +func parseOptions(options string) (int, string) { + var ( + flag int + data []string + ) + + for _, o := range strings.Split(options, ",") { + // If the option does not exist in the flags table or the flag + // is not supported on the platform, + // then it is a data value for a specific fs type + if f, exists := flags[o]; exists && f.flag != 0 { + if f.clear { + flag &= ^f.flag + } else { + flag |= f.flag + } + } else { + data = append(data, o) + } + } + return flag, strings.Join(data, ",") +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go b/extender/code/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go new file mode 100644 index 000000000..ef35ef905 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go @@ -0,0 +1,49 @@ +// +build freebsd,cgo + +package mount // import "github.com/docker/docker/pkg/mount" + +/* +#include +*/ +import "C" + +const ( + // RDONLY will mount the filesystem as read-only. + RDONLY = C.MNT_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = C.MNT_NOSUID + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = C.MNT_NOEXEC + + // SYNCHRONOUS will allow any I/O to the file system to be done synchronously. + SYNCHRONOUS = C.MNT_SYNCHRONOUS + + // NOATIME will not update the file access time when reading from a file. + NOATIME = C.MNT_NOATIME +) + +// These flags are unsupported. +const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NODEV = 0 + NODIRATIME = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 + RBIND = 0 + RELATIVE = 0 + RELATIME = 0 + REMOUNT = 0 + STRICTATIME = 0 + mntDetach = 0 +) diff --git a/extender/code/vendor/github.com/docker/docker/pkg/mount/flags_linux.go b/extender/code/vendor/github.com/docker/docker/pkg/mount/flags_linux.go new file mode 100644 index 000000000..a1b199a31 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/mount/flags_linux.go @@ -0,0 +1,87 @@ +package mount // import "github.com/docker/docker/pkg/mount" + +import ( + "golang.org/x/sys/unix" +) + +const ( + // RDONLY will mount the file system read-only. + RDONLY = unix.MS_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = unix.MS_NOSUID + + // NODEV will not interpret character or block special devices on the file + // system. + NODEV = unix.MS_NODEV + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = unix.MS_NOEXEC + + // SYNCHRONOUS will allow I/O to the file system to be done synchronously. + SYNCHRONOUS = unix.MS_SYNCHRONOUS + + // DIRSYNC will force all directory updates within the file system to be done + // synchronously. This affects the following system calls: create, link, + // unlink, symlink, mkdir, rmdir, mknod and rename. + DIRSYNC = unix.MS_DIRSYNC + + // REMOUNT will attempt to remount an already-mounted file system. This is + // commonly used to change the mount flags for a file system, especially to + // make a readonly file system writeable. It does not change device or mount + // point. + REMOUNT = unix.MS_REMOUNT + + // MANDLOCK will force mandatory locks on a filesystem. 
+ MANDLOCK = unix.MS_MANDLOCK + + // NOATIME will not update the file access time when reading from a file. + NOATIME = unix.MS_NOATIME + + // NODIRATIME will not update the directory access time. + NODIRATIME = unix.MS_NODIRATIME + + // BIND remounts a subtree somewhere else. + BIND = unix.MS_BIND + + // RBIND remounts a subtree and all possible submounts somewhere else. + RBIND = unix.MS_BIND | unix.MS_REC + + // UNBINDABLE creates a mount which cannot be cloned through a bind operation. + UNBINDABLE = unix.MS_UNBINDABLE + + // RUNBINDABLE marks the entire mount tree as UNBINDABLE. + RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC + + // PRIVATE creates a mount which carries no propagation abilities. + PRIVATE = unix.MS_PRIVATE + + // RPRIVATE marks the entire mount tree as PRIVATE. + RPRIVATE = unix.MS_PRIVATE | unix.MS_REC + + // SLAVE creates a mount which receives propagation from its master, but not + // vice versa. + SLAVE = unix.MS_SLAVE + + // RSLAVE marks the entire mount tree as SLAVE. + RSLAVE = unix.MS_SLAVE | unix.MS_REC + + // SHARED creates a mount which provides the ability to create mirrors of + // that mount such that mounts and unmounts within any of the mirrors + // propagate to the other mirrors. + SHARED = unix.MS_SHARED + + // RSHARED marks the entire mount tree as SHARED. + RSHARED = unix.MS_SHARED | unix.MS_REC + + // RELATIME updates inode access times relative to modify or change time. + RELATIME = unix.MS_RELATIME + + // STRICTATIME allows to explicitly request full atime updates. This makes + // it possible for the kernel to default to relatime or noatime but still + // allow userspace to override it. + STRICTATIME = unix.MS_STRICTATIME + + mntDetach = unix.MNT_DETACH +) diff --git a/extender/code/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go b/extender/code/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go new file mode 100644 index 000000000..cc6c47590 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go @@ -0,0 +1,31 @@ +// +build !linux,!freebsd freebsd,!cgo + +package mount // import "github.com/docker/docker/pkg/mount" + +// These flags are unsupported. 
+const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NOATIME = 0 + NODEV = 0 + NODIRATIME = 0 + NOEXEC = 0 + NOSUID = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 + RBIND = 0 + RELATIME = 0 + RELATIVE = 0 + REMOUNT = 0 + STRICTATIME = 0 + SYNCHRONOUS = 0 + RDONLY = 0 + mntDetach = 0 +) diff --git a/extender/code/vendor/github.com/docker/docker/pkg/mount/mount.go b/extender/code/vendor/github.com/docker/docker/pkg/mount/mount.go new file mode 100644 index 000000000..4afd63c42 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/mount/mount.go @@ -0,0 +1,159 @@ +package mount // import "github.com/docker/docker/pkg/mount" + +import ( + "sort" + "strconv" + "strings" + + "github.com/sirupsen/logrus" +) + +// mountError records an error from a mount or unmount operation +type mountError struct { + op string + source, target string + flags uintptr + data string + err error +} + +func (e *mountError) Error() string { + out := e.op + " " + + if e.source != "" { + out += e.source + ":" + e.target + } else { + out += e.target + } + + if e.flags != uintptr(0) { + out += ", flags: 0x" + strconv.FormatUint(uint64(e.flags), 16) + } + if e.data != "" { + out += ", data: " + e.data + } + + out += ": " + e.err.Error() + return out +} + +// Cause returns the underlying cause of the error +func (e *mountError) Cause() error { + return e.err +} + +// FilterFunc is a type defining a callback function +// to filter out unwanted entries. It takes a pointer +// to an Info struct (not fully populated, currently +// only Mountpoint is filled in), and returns two booleans: +// - skip: true if the entry should be skipped +// - stop: true if parsing should be stopped after the entry +type FilterFunc func(*Info) (skip, stop bool) + +// PrefixFilter discards all entries whose mount points +// do not start with the specified prefix +func PrefixFilter(prefix string) FilterFunc { + return func(m *Info) (bool, bool) { + skip := !strings.HasPrefix(m.Mountpoint, prefix) + return skip, false + } +} + +// SingleEntryFilter looks for a specific entry +func SingleEntryFilter(mp string) FilterFunc { + return func(m *Info) (bool, bool) { + if m.Mountpoint == mp { + return false, true // don't skip, stop now + } + return true, false // skip, keep going + } +} + +// ParentsFilter returns all entries whose mount points +// can be parents of the specified path, discarding others. +// For example, given `/var/lib/docker/something`, entries +// like `/var/lib/docker`, `/var` and `/` are returned. +func ParentsFilter(path string) FilterFunc { + return func(m *Info) (bool, bool) { + skip := !strings.HasPrefix(path, m.Mountpoint) + return skip, false + } +} + +// GetMounts retrieves a list of mounts for the current running process, +// with an optional filter applied (use nil for no filter). +func GetMounts(f FilterFunc) ([]*Info, error) { + return parseMountTable(f) +} + +// Mounted determines if a specified mountpoint has been mounted. +// On Linux it looks at /proc/self/mountinfo. +func Mounted(mountpoint string) (bool, error) { + entries, err := GetMounts(SingleEntryFilter(mountpoint)) + if err != nil { + return false, err + } + + return len(entries) > 0, nil +} + +// Mount will mount a filesystem according to the specified configuration, on the +// condition that the target path is *not* already mounted. Options must be +// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See +// flags.go for supported option flags.
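+// +// A minimal usage sketch (editor's note; the device, target and filesystem type are hypothetical examples, not values from this package): +// +// err := mount.Mount("/dev/sda1", "/mnt/data", "ext4", "ro,noatime")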
+func Mount(device, target, mType, options string) error { + flag, _ := parseOptions(options) + if flag&REMOUNT != REMOUNT { + if mounted, err := Mounted(target); err != nil || mounted { + return err + } + } + return ForceMount(device, target, mType, options) +} + +// ForceMount will mount a filesystem according to the specified configuration, +// *regardless* of whether the target path is already mounted. Options must be +// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See +// flags.go for supported option flags. +func ForceMount(device, target, mType, options string) error { + flag, data := parseOptions(options) + return mount(device, target, mType, uintptr(flag), data) +} + +// Unmount lazily unmounts a filesystem on supported platforms, otherwise +// does a normal unmount. +func Unmount(target string) error { + return unmount(target, mntDetach) +} + +// RecursiveUnmount unmounts the target and all mounts underneath, starting with +// the deepest mount first. +func RecursiveUnmount(target string) error { + mounts, err := parseMountTable(PrefixFilter(target)) + if err != nil { + return err + } + + // Make the deepest mount be first + sort.Slice(mounts, func(i, j int) bool { + return len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint) + }) + + for i, m := range mounts { + logrus.Debugf("Trying to unmount %s", m.Mountpoint) + err = unmount(m.Mountpoint, mntDetach) + if err != nil { + if i == len(mounts)-1 { // last mount + if mounted, e := Mounted(m.Mountpoint); e != nil || mounted { + return err + } + } else { + // This is some submount, we can ignore this error for now, the final unmount will fail if this is a real problem + logrus.WithError(err).Warnf("Failed to unmount submount %s", m.Mountpoint) + } + } + + logrus.Debugf("Unmounted %s", m.Mountpoint) + } + return nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go b/extender/code/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go new file mode 100644 index 000000000..09ad36060 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go @@ -0,0 +1,59 @@ +package mount // import "github.com/docker/docker/pkg/mount" + +/* +#include <errno.h> +#include <stdlib.h> +#include <string.h> +#include <sys/_iovec.h> +#include <sys/mount.h> +#include <sys/param.h> +*/ +import "C" + +import ( + "strings" + "syscall" + "unsafe" +) + +func allocateIOVecs(options []string) []C.struct_iovec { + out := make([]C.struct_iovec, len(options)) + for i, option := range options { + out[i].iov_base = unsafe.Pointer(C.CString(option)) + out[i].iov_len = C.size_t(len(option) + 1) + } + return out +} + +func mount(device, target, mType string, flag uintptr, data string) error { + isNullFS := false + + xs := strings.Split(data, ",") + for _, x := range xs { + if x == "bind" { + isNullFS = true + } + } + + options := []string{"fspath", target} + if isNullFS { + options = append(options, "fstype", "nullfs", "target", device) + } else { + options = append(options, "fstype", mType, "from", device) + } + rawOptions := allocateIOVecs(options) + for _, rawOption := range rawOptions { + defer C.free(rawOption.iov_base) + } + + if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { + return &mountError{ + op: "mount", + source: device, + target: target, + flags: flag, + err: syscall.Errno(errno), + } + } + return nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go b/extender/code/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go new file mode 100644 index
000000000..a0a1ad236 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go @@ -0,0 +1,73 @@ +package mount // import "github.com/docker/docker/pkg/mount" + +import ( + "golang.org/x/sys/unix" +) + +const ( + // ptypes is the set of propagation types. + ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE + + // pflags is the full set of valid flags for a change propagation call. + pflags = ptypes | unix.MS_REC | unix.MS_SILENT + + // broflags is the combination of bind and read-only + broflags = unix.MS_BIND | unix.MS_RDONLY +) + +// isremount returns true if either device name or flags identify a remount request, false otherwise. +func isremount(device string, flags uintptr) bool { + switch { + // We treat device "" and "none" as a remount request to provide compatibility with + // requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts. + case flags&unix.MS_REMOUNT != 0, device == "", device == "none": + return true + default: + return false + } +} + +func mount(device, target, mType string, flags uintptr, data string) error { + oflags := flags &^ ptypes + if !isremount(device, flags) || data != "" { + // Initial call applying all non-propagation flags for mount + // or remount with changed data + if err := unix.Mount(device, target, mType, oflags, data); err != nil { + return &mountError{ + op: "mount", + source: device, + target: target, + flags: oflags, + data: data, + err: err, + } + } + } + + if flags&ptypes != 0 { + // Change the propagation type. + if err := unix.Mount("", target, "", flags&pflags, ""); err != nil { + return &mountError{ + op: "remount", + target: target, + flags: flags & pflags, + err: err, + } + } + } + + if oflags&broflags == broflags { + // Remount the bind to apply read only. + if err := unix.Mount("", target, "", oflags|unix.MS_REMOUNT, ""); err != nil { + return &mountError{ + op: "remount-ro", + target: target, + flags: oflags | unix.MS_REMOUNT, + err: err, + } + + } + } + + return nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go b/extender/code/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go new file mode 100644 index 000000000..c3e5aec27 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux,!freebsd freebsd,!cgo + +package mount // import "github.com/docker/docker/pkg/mount" + +func mount(device, target, mType string, flag uintptr, data string) error { + panic("Not implemented") +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/mount/mountinfo.go b/extender/code/vendor/github.com/docker/docker/pkg/mount/mountinfo.go new file mode 100644 index 000000000..ecd03fc02 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/mount/mountinfo.go @@ -0,0 +1,40 @@ +package mount // import "github.com/docker/docker/pkg/mount" + +// Info reveals information about a particular mounted filesystem. This +// struct is populated from the content in the /proc/<pid>/mountinfo file. +type Info struct { + // ID is a unique identifier of the mount (may be reused after umount). + ID int + + // Parent indicates the ID of the mount parent (or of self for the top of the + // mount tree). + Parent int + + // Major indicates one half of the device ID which identifies the device class. + Major int + + // Minor indicates one half of the device ID which identifies a specific + // instance of device.
+ Minor int + + // Root of the mount within the filesystem. + Root string + + // Mountpoint indicates the mount point relative to the process's root. + Mountpoint string + + // Opts represents mount-specific options. + Opts string + + // Optional represents optional fields. + Optional string + + // Fstype indicates the type of filesystem, such as EXT3. + Fstype string + + // Source indicates filesystem specific information or "none". + Source string + + // VfsOpts represents per super block options. + VfsOpts string +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go b/extender/code/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go new file mode 100644 index 000000000..36c89dc1a --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go @@ -0,0 +1,55 @@ +package mount // import "github.com/docker/docker/pkg/mount" + +/* +#include <sys/param.h> +#include <sys/ucred.h> +#include <sys/mount.h> +*/ +import "C" + +import ( + "fmt" + "reflect" + "unsafe" +) + +// Parse the mount table via getmntinfo(3), because comparing Dev and ino does +// not work from bind mounts. +func parseMountTable(filter FilterFunc) ([]*Info, error) { + var rawEntries *C.struct_statfs + + count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) + if count == 0 { + return nil, fmt.Errorf("Failed to call getmntinfo") + } + + var entries []C.struct_statfs + header := (*reflect.SliceHeader)(unsafe.Pointer(&entries)) + header.Cap = count + header.Len = count + header.Data = uintptr(unsafe.Pointer(rawEntries)) + + var out []*Info + for _, entry := range entries { + var mountinfo Info + var skip, stop bool + mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) + + if filter != nil { + // filter out entries we're not interested in + skip, stop = filter(&mountinfo) + if skip { + continue + } + } + + mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) + mountinfo.Fstype = C.GoString(&entry.f_fstypename[0]) + + out = append(out, &mountinfo) + if stop { + break + } + } + return out, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go b/extender/code/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go new file mode 100644 index 000000000..c1dba01fc --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go @@ -0,0 +1,132 @@ +package mount // import "github.com/docker/docker/pkg/mount" + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) { + s := bufio.NewScanner(r) + out := []*Info{} + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + /* + 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue + (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) + + (1) mount ID: unique identifier of the mount (may be reused after umount) + (2) parent ID: ID of parent (or of self for the top of the mount tree) + (3) major:minor: value of st_dev for files on filesystem + (4) root: root of the mount within the filesystem + (5) mount point: mount point relative to the process's root + (6) mount options: per mount options + (7) optional fields: zero or more fields of the form "tag[:value]" + (8) separator: marks the end of the optional fields + (9) filesystem type: name of filesystem of the form "type[.subtype]" + (10) mount source: filesystem specific information or "none" + (11) super options: per super block options + */ + + text := s.Text() + fields := strings.Split(text, " ") + numFields := len(fields) + if
numFields < 10 { + // should be at least 10 fields + return nil, fmt.Errorf("Parsing '%s' failed: not enough fields (%d)", text, numFields) + } + + p := &Info{} + // ignore any number parsing errors, as there should not be any + p.ID, _ = strconv.Atoi(fields[0]) + p.Parent, _ = strconv.Atoi(fields[1]) + mm := strings.Split(fields[2], ":") + if len(mm) != 2 { + return nil, fmt.Errorf("Parsing '%s' failed: unexpected major:minor pair %s", text, mm) + } + p.Major, _ = strconv.Atoi(mm[0]) + p.Minor, _ = strconv.Atoi(mm[1]) + + p.Root = fields[3] + p.Mountpoint = fields[4] + p.Opts = fields[5] + + var skip, stop bool + if filter != nil { + // filter out entries we're not interested in + skip, stop = filter(p) + if skip { + continue + } + } + + // zero or more optional fields, until a separator (-) + i := 6 + for ; i < numFields && fields[i] != "-"; i++ { + switch i { + case 6: + p.Optional = fields[6] + default: + /* NOTE there might be more optional fields before the separator, such as + fields[7]...fields[N] (where N < sepIndex), although + as of Linux kernel 4.15 the only known ones are + mount propagation flags in fields[6]. The correct + behavior is to ignore any unknown optional fields. + */ + break + } + } + if i == numFields { + return nil, fmt.Errorf("Parsing '%s' failed: missing separator ('-')", text) + } + + // There should be 3 fields after the separator... + if i+4 > numFields { + return nil, fmt.Errorf("Parsing '%s' failed: not enough fields after a separator", text) + } + // ... but in Linux <= 3.9 mounting a cifs with spaces in a share name + // (like "//serv/My Documents") _may_ end up having a space in the last field + // of mountinfo (like "unc=//serv/My Documents"). Since kernel 3.10-rc1, cifs + // option unc= is ignored, so a space should not appear. In here we ignore + // those "extra" fields caused by extra spaces. + p.Fstype = fields[i+1] + p.Source = fields[i+2] + p.VfsOpts = fields[i+3] + + out = append(out, p) + if stop { + break + } + } + return out, nil +} + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from +// bind mounts +func parseMountTable(filter FilterFunc) ([]*Info, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f, filter) +} + +// PidMountInfo collects the mounts for a specific process ID. If the process +// ID is unknown, it is better to use `GetMounts` which will inspect +// "/proc/self/mountinfo" instead.
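+// +// Editor's sketch (PID 1 is an arbitrary example): +// +// infos, err := mount.PidMountInfo(1) // reads /proc/1/mountinfo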
+func PidMountInfo(pid int) ([]*Info, error) { + f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f, nil) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go b/extender/code/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go new file mode 100644 index 000000000..fd16d3ed6 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go @@ -0,0 +1,12 @@ +// +build !windows,!linux,!freebsd freebsd,!cgo + +package mount // import "github.com/docker/docker/pkg/mount" + +import ( + "fmt" + "runtime" +) + +func parseMountTable(f FilterFunc) ([]*Info, error) { + return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go new file mode 100644 index 000000000..27e0f6976 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go @@ -0,0 +1,6 @@ +package mount // import "github.com/docker/docker/pkg/mount" + +func parseMountTable(f FilterFunc) ([]*Info, error) { + // Do NOT return an error! + return nil, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go b/extender/code/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go new file mode 100644 index 000000000..8a100f0bc --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go @@ -0,0 +1,71 @@ +package mount // import "github.com/docker/docker/pkg/mount" + +// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "shared") +} + +// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "rshared") +} + +// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. +// See the supported options in flags.go for further reference. +func MakePrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "private") +} + +// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "rprivate") +} + +// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "slave") +} + +// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "rslave") +} + +// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option +// enabled. See the supported options in flags.go for further reference. 
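+// +// Editor's sketch (the mount point is hypothetical): +// +// err := mount.MakeUnbindable("/mnt/scratch")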
+func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "unbindable") +} + +// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount +// option enabled. See the supported options in flags.go for further reference. +func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "runbindable") +} + +// MakeMount ensures that the file or directory given is a mount point, +// bind mounting it to itself in case it is not. +func MakeMount(mnt string) error { + mounted, err := Mounted(mnt) + if err != nil { + return err + } + if mounted { + return nil + } + + return Mount(mnt, mnt, "none", "bind") +} + +func ensureMountedAs(mountPoint, options string) error { + if err := MakeMount(mountPoint); err != nil { + return err + } + + return ForceMount("", mountPoint, "none", options) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/mount/unmount_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/mount/unmount_unix.go new file mode 100644 index 000000000..4be427685 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/mount/unmount_unix.go @@ -0,0 +1,22 @@ +// +build !windows + +package mount // import "github.com/docker/docker/pkg/mount" + +import "golang.org/x/sys/unix" + +func unmount(target string, flags int) error { + err := unix.Unmount(target, flags) + if err == nil || err == unix.EINVAL { + // Ignore "not mounted" error here. Note the same error + // can be returned if flags are invalid, so this code + // assumes that the flags value is always correct. + return nil + } + + return &mountError{ + op: "umount", + target: target, + flags: uintptr(flags), + err: err, + } +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/mount/unmount_unsupported.go b/extender/code/vendor/github.com/docker/docker/pkg/mount/unmount_unsupported.go new file mode 100644 index 000000000..a88ad3577 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/mount/unmount_unsupported.go @@ -0,0 +1,7 @@ +// +build windows + +package mount // import "github.com/docker/docker/pkg/mount" + +func unmount(target string, flag int) error { + panic("Not implemented") +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go b/extender/code/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go new file mode 100644 index 000000000..94780ef61 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go @@ -0,0 +1,74 @@ +// +build !windows + +// Package kernel provides helper functions to get, parse and compare kernel +// versions for different platforms. +package kernel // import "github.com/docker/docker/pkg/parsers/kernel" + +import ( + "errors" + "fmt" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + Kernel int // Version of the kernel (e.g. 4.1.2-generic -> 4) + Major int // Major part of the kernel version (e.g. 4.1.2-generic -> 1) + Minor int // Minor part of the kernel version (e.g. 4.1.2-generic -> 2) + Flavor string // Flavor of the kernel version (e.g. 4.1.2-generic -> generic) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) +} + +// CompareKernelVersion compares two kernel.VersionInfo structs.
+// Returns -1 if a < b, 0 if a == b, 1 if a > b +func CompareKernelVersion(a, b VersionInfo) int { + if a.Kernel < b.Kernel { + return -1 + } else if a.Kernel > b.Kernel { + return 1 + } + + if a.Major < b.Major { + return -1 + } else if a.Major > b.Major { + return 1 + } + + if a.Minor < b.Minor { + return -1 + } else if a.Minor > b.Minor { + return 1 + } + + return 0 +} + +// ParseRelease parses a string and creates a VersionInfo based on it. +func ParseRelease(release string) (*VersionInfo, error) { + var ( + kernel, major, minor, parsed int + flavor, partial string + ) + + // Ignore error from Sscanf to allow an empty flavor. Instead, just + // make sure we got all the version numbers. + parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) + if parsed < 2 { + return nil, errors.New("Can't parse kernel version " + release) + } + + // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 + parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) + if parsed < 1 { + flavor = partial + } + + return &VersionInfo{ + Kernel: kernel, + Major: major, + Minor: minor, + Flavor: flavor, + }, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go b/extender/code/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go new file mode 100644 index 000000000..6e599eebc --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go @@ -0,0 +1,56 @@ +// +build darwin + +// Package kernel provides helper functions to get, parse and compare kernel +// versions for different platforms. +package kernel // import "github.com/docker/docker/pkg/parsers/kernel" + +import ( + "fmt" + "os/exec" + "strings" + + "github.com/mattn/go-shellwords" +) + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + release, err := getRelease() + if err != nil { + return nil, err + } + + return ParseRelease(release) +} + +// getRelease uses `system_profiler SPSoftwareDataType` to get OSX kernel version +func getRelease() (string, error) { + cmd := exec.Command("system_profiler", "SPSoftwareDataType") + osName, err := cmd.Output() + if err != nil { + return "", err + } + + var release string + data := strings.Split(string(osName), "\n") + for _, line := range data { + if strings.Contains(line, "Kernel Version") { + // It has the format like ' Kernel Version: Darwin 14.5.0' + content := strings.SplitN(line, ":", 2) + if len(content) != 2 { + return "", fmt.Errorf("Kernel Version is invalid") + } + + prettyNames, err := shellwords.Parse(content[1]) + if err != nil { + return "", fmt.Errorf("Kernel Version is invalid: %s", err.Error()) + } + + if len(prettyNames) != 2 { + return "", fmt.Errorf("Kernel Version needs to be 'Darwin x.x.x' ") + } + release = prettyNames[1] + } + } + + return release, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go new file mode 100644 index 000000000..8a9aa3122 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go @@ -0,0 +1,35 @@ +// +build linux freebsd openbsd + +// Package kernel provides helper functions to get, parse and compare kernel +// versions for different platforms.
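+// +// Editor's sketch of a typical version gate (the version numbers are arbitrary): +// +// if kernel.CheckKernelVersion(4, 15, 0) { +// // safe to rely on a 4.15+ kernel feature +// }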
+package kernel // import "github.com/docker/docker/pkg/parsers/kernel" + +import ( + "bytes" + + "github.com/sirupsen/logrus" +) + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + uts, err := uname() + if err != nil { + return nil, err + } + + // Remove the \x00 from the release for Atoi to parse correctly + return ParseRelease(string(uts.Release[:bytes.IndexByte(uts.Release[:], 0)])) +} + +// CheckKernelVersion checks if current kernel is newer than (or equal to) +// the given version. +func CheckKernelVersion(k, major, minor int) bool { + if v, err := GetKernelVersion(); err != nil { + logrus.Warnf("error getting kernel version: %s", err) + } else { + if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 { + return false + } + } + return true +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go new file mode 100644 index 000000000..b7b15a1fd --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go @@ -0,0 +1,51 @@ +package kernel // import "github.com/docker/docker/pkg/parsers/kernel" + +import ( + "fmt" + + "golang.org/x/sys/windows" + "golang.org/x/sys/windows/registry" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + kvi string // Version of the kernel (e.g. 6.1.7601.17592 -> 6) + major int // Major part of the kernel version (e.g. 6.1.7601.17592 -> 1) + minor int // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 7601) + build int // Build number of the kernel version (e.g. 6.1.7601.17592 -> 17592) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi) +} + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + + KVI := &VersionInfo{"Unknown", 0, 0, 0} + + k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) + if err != nil { + return KVI, err + } + defer k.Close() + + blex, _, err := k.GetStringValue("BuildLabEx") + if err != nil { + return KVI, err + } + KVI.kvi = blex + + // Important - docker.exe MUST be manifested for this API to return + // the correct information. + dwVersion, err := windows.GetVersion() + if err != nil { + return KVI, err + } + + KVI.major = int(dwVersion & 0xFF) + KVI.minor = int((dwVersion & 0XFF00) >> 8) + KVI.build = int((dwVersion & 0xFFFF0000) >> 16) + + return KVI, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go b/extender/code/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go new file mode 100644 index 000000000..212ff4502 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go @@ -0,0 +1,17 @@ +package kernel // import "github.com/docker/docker/pkg/parsers/kernel" + +import "golang.org/x/sys/unix" + +// Utsname represents the system name structure. +// It is passthrough for unix.Utsname in order to make it portable with +// other platforms where it is not available. 
+type Utsname unix.Utsname + +func uname() (*unix.Utsname, error) { + uts := &unix.Utsname{} + + if err := unix.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go b/extender/code/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go new file mode 100644 index 000000000..b2139b60e --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go @@ -0,0 +1,14 @@ +package kernel // import "github.com/docker/docker/pkg/parsers/kernel" + +import ( + "golang.org/x/sys/unix" +) + +func uname() (*unix.Utsname, error) { + uts := &unix.Utsname{} + + if err := unix.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go b/extender/code/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go new file mode 100644 index 000000000..97906e4cd --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go @@ -0,0 +1,18 @@ +// +build !linux + +package kernel // import "github.com/docker/docker/pkg/parsers/kernel" + +import ( + "errors" +) + +// Utsname represents the system name structure. +// It is defined here to make it portable as it is available on linux but not +// on windows. +type Utsname struct { + Release [65]byte +} + +func uname() (*Utsname, error) { + return nil, errors.New("Kernel version detection is available only on linux") +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/parsers/parsers.go b/extender/code/vendor/github.com/docker/docker/pkg/parsers/parsers.go new file mode 100644 index 000000000..068e52480 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/parsers/parsers.go @@ -0,0 +1,97 @@ +// Package parsers provides helper functions to parse and validate different types +// of strings. These can be hosts, unix addresses, tcp addresses, filters, kernel +// operating system versions. +package parsers // import "github.com/docker/docker/pkg/parsers" + +import ( + "fmt" + "strconv" + "strings" +) + +// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value) +func ParseKeyValueOpt(opt string) (string, string, error) { + parts := strings.SplitN(opt, "=", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) + } + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil +} + +// ParseUintListMaximum parses and validates the specified string as the value +// found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be +// one of the formats below. Note that duplicates are actually allowed in the +// input string. It returns a `map[int]bool` with available elements from `val` +// set to `true`. Values larger than `maximum` cause an error if `maximum` is non-zero, +// in order to stop the map becoming excessively large. +// Supported formats: +// 7 +// 1-6 +// 0,3-4,7,8-10 +// 0-0,0,1-7 +// 03,1-3 <- this will be parsed as [1,2,3] +// 3,2,1 +// 0-2,3,1 +func ParseUintListMaximum(val string, maximum int) (map[int]bool, error) { + return parseUintList(val, maximum) +} + +// ParseUintList parses and validates the specified string as the value +// found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be +// one of the formats below. Note that duplicates are actually allowed in the +// input string.
It returns a `map[int]bool` with available elements from `val` +// set to `true`. +// Supported formats: +// 7 +// 1-6 +// 0,3-4,7,8-10 +// 0-0,0,1-7 +// 03,1-3 <- this will be parsed as [1,2,3] +// 3,2,1 +// 0-2,3,1 +func ParseUintList(val string) (map[int]bool, error) { + return parseUintList(val, 0) +} + +func parseUintList(val string, maximum int) (map[int]bool, error) { + if val == "" { + return map[int]bool{}, nil + } + + availableInts := make(map[int]bool) + split := strings.Split(val, ",") + errInvalidFormat := fmt.Errorf("invalid format: %s", val) + + for _, r := range split { + if !strings.Contains(r, "-") { + v, err := strconv.Atoi(r) + if err != nil { + return nil, errInvalidFormat + } + if maximum != 0 && v > maximum { + return nil, fmt.Errorf("value out of range, maximum is %d", maximum) + } + availableInts[v] = true + } else { + split := strings.SplitN(r, "-", 2) + min, err := strconv.Atoi(split[0]) + if err != nil { + return nil, errInvalidFormat + } + max, err := strconv.Atoi(split[1]) + if err != nil { + return nil, errInvalidFormat + } + if max < min { + return nil, errInvalidFormat + } + if maximum != 0 && max > maximum { + return nil, fmt.Errorf("value out of range, maximum is %d", maximum) + } + for i := min; i <= max; i++ { + availableInts[i] = true + } + } + } + return availableInts, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/plugingetter/getter.go b/extender/code/vendor/github.com/docker/docker/pkg/plugingetter/getter.go new file mode 100644 index 000000000..370e0d5b9 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/plugingetter/getter.go @@ -0,0 +1,52 @@ +package plugingetter // import "github.com/docker/docker/pkg/plugingetter" + +import ( + "net" + "time" + + "github.com/docker/docker/pkg/plugins" +) + +const ( + // Lookup doesn't update RefCount + Lookup = 0 + // Acquire increments RefCount + Acquire = 1 + // Release decrements RefCount + Release = -1 +) + +// CompatPlugin is an abstraction to handle both v2(new) and v1(legacy) plugins. +type CompatPlugin interface { + Name() string + ScopedPath(string) string + IsV1() bool + PluginWithV1Client +} + +// PluginWithV1Client is a plugin that directly utilizes the v1/http plugin client +type PluginWithV1Client interface { + Client() *plugins.Client +} + +// PluginAddr is a plugin that exposes the socket address for creating custom clients rather than the built-in `*plugins.Client` +type PluginAddr interface { + Addr() net.Addr + Timeout() time.Duration + Protocol() string +} + +// CountedPlugin is a plugin which is reference counted.
+type CountedPlugin interface { + Acquire() + Release() + CompatPlugin +} + +// PluginGetter is the interface implemented by Store +type PluginGetter interface { + Get(name, capability string, mode int) (CompatPlugin, error) + GetAllByCap(capability string) ([]CompatPlugin, error) + GetAllManagedPluginsByCap(capability string) []CompatPlugin + Handle(capability string, callback func(string, *plugins.Client)) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/plugins/client.go b/extender/code/vendor/github.com/docker/docker/pkg/plugins/client.go new file mode 100644 index 000000000..035330535 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/plugins/client.go @@ -0,0 +1,242 @@ +package plugins // import "github.com/docker/docker/pkg/plugins" + +import ( + "bytes" + "context" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/url" + "time" + + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/plugins/transport" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "github.com/sirupsen/logrus" +) + +const ( + defaultTimeOut = 30 +) + +func newTransport(addr string, tlsConfig *tlsconfig.Options) (transport.Transport, error) { + tr := &http.Transport{} + + if tlsConfig != nil { + c, err := tlsconfig.Client(*tlsConfig) + if err != nil { + return nil, err + } + tr.TLSClientConfig = c + } + + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + socket := u.Host + if socket == "" { + // valid local socket addresses have the host empty. + socket = u.Path + } + if err := sockets.ConfigureTransport(tr, u.Scheme, socket); err != nil { + return nil, err + } + scheme := httpScheme(u) + + return transport.NewHTTPTransport(tr, scheme, socket), nil +} + +// NewClient creates a new plugin client (http). +func NewClient(addr string, tlsConfig *tlsconfig.Options) (*Client, error) { + clientTransport, err := newTransport(addr, tlsConfig) + if err != nil { + return nil, err + } + return newClientWithTransport(clientTransport, 0), nil +} + +// NewClientWithTimeout creates a new plugin client (http). +func NewClientWithTimeout(addr string, tlsConfig *tlsconfig.Options, timeout time.Duration) (*Client, error) { + clientTransport, err := newTransport(addr, tlsConfig) + if err != nil { + return nil, err + } + return newClientWithTransport(clientTransport, timeout), nil +} + +// newClientWithTransport creates a new plugin client with a given transport. +func newClientWithTransport(tr transport.Transport, timeout time.Duration) *Client { + return &Client{ + http: &http.Client{ + Transport: tr, + Timeout: timeout, + }, + requestFactory: tr, + } +} + +// Client represents a plugin client. +type Client struct { + http *http.Client // http client to use + requestFactory transport.RequestFactory +} + +// RequestOpts is the set of options that can be passed into a request +type RequestOpts struct { + Timeout time.Duration +} + +// WithRequestTimeout sets a timeout duration for plugin requests +func WithRequestTimeout(t time.Duration) func(*RequestOpts) { + return func(o *RequestOpts) { + o.Timeout = t + } +} + +// Call calls the specified method with the specified arguments for the plugin. +// It will retry for 30 seconds if a failure occurs when calling. 
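+// +// Editor's sketch, mirroring how this package activates a plugin (see activateWithLock in plugins.go): +// +// m := new(Manifest) +// err := client.Call("Plugin.Activate", nil, m)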
+func (c *Client) Call(serviceMethod string, args, ret interface{}) error { + return c.CallWithOptions(serviceMethod, args, ret) +} + +// CallWithOptions is just like call except it takes options +func (c *Client) CallWithOptions(serviceMethod string, args interface{}, ret interface{}, opts ...func(*RequestOpts)) error { + var buf bytes.Buffer + if args != nil { + if err := json.NewEncoder(&buf).Encode(args); err != nil { + return err + } + } + body, err := c.callWithRetry(serviceMethod, &buf, true, opts...) + if err != nil { + return err + } + defer body.Close() + if ret != nil { + if err := json.NewDecoder(body).Decode(&ret); err != nil { + logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) + return err + } + } + return nil +} + +// Stream calls the specified method with the specified arguments for the plugin and returns the response body +func (c *Client) Stream(serviceMethod string, args interface{}) (io.ReadCloser, error) { + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(args); err != nil { + return nil, err + } + return c.callWithRetry(serviceMethod, &buf, true) +} + +// SendFile calls the specified method, and passes through the IO stream +func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{}) error { + body, err := c.callWithRetry(serviceMethod, data, true) + if err != nil { + return err + } + defer body.Close() + if err := json.NewDecoder(body).Decode(&ret); err != nil { + logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) + return err + } + return nil +} + +func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool, reqOpts ...func(*RequestOpts)) (io.ReadCloser, error) { + var retries int + start := time.Now() + + var opts RequestOpts + for _, o := range reqOpts { + o(&opts) + } + + for { + req, err := c.requestFactory.NewRequest(serviceMethod, data) + if err != nil { + return nil, err + } + + cancelRequest := func() {} + if opts.Timeout > 0 { + var ctx context.Context + ctx, cancelRequest = context.WithTimeout(req.Context(), opts.Timeout) + req = req.WithContext(ctx) + } + + resp, err := c.http.Do(req) + if err != nil { + cancelRequest() + if !retry { + return nil, err + } + + timeOff := backoff(retries) + if abort(start, timeOff) { + return nil, err + } + retries++ + logrus.Warnf("Unable to connect to plugin: %s%s: %v, retrying in %v", req.URL.Host, req.URL.Path, err, timeOff) + time.Sleep(timeOff) + continue + } + + if resp.StatusCode != http.StatusOK { + b, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + cancelRequest() + if err != nil { + return nil, &statusError{resp.StatusCode, serviceMethod, err.Error()} + } + + // Plugins' Response(s) should have an Err field indicating what went + // wrong. Try to unmarshal into ResponseErr. Otherwise fallback to just + // return the string(body) + type responseErr struct { + Err string + } + remoteErr := responseErr{} + if err := json.Unmarshal(b, &remoteErr); err == nil { + if remoteErr.Err != "" { + return nil, &statusError{resp.StatusCode, serviceMethod, remoteErr.Err} + } + } + // old way... 
+ return nil, &statusError{resp.StatusCode, serviceMethod, string(b)} + } + return ioutils.NewReadCloserWrapper(resp.Body, func() error { + err := resp.Body.Close() + cancelRequest() + return err + }), nil + } +} + +func backoff(retries int) time.Duration { + b, max := 1, defaultTimeOut + for b < max && retries > 0 { + b *= 2 + retries-- + } + if b > max { + b = max + } + return time.Duration(b) * time.Second +} + +func abort(start time.Time, timeOff time.Duration) bool { + return timeOff+time.Since(start) >= time.Duration(defaultTimeOut)*time.Second +} + +func httpScheme(u *url.URL) string { + scheme := u.Scheme + if scheme != "https" { + scheme = "http" + } + return scheme +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/plugins/discovery.go b/extender/code/vendor/github.com/docker/docker/pkg/plugins/discovery.go new file mode 100644 index 000000000..4b79bd29a --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/plugins/discovery.go @@ -0,0 +1,154 @@ +package plugins // import "github.com/docker/docker/pkg/plugins" + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/pkg/errors" +) + +var ( + // ErrNotFound plugin not found + ErrNotFound = errors.New("plugin not found") + socketsPath = "/run/docker/plugins" +) + +// localRegistry defines a registry that is local (using unix socket). +type localRegistry struct{} + +func newLocalRegistry() localRegistry { + return localRegistry{} +} + +// Scan scans all the plugin paths and returns all the names it found +func Scan() ([]string, error) { + var names []string + dirEntries, err := ioutil.ReadDir(socketsPath) + if err != nil && !os.IsNotExist(err) { + return nil, errors.Wrap(err, "error reading dir entries") + } + + for _, fi := range dirEntries { + if fi.IsDir() { + fi, err = os.Stat(filepath.Join(socketsPath, fi.Name(), fi.Name()+".sock")) + if err != nil { + continue + } + } + + if fi.Mode()&os.ModeSocket != 0 { + names = append(names, strings.TrimSuffix(filepath.Base(fi.Name()), filepath.Ext(fi.Name()))) + } + } + + for _, p := range specsPaths { + dirEntries, err := ioutil.ReadDir(p) + if err != nil && !os.IsNotExist(err) { + return nil, errors.Wrap(err, "error reading dir entries") + } + + for _, fi := range dirEntries { + if fi.IsDir() { + infos, err := ioutil.ReadDir(filepath.Join(p, fi.Name())) + if err != nil { + continue + } + + for _, info := range infos { + if strings.TrimSuffix(info.Name(), filepath.Ext(info.Name())) == fi.Name() { + fi = info + break + } + } + } + + ext := filepath.Ext(fi.Name()) + switch ext { + case ".spec", ".json": + plugin := strings.TrimSuffix(fi.Name(), ext) + names = append(names, plugin) + default: + } + } + } + return names, nil +} + +// Plugin returns the plugin registered with the given name (or returns an error). +func (l *localRegistry) Plugin(name string) (*Plugin, error) { + socketpaths := pluginPaths(socketsPath, name, ".sock") + + for _, p := range socketpaths { + if fi, err := os.Stat(p); err == nil && fi.Mode()&os.ModeSocket != 0 { + return NewLocalPlugin(name, "unix://"+p), nil + } + } + + var txtspecpaths []string + for _, p := range specsPaths { + txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".spec")...) + txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".json")...) 
+ } + + for _, p := range txtspecpaths { + if _, err := os.Stat(p); err == nil { + if strings.HasSuffix(p, ".json") { + return readPluginJSONInfo(name, p) + } + return readPluginInfo(name, p) + } + } + return nil, errors.Wrapf(ErrNotFound, "could not find plugin %s in v1 plugin registry", name) +} + +func readPluginInfo(name, path string) (*Plugin, error) { + content, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + addr := strings.TrimSpace(string(content)) + + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + + if len(u.Scheme) == 0 { + return nil, fmt.Errorf("Unknown protocol") + } + + return NewLocalPlugin(name, addr), nil +} + +func readPluginJSONInfo(name, path string) (*Plugin, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + var p Plugin + if err := json.NewDecoder(f).Decode(&p); err != nil { + return nil, err + } + p.name = name + if p.TLSConfig != nil && len(p.TLSConfig.CAFile) == 0 { + p.TLSConfig.InsecureSkipVerify = true + } + p.activateWait = sync.NewCond(&sync.Mutex{}) + + return &p, nil +} + +func pluginPaths(base, name, ext string) []string { + return []string{ + filepath.Join(base, name+ext), + filepath.Join(base, name, name+ext), + } +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go new file mode 100644 index 000000000..58058f282 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go @@ -0,0 +1,5 @@ +// +build !windows + +package plugins // import "github.com/docker/docker/pkg/plugins" + +var specsPaths = []string{"/etc/docker/plugins", "/usr/lib/docker/plugins"} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go new file mode 100644 index 000000000..f0af3477f --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go @@ -0,0 +1,8 @@ +package plugins // import "github.com/docker/docker/pkg/plugins" + +import ( + "os" + "path/filepath" +) + +var specsPaths = []string{filepath.Join(os.Getenv("programdata"), "docker", "plugins")} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/plugins/errors.go b/extender/code/vendor/github.com/docker/docker/pkg/plugins/errors.go new file mode 100644 index 000000000..6735c304b --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/plugins/errors.go @@ -0,0 +1,33 @@ +package plugins // import "github.com/docker/docker/pkg/plugins" + +import ( + "fmt" + "net/http" +) + +type statusError struct { + status int + method string + err string +} + +// Error returns a formatted string for this error type +func (e *statusError) Error() string { + return fmt.Sprintf("%s: %v", e.method, e.err) +} + +// IsNotFound indicates if the passed in error is from an http.StatusNotFound from the plugin +func IsNotFound(err error) bool { + return isStatusError(err, http.StatusNotFound) +} + +func isStatusError(err error, status int) bool { + if err == nil { + return false + } + e, ok := err.(*statusError) + if !ok { + return false + } + return e.status == status +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/plugins/plugins.go b/extender/code/vendor/github.com/docker/docker/pkg/plugins/plugins.go new file mode 100644 index 000000000..28c06ff69 --- /dev/null +++ 
b/extender/code/vendor/github.com/docker/docker/pkg/plugins/plugins.go @@ -0,0 +1,337 @@ +// Package plugins provides structures and helper functions to manage Docker +// plugins. +// +// Docker discovers plugins by looking for them in the plugin directory whenever +// a user or container tries to use one by name. UNIX domain socket files must +// be located under /run/docker/plugins, whereas spec files can be located +// either under /etc/docker/plugins or /usr/lib/docker/plugins. This is handled +// by the Registry interface, which lets you list all plugins or get a plugin by +// its name if it exists. +// +// The plugins need to implement an HTTP server and bind this to the UNIX socket +// or the address specified in the spec files. +// A handshake is sent at /Plugin.Activate, and plugins are expected to return +// a Manifest with a list of Docker subsystems which this plugin implements. +// +// In order to use a plugin, you can use ``Get`` with the name of the +// plugin and the subsystem it implements. +// +// plugin, err := plugins.Get("example", "VolumeDriver") +// if err != nil { +// return fmt.Errorf("Error looking up volume plugin example: %v", err) +// } +package plugins // import "github.com/docker/docker/pkg/plugins" + +import ( + "errors" + "sync" + "time" + + "github.com/docker/go-connections/tlsconfig" + "github.com/sirupsen/logrus" +) + +// ProtocolSchemeHTTPV1 is the name of the protocol used for interacting with plugins using this package. +const ProtocolSchemeHTTPV1 = "moby.plugins.http/v1" + +var ( + // ErrNotImplements is returned if the plugin does not implement the requested driver. + ErrNotImplements = errors.New("Plugin does not implement the requested driver") +) + +type plugins struct { + sync.Mutex + plugins map[string]*Plugin +} + +type extpointHandlers struct { + sync.RWMutex + extpointHandlers map[string][]func(string, *Client) +} + +var ( + storage = plugins{plugins: make(map[string]*Plugin)} + handlers = extpointHandlers{extpointHandlers: make(map[string][]func(string, *Client))} +) + +// Manifest lists what a plugin implements. +type Manifest struct { + // List of subsystems the plugin implements. + Implements []string +} + +// Plugin is the definition of a docker plugin. +type Plugin struct { + // Name of the plugin + name string + // Address of the plugin + Addr string + // TLS configuration of the plugin + TLSConfig *tlsconfig.Options + // Client attached to the plugin + client *Client + // Manifest of the plugin (see above) + Manifest *Manifest `json:"-"` + + // wait for activation to finish + activateWait *sync.Cond + // error produced by activation + activateErr error + // keeps track of callback handlers run against this plugin + handlersRun bool +} + +// Name returns the name of the plugin. +func (p *Plugin) Name() string { + return p.name +} + +// Client returns a ready-to-use plugin client that can be used to communicate with the plugin. +func (p *Plugin) Client() *Client { + return p.client +} + +// Protocol returns the protocol name/version used for plugins in this package. +func (p *Plugin) Protocol() string { + return ProtocolSchemeHTTPV1 +} + +// IsV1 returns true for V1 plugins and false otherwise. +func (p *Plugin) IsV1() bool { + return true +} + +// NewLocalPlugin creates a new local plugin.
+func NewLocalPlugin(name, addr string) *Plugin { + return &Plugin{ + name: name, + Addr: addr, + // TODO: change to nil + TLSConfig: &tlsconfig.Options{InsecureSkipVerify: true}, + activateWait: sync.NewCond(&sync.Mutex{}), + } +} + +func (p *Plugin) activate() error { + p.activateWait.L.Lock() + + if p.activated() { + p.runHandlers() + p.activateWait.L.Unlock() + return p.activateErr + } + + p.activateErr = p.activateWithLock() + + p.runHandlers() + p.activateWait.L.Unlock() + p.activateWait.Broadcast() + return p.activateErr +} + +// runHandlers runs the registered handlers for the implemented plugin types +// This should only be run after activation, and while the activation lock is held. +func (p *Plugin) runHandlers() { + if !p.activated() { + return + } + + handlers.RLock() + if !p.handlersRun { + for _, iface := range p.Manifest.Implements { + hdlrs, handled := handlers.extpointHandlers[iface] + if !handled { + continue + } + for _, handler := range hdlrs { + handler(p.name, p.client) + } + } + p.handlersRun = true + } + handlers.RUnlock() + +} + +// activated returns if the plugin has already been activated. +// This should only be called with the activation lock held +func (p *Plugin) activated() bool { + return p.Manifest != nil +} + +func (p *Plugin) activateWithLock() error { + c, err := NewClient(p.Addr, p.TLSConfig) + if err != nil { + return err + } + p.client = c + + m := new(Manifest) + if err = p.client.Call("Plugin.Activate", nil, m); err != nil { + return err + } + + p.Manifest = m + return nil +} + +func (p *Plugin) waitActive() error { + p.activateWait.L.Lock() + for !p.activated() && p.activateErr == nil { + p.activateWait.Wait() + } + p.activateWait.L.Unlock() + return p.activateErr +} + +func (p *Plugin) implements(kind string) bool { + if p.Manifest == nil { + return false + } + for _, driver := range p.Manifest.Implements { + if driver == kind { + return true + } + } + return false +} + +func load(name string) (*Plugin, error) { + return loadWithRetry(name, true) +} + +func loadWithRetry(name string, retry bool) (*Plugin, error) { + registry := newLocalRegistry() + start := time.Now() + + var retries int + for { + pl, err := registry.Plugin(name) + if err != nil { + if !retry { + return nil, err + } + + timeOff := backoff(retries) + if abort(start, timeOff) { + return nil, err + } + retries++ + logrus.Warnf("Unable to locate plugin: %s, retrying in %v", name, timeOff) + time.Sleep(timeOff) + continue + } + + storage.Lock() + if pl, exists := storage.plugins[name]; exists { + storage.Unlock() + return pl, pl.activate() + } + storage.plugins[name] = pl + storage.Unlock() + + err = pl.activate() + + if err != nil { + storage.Lock() + delete(storage.plugins, name) + storage.Unlock() + } + + return pl, err + } +} + +func get(name string) (*Plugin, error) { + storage.Lock() + pl, ok := storage.plugins[name] + storage.Unlock() + if ok { + return pl, pl.activate() + } + return load(name) +} + +// Get returns the plugin given the specified name and requested implementation. +func Get(name, imp string) (*Plugin, error) { + pl, err := get(name) + if err != nil { + return nil, err + } + if err := pl.waitActive(); err == nil && pl.implements(imp) { + logrus.Debugf("%s implements: %s", name, imp) + return pl, nil + } + return nil, ErrNotImplements +} + +// Handle adds the specified function to the extpointHandlers. 
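+// +// Editor's sketch (the "VolumeDriver" extension point is illustrative): +// +// Handle("VolumeDriver", func(name string, c *Client) { +// // wire the newly activated plugin into the subsystem +// })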
+func Handle(iface string, fn func(string, *Client)) { + handlers.Lock() + hdlrs, ok := handlers.extpointHandlers[iface] + if !ok { + hdlrs = []func(string, *Client){} + } + + hdlrs = append(hdlrs, fn) + handlers.extpointHandlers[iface] = hdlrs + + storage.Lock() + for _, p := range storage.plugins { + p.activateWait.L.Lock() + if p.activated() && p.implements(iface) { + p.handlersRun = false + } + p.activateWait.L.Unlock() + } + storage.Unlock() + + handlers.Unlock() +} + +// GetAll returns all the plugins for the specified implementation +func GetAll(imp string) ([]*Plugin, error) { + pluginNames, err := Scan() + if err != nil { + return nil, err + } + + type plLoad struct { + pl *Plugin + err error + } + + chPl := make(chan *plLoad, len(pluginNames)) + var wg sync.WaitGroup + for _, name := range pluginNames { + storage.Lock() + pl, ok := storage.plugins[name] + storage.Unlock() + if ok { + chPl <- &plLoad{pl, nil} + continue + } + + wg.Add(1) + go func(name string) { + defer wg.Done() + pl, err := loadWithRetry(name, false) + chPl <- &plLoad{pl, err} + }(name) + } + + wg.Wait() + close(chPl) + + var out []*Plugin + for pl := range chPl { + if pl.err != nil { + logrus.Error(pl.err) + continue + } + if err := pl.pl.waitActive(); err == nil && pl.pl.implements(imp) { + out = append(out, pl.pl) + } + } + return out, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go new file mode 100644 index 000000000..cdfbe9345 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go @@ -0,0 +1,9 @@ +// +build !windows + +package plugins // import "github.com/docker/docker/pkg/plugins" + +// ScopedPath returns the path scoped to the plugin's rootfs. +// For v1 plugins, this always returns the path unchanged as v1 plugins run directly on the host. +func (p *Plugin) ScopedPath(s string) string { + return s +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go new file mode 100644 index 000000000..ddf1d786c --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go @@ -0,0 +1,7 @@ +package plugins // import "github.com/docker/docker/pkg/plugins" + +// ScopedPath returns the path scoped to the plugin's rootfs. +// For v1 plugins, this always returns the path unchanged as v1 plugins run directly on the host. +func (p *Plugin) ScopedPath(s string) string { + return s +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/plugins/transport/http.go b/extender/code/vendor/github.com/docker/docker/pkg/plugins/transport/http.go new file mode 100644 index 000000000..76d3bdb71 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/plugins/transport/http.go @@ -0,0 +1,36 @@ +package transport // import "github.com/docker/docker/pkg/plugins/transport" + +import ( + "io" + "net/http" +) + +// httpTransport holds an http.RoundTripper +// and information about the scheme and address the transport +// sends request to. +type httpTransport struct { + http.RoundTripper + scheme string + addr string +} + +// NewHTTPTransport creates a new httpTransport. 
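+// A minimal wiring sketch (the scheme and address are illustrative assumptions):
+//
+//	t := NewHTTPTransport(&http.Transport{}, "http", "localhost:8080")
+//	req, _ := t.NewRequest("/Plugin.Activate", nil)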
+func NewHTTPTransport(r http.RoundTripper, scheme, addr string) Transport { + return httpTransport{ + RoundTripper: r, + scheme: scheme, + addr: addr, + } +} + +// NewRequest creates a new http.Request and sets the URL +// scheme and address with the transport's fields. +func (t httpTransport) NewRequest(path string, data io.Reader) (*http.Request, error) { + req, err := newHTTPRequest(path, data) + if err != nil { + return nil, err + } + req.URL.Scheme = t.scheme + req.URL.Host = t.addr + return req, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go b/extender/code/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go new file mode 100644 index 000000000..9cb13335a --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go @@ -0,0 +1,36 @@ +package transport // import "github.com/docker/docker/pkg/plugins/transport" + +import ( + "io" + "net/http" + "strings" +) + +// VersionMimetype is the Content-Type the engine sends to plugins. +const VersionMimetype = "application/vnd.docker.plugins.v1.2+json" + +// RequestFactory defines an interface that +// transports can implement to create new requests. +type RequestFactory interface { + NewRequest(path string, data io.Reader) (*http.Request, error) +} + +// Transport defines an interface that plugin transports +// must implement. +type Transport interface { + http.RoundTripper + RequestFactory +} + +// newHTTPRequest creates a new request with a path and a body. +func newHTTPRequest(path string, data io.Reader) (*http.Request, error) { + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + req, err := http.NewRequest("POST", path, data) + if err != nil { + return nil, err + } + req.Header.Add("Accept", VersionMimetype) + return req, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/pools/pools.go b/extender/code/vendor/github.com/docker/docker/pkg/pools/pools.go new file mode 100644 index 000000000..46339c282 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/pools/pools.go @@ -0,0 +1,137 @@ +// Package pools provides a collection of pools which provide various +// data types with buffers. These can be used to lower the number of +// memory allocations and reuse buffers. +// +// New pools should be added to this package to allow them to be +// shared across packages. +// +// Utility functions which operate on pools should be added to this +// package to allow them to be reused. +package pools // import "github.com/docker/docker/pkg/pools" + +import ( + "bufio" + "io" + "sync" + + "github.com/docker/docker/pkg/ioutils" +) + +const buffer32K = 32 * 1024 + +var ( + // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. + BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) + buffer32KPool = newBufferPoolWithSize(buffer32K) +) + +// BufioReaderPool is a bufio reader that uses sync.Pool. +type BufioReaderPool struct { + pool sync.Pool +} + +// newBufioReaderPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioReaderPoolWithSize(size int) *BufioReaderPool { + return &BufioReaderPool{ + pool: sync.Pool{ + New: func() interface{} { return bufio.NewReaderSize(nil, size) }, + }, + } +} + +// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. 
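+// A minimal usage sketch (src is any io.Reader supplied by the caller):
+//
+//	buf := BufioReader32KPool.Get(src)
+//	defer BufioReader32KPool.Put(buf)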
+func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
+	buf := bufPool.pool.Get().(*bufio.Reader)
+	buf.Reset(r)
+	return buf
+}
+
+// Put puts the bufio.Reader back into the pool.
+func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
+	b.Reset(nil)
+	bufPool.pool.Put(b)
+}
+
+type bufferPool struct {
+	pool sync.Pool
+}
+
+func newBufferPoolWithSize(size int) *bufferPool {
+	return &bufferPool{
+		pool: sync.Pool{
+			New: func() interface{} { return make([]byte, size) },
+		},
+	}
+}
+
+func (bp *bufferPool) Get() []byte {
+	return bp.pool.Get().([]byte)
+}
+
+func (bp *bufferPool) Put(b []byte) {
+	bp.pool.Put(b)
+}
+
+// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy.
+func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
+	buf := buffer32KPool.Get()
+	written, err = io.CopyBuffer(dst, src, buf)
+	buffer32KPool.Put(buf)
+	return
+}
+
+// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
+// into the pool and closes the reader if it's an io.ReadCloser.
+func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
+	return ioutils.NewReadCloserWrapper(r, func() error {
+		if readCloser, ok := r.(io.ReadCloser); ok {
+			readCloser.Close()
+		}
+		bufPool.Put(buf)
+		return nil
+	})
+}
+
+// BufioWriterPool is a bufio writer that uses sync.Pool.
+type BufioWriterPool struct {
+	pool sync.Pool
+}
+
+// newBufioWriterPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
+	return &BufioWriterPool{
+		pool: sync.Pool{
+			New: func() interface{} { return bufio.NewWriterSize(nil, size) },
+		},
+	}
+}
+
+// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
+func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
+	buf := bufPool.pool.Get().(*bufio.Writer)
+	buf.Reset(w)
+	return buf
+}
+
+// Put puts the bufio.Writer back into the pool.
+func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
+	b.Reset(nil)
+	bufPool.pool.Put(b)
+}
+
+// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
+// into the pool and closes the writer if it's an io.WriteCloser.
+func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
+	return ioutils.NewWriteCloserWrapper(w, func() error {
+		buf.Flush()
+		if writeCloser, ok := w.(io.WriteCloser); ok {
+			writeCloser.Close()
+		}
+		bufPool.Put(buf)
+		return nil
+	})
+}
diff --git a/extender/code/vendor/github.com/docker/docker/pkg/progress/progress.go b/extender/code/vendor/github.com/docker/docker/pkg/progress/progress.go
new file mode 100644
index 000000000..32300914e
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/pkg/progress/progress.go
@@ -0,0 +1,93 @@
+package progress // import "github.com/docker/docker/pkg/progress"
+
+import (
+	"fmt"
+)
+
+// Progress represents the progress of a transfer.
+type Progress struct {
+	ID string
+
+	// Progress contains a Message or...
+	Message string
+
+	// ...progress of an action
+	Action  string
+	Current int64
+	Total   int64
+
+	// If true, don't show xB/yB
+	HideCounts bool
+	// If not empty, use units instead of bytes for counts
+	Units string
+
+	// Aux contains extra information not presented to the user, such as
+	// digests for push signing.
+	Aux interface{}
+
+	LastUpdate bool
+}
+
+// Output is an interface for writing progress information.
It's +// like a writer for progress, but we don't call it Writer because +// that would be confusing next to ProgressReader (also, because it +// doesn't implement the io.Writer interface). +type Output interface { + WriteProgress(Progress) error +} + +type chanOutput chan<- Progress + +func (out chanOutput) WriteProgress(p Progress) error { + // FIXME: workaround for panic in #37735 + defer func() { + recover() + }() + out <- p + return nil +} + +// ChanOutput returns an Output that writes progress updates to the +// supplied channel. +func ChanOutput(progressChan chan<- Progress) Output { + return chanOutput(progressChan) +} + +type discardOutput struct{} + +func (discardOutput) WriteProgress(Progress) error { + return nil +} + +// DiscardOutput returns an Output that discards progress +func DiscardOutput() Output { + return discardOutput{} +} + +// Update is a convenience function to write a progress update to the channel. +func Update(out Output, id, action string) { + out.WriteProgress(Progress{ID: id, Action: action}) +} + +// Updatef is a convenience function to write a printf-formatted progress update +// to the channel. +func Updatef(out Output, id, format string, a ...interface{}) { + Update(out, id, fmt.Sprintf(format, a...)) +} + +// Message is a convenience function to write a progress message to the channel. +func Message(out Output, id, message string) { + out.WriteProgress(Progress{ID: id, Message: message}) +} + +// Messagef is a convenience function to write a printf-formatted progress +// message to the channel. +func Messagef(out Output, id, format string, a ...interface{}) { + Message(out, id, fmt.Sprintf(format, a...)) +} + +// Aux sends auxiliary information over a progress interface, which will not be +// formatted for the UI. This is used for things such as push signing. +func Aux(out Output, a interface{}) { + out.WriteProgress(Progress{Aux: a}) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/progress/progressreader.go b/extender/code/vendor/github.com/docker/docker/pkg/progress/progressreader.go new file mode 100644 index 000000000..7ca07dc64 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/progress/progressreader.go @@ -0,0 +1,66 @@ +package progress // import "github.com/docker/docker/pkg/progress" + +import ( + "io" + "time" + + "golang.org/x/time/rate" +) + +// Reader is a Reader with progress bar. +type Reader struct { + in io.ReadCloser // Stream to read from + out Output // Where to send progress bar to + size int64 + current int64 + lastUpdate int64 + id string + action string + rateLimiter *rate.Limiter +} + +// NewProgressReader creates a new ProgressReader. +func NewProgressReader(in io.ReadCloser, out Output, size int64, id, action string) *Reader { + return &Reader{ + in: in, + out: out, + size: size, + id: id, + action: action, + rateLimiter: rate.NewLimiter(rate.Every(100*time.Millisecond), 1), + } +} + +func (p *Reader) Read(buf []byte) (n int, err error) { + read, err := p.in.Read(buf) + p.current += int64(read) + updateEvery := int64(1024 * 512) //512kB + if p.size > 0 { + // Update progress for every 1% read if 1% < 512kB + if increment := int64(0.01 * float64(p.size)); increment < updateEvery { + updateEvery = increment + } + } + if p.current-p.lastUpdate > updateEvery || err != nil { + p.updateProgress(err != nil && read == 0) + p.lastUpdate = p.current + } + + return read, err +} + +// Close closes the progress reader and its underlying reader. 
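+// A minimal usage sketch (in, out, size, and dst are caller-supplied assumptions):
+//
+//	pr := NewProgressReader(in, out, size, "layer", "Downloading")
+//	defer pr.Close()
+//	_, err := io.Copy(dst, pr)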
+func (p *Reader) Close() error {
+	if p.current < p.size {
+		// print a full progress bar when closing prematurely
+		p.current = p.size
+		p.updateProgress(false)
+	}
+	return p.in.Close()
+}
+
+func (p *Reader) updateProgress(last bool) {
+	if last || p.current == p.size || p.rateLimiter.Allow() {
+		p.out.WriteProgress(Progress{ID: p.id, Action: p.action, Current: p.current, Total: p.size, LastUpdate: last})
+	}
+}
diff --git a/extender/code/vendor/github.com/docker/docker/pkg/pubsub/publisher.go b/extender/code/vendor/github.com/docker/docker/pkg/pubsub/publisher.go
new file mode 100644
index 000000000..76033ed9e
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/pkg/pubsub/publisher.go
@@ -0,0 +1,121 @@
+package pubsub // import "github.com/docker/docker/pkg/pubsub"
+
+import (
+	"sync"
+	"time"
+)
+
+var wgPool = sync.Pool{New: func() interface{} { return new(sync.WaitGroup) }}
+
+// NewPublisher creates a new pub/sub publisher to broadcast messages.
+// The duration is used as the send timeout as to not block the publisher publishing
+// messages to other clients if one client is slow or unresponsive.
+// The buffer is used when creating new channels for subscribers.
+func NewPublisher(publishTimeout time.Duration, buffer int) *Publisher {
+	return &Publisher{
+		buffer:      buffer,
+		timeout:     publishTimeout,
+		subscribers: make(map[subscriber]topicFunc),
+	}
+}
+
+type subscriber chan interface{}
+type topicFunc func(v interface{}) bool
+
+// Publisher is a basic pub/sub structure. It allows sending events and
+// subscribing to them. Can be safely used from multiple goroutines.
+type Publisher struct {
+	m           sync.RWMutex
+	buffer      int
+	timeout     time.Duration
+	subscribers map[subscriber]topicFunc
+}
+
+// Len returns the number of subscribers for the publisher.
+func (p *Publisher) Len() int {
+	p.m.RLock()
+	i := len(p.subscribers)
+	p.m.RUnlock()
+	return i
+}
+
+// Subscribe adds a new subscriber to the publisher returning the channel.
+func (p *Publisher) Subscribe() chan interface{} {
+	return p.SubscribeTopic(nil)
+}
+
+// SubscribeTopic adds a new subscriber that filters messages sent by a topic.
+func (p *Publisher) SubscribeTopic(topic topicFunc) chan interface{} {
+	ch := make(chan interface{}, p.buffer)
+	p.m.Lock()
+	p.subscribers[ch] = topic
+	p.m.Unlock()
+	return ch
+}
+
+// SubscribeTopicWithBuffer adds a new subscriber that filters messages sent by a topic.
+// The returned channel has a buffer of the specified size.
+func (p *Publisher) SubscribeTopicWithBuffer(topic topicFunc, buffer int) chan interface{} {
+	ch := make(chan interface{}, buffer)
+	p.m.Lock()
+	p.subscribers[ch] = topic
+	p.m.Unlock()
+	return ch
+}
+
+// Evict removes the specified subscriber from receiving any more messages.
+func (p *Publisher) Evict(sub chan interface{}) {
+	p.m.Lock()
+	delete(p.subscribers, sub)
+	close(sub)
+	p.m.Unlock()
+}
+
+// Publish sends the data in v to all subscribers currently registered with the publisher.
+func (p *Publisher) Publish(v interface{}) {
+	p.m.RLock()
+	if len(p.subscribers) == 0 {
+		p.m.RUnlock()
+		return
+	}
+
+	wg := wgPool.Get().(*sync.WaitGroup)
+	for sub, topic := range p.subscribers {
+		wg.Add(1)
+		go p.sendTopic(sub, topic, v, wg)
+	}
+	wg.Wait()
+	wgPool.Put(wg)
+	p.m.RUnlock()
+}
+
+// Close closes the channels to all subscribers registered with the publisher.
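+// A minimal publish/subscribe sketch (the timeout and buffer values are arbitrary):
+//
+//	p := NewPublisher(100*time.Millisecond, 10)
+//	sub := p.Subscribe()
+//	go p.Publish("hello")
+//	msg := <-sub // "hello"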
+func (p *Publisher) Close() { + p.m.Lock() + for sub := range p.subscribers { + delete(p.subscribers, sub) + close(sub) + } + p.m.Unlock() +} + +func (p *Publisher) sendTopic(sub subscriber, topic topicFunc, v interface{}, wg *sync.WaitGroup) { + defer wg.Done() + if topic != nil && !topic(v) { + return + } + + // send under a select as to not block if the receiver is unavailable + if p.timeout > 0 { + select { + case sub <- v: + case <-time.After(p.timeout): + } + return + } + + select { + case sub <- v: + default: + } +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/reexec/README.md b/extender/code/vendor/github.com/docker/docker/pkg/reexec/README.md new file mode 100644 index 000000000..6658f69b6 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/reexec/README.md @@ -0,0 +1,5 @@ +# reexec + +The `reexec` package facilitates the busybox style reexec of the docker binary that we require because +of the forking limitations of using Go. Handlers can be registered with a name and the argv 0 of +the exec of the binary will be used to find and execute custom init paths. diff --git a/extender/code/vendor/github.com/docker/docker/pkg/reexec/command_linux.go b/extender/code/vendor/github.com/docker/docker/pkg/reexec/command_linux.go new file mode 100644 index 000000000..efea71794 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/reexec/command_linux.go @@ -0,0 +1,28 @@ +package reexec // import "github.com/docker/docker/pkg/reexec" + +import ( + "os/exec" + "syscall" + + "golang.org/x/sys/unix" +) + +// Self returns the path to the current process's binary. +// Returns "/proc/self/exe". +func Self() string { + return "/proc/self/exe" +} + +// Command returns *exec.Cmd which has Path as current binary. Also it setting +// SysProcAttr.Pdeathsig to SIGTERM. +// This will use the in-memory version (/proc/self/exe) of the current binary, +// it is thus safe to delete or replace the on-disk binary (os.Args[0]). +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + SysProcAttr: &syscall.SysProcAttr{ + Pdeathsig: unix.SIGTERM, + }, + } +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/reexec/command_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/reexec/command_unix.go new file mode 100644 index 000000000..ceaabbdee --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/reexec/command_unix.go @@ -0,0 +1,23 @@ +// +build freebsd darwin + +package reexec // import "github.com/docker/docker/pkg/reexec" + +import ( + "os/exec" +) + +// Self returns the path to the current process's binary. +// Uses os.Args[0]. +func Self() string { + return naiveSelf() +} + +// Command returns *exec.Cmd which has Path as current binary. +// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will +// be set to "/usr/bin/docker". 
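+// A minimal re-exec sketch ("my-init" is a hypothetical registered initializer):
+//
+//	cmd := Command("my-init")
+//	err := cmd.Run()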
+func Command(args ...string) *exec.Cmd {
+	return &exec.Cmd{
+		Path: Self(),
+		Args: args,
+	}
+}
diff --git a/extender/code/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go b/extender/code/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go
new file mode 100644
index 000000000..e7eed2424
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go
@@ -0,0 +1,16 @@
+// +build !linux,!windows,!freebsd,!darwin
+
+package reexec // import "github.com/docker/docker/pkg/reexec"
+
+import (
+	"os/exec"
+)
+
+func Self() string {
+	return ""
+}
+
+// Command is unsupported on operating systems apart from Linux, Windows, FreeBSD, and Darwin.
+func Command(args ...string) *exec.Cmd {
+	return nil
+}
diff --git a/extender/code/vendor/github.com/docker/docker/pkg/reexec/command_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/reexec/command_windows.go
new file mode 100644
index 000000000..438226890
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/pkg/reexec/command_windows.go
@@ -0,0 +1,21 @@
+package reexec // import "github.com/docker/docker/pkg/reexec"
+
+import (
+	"os/exec"
+)
+
+// Self returns the path to the current process's binary.
+// Uses os.Args[0].
+func Self() string {
+	return naiveSelf()
+}
+
+// Command returns *exec.Cmd which has Path as current binary.
+// For example if current binary is "docker.exe" at "C:\", then cmd.Path will
+// be set to "C:\docker.exe".
+func Command(args ...string) *exec.Cmd {
+	return &exec.Cmd{
+		Path: Self(),
+		Args: args,
+	}
+}
diff --git a/extender/code/vendor/github.com/docker/docker/pkg/reexec/reexec.go b/extender/code/vendor/github.com/docker/docker/pkg/reexec/reexec.go
new file mode 100644
index 000000000..f8ccddd59
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/pkg/reexec/reexec.go
@@ -0,0 +1,47 @@
+package reexec // import "github.com/docker/docker/pkg/reexec"
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+)
+
+var registeredInitializers = make(map[string]func())
+
+// Register adds an initialization func under the specified name.
+func Register(name string, initializer func()) {
+	if _, exists := registeredInitializers[name]; exists {
+		panic(fmt.Sprintf("reexec func already registered under name %q", name))
+	}
+
+	registeredInitializers[name] = initializer
+}
+
+// Init is called as the first part of the exec process and returns true if an
+// initialization function was called.
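+// A minimal wiring sketch (the "my-init" name and handler are assumptions):
+//
+//	func init() {
+//		Register("my-init", func() { os.Exit(0) })
+//	}
+//
+//	func main() {
+//		if Init() {
+//			return // this process was re-exec'd as "my-init"
+//		}
+//	}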
+func Init() bool { + initializer, exists := registeredInitializers[os.Args[0]] + if exists { + initializer() + + return true + } + return false +} + +func naiveSelf() string { + name := os.Args[0] + if filepath.Base(name) == name { + if lp, err := exec.LookPath(name); err == nil { + return lp + } + } + // handle conversion of relative paths to absolute + if absName, err := filepath.Abs(name); err == nil { + return absName + } + // if we couldn't get absolute name, return original + // (NOTE: Go only errors on Abs() if os.Getwd fails) + return name +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/signal/README.md b/extender/code/vendor/github.com/docker/docker/pkg/signal/README.md new file mode 100644 index 000000000..2b237a594 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/signal/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with signals across various operating systems \ No newline at end of file diff --git a/extender/code/vendor/github.com/docker/docker/pkg/signal/signal.go b/extender/code/vendor/github.com/docker/docker/pkg/signal/signal.go new file mode 100644 index 000000000..88ef7b5ea --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/signal/signal.go @@ -0,0 +1,54 @@ +// Package signal provides helper functions for dealing with signals across +// various operating systems. +package signal // import "github.com/docker/docker/pkg/signal" + +import ( + "fmt" + "os" + "os/signal" + "strconv" + "strings" + "syscall" +) + +// CatchAll catches all signals and relays them to the specified channel. +func CatchAll(sigc chan os.Signal) { + var handledSigs []os.Signal + for _, s := range SignalMap { + handledSigs = append(handledSigs, s) + } + signal.Notify(sigc, handledSigs...) +} + +// StopCatch stops catching the signals and closes the specified channel. +func StopCatch(sigc chan os.Signal) { + signal.Stop(sigc) + close(sigc) +} + +// ParseSignal translates a string to a valid syscall signal. +// It returns an error if the signal map doesn't include the given signal. +func ParseSignal(rawSignal string) (syscall.Signal, error) { + s, err := strconv.Atoi(rawSignal) + if err == nil { + if s == 0 { + return -1, fmt.Errorf("Invalid signal: %s", rawSignal) + } + return syscall.Signal(s), nil + } + signal, ok := SignalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")] + if !ok { + return -1, fmt.Errorf("Invalid signal: %s", rawSignal) + } + return signal, nil +} + +// ValidSignalForPlatform returns true if a signal is valid on the platform +func ValidSignalForPlatform(sig syscall.Signal) bool { + for _, v := range SignalMap { + if v == sig { + return true + } + } + return false +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go b/extender/code/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go new file mode 100644 index 000000000..ee5501e3d --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go @@ -0,0 +1,41 @@ +package signal // import "github.com/docker/docker/pkg/signal" + +import ( + "syscall" +) + +// SignalMap is a map of Darwin signals. 
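+// Names are resolved through this map by ParseSignal after trimming any "SIG"
+// prefix; a minimal lookup sketch:
+//
+//	sig, err := ParseSignal("SIGTERM") // yields syscall.SIGTERM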
+var SignalMap = map[string]syscall.Signal{
+	"ABRT":   syscall.SIGABRT,
+	"ALRM":   syscall.SIGALRM,
+	"BUS":    syscall.SIGBUS,
+	"CHLD":   syscall.SIGCHLD,
+	"CONT":   syscall.SIGCONT,
+	"EMT":    syscall.SIGEMT,
+	"FPE":    syscall.SIGFPE,
+	"HUP":    syscall.SIGHUP,
+	"ILL":    syscall.SIGILL,
+	"INFO":   syscall.SIGINFO,
+	"INT":    syscall.SIGINT,
+	"IO":     syscall.SIGIO,
+	"IOT":    syscall.SIGIOT,
+	"KILL":   syscall.SIGKILL,
+	"PIPE":   syscall.SIGPIPE,
+	"PROF":   syscall.SIGPROF,
+	"QUIT":   syscall.SIGQUIT,
+	"SEGV":   syscall.SIGSEGV,
+	"STOP":   syscall.SIGSTOP,
+	"SYS":    syscall.SIGSYS,
+	"TERM":   syscall.SIGTERM,
+	"TRAP":   syscall.SIGTRAP,
+	"TSTP":   syscall.SIGTSTP,
+	"TTIN":   syscall.SIGTTIN,
+	"TTOU":   syscall.SIGTTOU,
+	"URG":    syscall.SIGURG,
+	"USR1":   syscall.SIGUSR1,
+	"USR2":   syscall.SIGUSR2,
+	"VTALRM": syscall.SIGVTALRM,
+	"WINCH":  syscall.SIGWINCH,
+	"XCPU":   syscall.SIGXCPU,
+	"XFSZ":   syscall.SIGXFSZ,
+}
diff --git a/extender/code/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go b/extender/code/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go
new file mode 100644
index 000000000..764f90e26
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go
@@ -0,0 +1,43 @@
+package signal // import "github.com/docker/docker/pkg/signal"
+
+import (
+	"syscall"
+)
+
+// SignalMap is a map of FreeBSD signals.
+var SignalMap = map[string]syscall.Signal{
+	"ABRT":   syscall.SIGABRT,
+	"ALRM":   syscall.SIGALRM,
+	"BUS":    syscall.SIGBUS,
+	"CHLD":   syscall.SIGCHLD,
+	"CONT":   syscall.SIGCONT,
+	"EMT":    syscall.SIGEMT,
+	"FPE":    syscall.SIGFPE,
+	"HUP":    syscall.SIGHUP,
+	"ILL":    syscall.SIGILL,
+	"INFO":   syscall.SIGINFO,
+	"INT":    syscall.SIGINT,
+	"IO":     syscall.SIGIO,
+	"IOT":    syscall.SIGIOT,
+	"KILL":   syscall.SIGKILL,
+	"LWP":    syscall.SIGLWP,
+	"PIPE":   syscall.SIGPIPE,
+	"PROF":   syscall.SIGPROF,
+	"QUIT":   syscall.SIGQUIT,
+	"SEGV":   syscall.SIGSEGV,
+	"STOP":   syscall.SIGSTOP,
+	"SYS":    syscall.SIGSYS,
+	"TERM":   syscall.SIGTERM,
+	"THR":    syscall.SIGTHR,
+	"TRAP":   syscall.SIGTRAP,
+	"TSTP":   syscall.SIGTSTP,
+	"TTIN":   syscall.SIGTTIN,
+	"TTOU":   syscall.SIGTTOU,
+	"URG":    syscall.SIGURG,
+	"USR1":   syscall.SIGUSR1,
+	"USR2":   syscall.SIGUSR2,
+	"VTALRM": syscall.SIGVTALRM,
+	"WINCH":  syscall.SIGWINCH,
+	"XCPU":   syscall.SIGXCPU,
+	"XFSZ":   syscall.SIGXFSZ,
+}
diff --git a/extender/code/vendor/github.com/docker/docker/pkg/signal/signal_linux.go b/extender/code/vendor/github.com/docker/docker/pkg/signal/signal_linux.go
new file mode 100644
index 000000000..4013bded1
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/pkg/signal/signal_linux.go
@@ -0,0 +1,83 @@
+// +build !mips,!mipsle,!mips64,!mips64le
+
+package signal // import "github.com/docker/docker/pkg/signal"
+
+import (
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	sigrtmin = 34
+	sigrtmax = 64
+)
+
+// SignalMap is a map of Linux signals.
+var SignalMap = map[string]syscall.Signal{ + "ABRT": unix.SIGABRT, + "ALRM": unix.SIGALRM, + "BUS": unix.SIGBUS, + "CHLD": unix.SIGCHLD, + "CLD": unix.SIGCLD, + "CONT": unix.SIGCONT, + "FPE": unix.SIGFPE, + "HUP": unix.SIGHUP, + "ILL": unix.SIGILL, + "INT": unix.SIGINT, + "IO": unix.SIGIO, + "IOT": unix.SIGIOT, + "KILL": unix.SIGKILL, + "PIPE": unix.SIGPIPE, + "POLL": unix.SIGPOLL, + "PROF": unix.SIGPROF, + "PWR": unix.SIGPWR, + "QUIT": unix.SIGQUIT, + "SEGV": unix.SIGSEGV, + "STKFLT": unix.SIGSTKFLT, + "STOP": unix.SIGSTOP, + "SYS": unix.SIGSYS, + "TERM": unix.SIGTERM, + "TRAP": unix.SIGTRAP, + "TSTP": unix.SIGTSTP, + "TTIN": unix.SIGTTIN, + "TTOU": unix.SIGTTOU, + "URG": unix.SIGURG, + "USR1": unix.SIGUSR1, + "USR2": unix.SIGUSR2, + "VTALRM": unix.SIGVTALRM, + "WINCH": unix.SIGWINCH, + "XCPU": unix.SIGXCPU, + "XFSZ": unix.SIGXFSZ, + "RTMIN": sigrtmin, + "RTMIN+1": sigrtmin + 1, + "RTMIN+2": sigrtmin + 2, + "RTMIN+3": sigrtmin + 3, + "RTMIN+4": sigrtmin + 4, + "RTMIN+5": sigrtmin + 5, + "RTMIN+6": sigrtmin + 6, + "RTMIN+7": sigrtmin + 7, + "RTMIN+8": sigrtmin + 8, + "RTMIN+9": sigrtmin + 9, + "RTMIN+10": sigrtmin + 10, + "RTMIN+11": sigrtmin + 11, + "RTMIN+12": sigrtmin + 12, + "RTMIN+13": sigrtmin + 13, + "RTMIN+14": sigrtmin + 14, + "RTMIN+15": sigrtmin + 15, + "RTMAX-14": sigrtmax - 14, + "RTMAX-13": sigrtmax - 13, + "RTMAX-12": sigrtmax - 12, + "RTMAX-11": sigrtmax - 11, + "RTMAX-10": sigrtmax - 10, + "RTMAX-9": sigrtmax - 9, + "RTMAX-8": sigrtmax - 8, + "RTMAX-7": sigrtmax - 7, + "RTMAX-6": sigrtmax - 6, + "RTMAX-5": sigrtmax - 5, + "RTMAX-4": sigrtmax - 4, + "RTMAX-3": sigrtmax - 3, + "RTMAX-2": sigrtmax - 2, + "RTMAX-1": sigrtmax - 1, + "RTMAX": sigrtmax, +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/signal/signal_linux_mipsx.go b/extender/code/vendor/github.com/docker/docker/pkg/signal/signal_linux_mipsx.go new file mode 100644 index 000000000..4c7989121 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/signal/signal_linux_mipsx.go @@ -0,0 +1,84 @@ +// +build linux +// +build mips mipsle mips64 mips64le + +package signal // import "github.com/docker/docker/pkg/signal" + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +const ( + sigrtmin = 34 + sigrtmax = 127 +) + +// SignalMap is a map of Linux signals. 
+var SignalMap = map[string]syscall.Signal{
+	"ABRT":     unix.SIGABRT,
+	"ALRM":     unix.SIGALRM,
+	"BUS":      unix.SIGBUS,
+	"CHLD":     unix.SIGCHLD,
+	"CLD":      unix.SIGCLD,
+	"CONT":     unix.SIGCONT,
+	"FPE":      unix.SIGFPE,
+	"HUP":      unix.SIGHUP,
+	"ILL":      unix.SIGILL,
+	"INT":      unix.SIGINT,
+	"IO":       unix.SIGIO,
+	"IOT":      unix.SIGIOT,
+	"KILL":     unix.SIGKILL,
+	"PIPE":     unix.SIGPIPE,
+	"POLL":     unix.SIGPOLL,
+	"PROF":     unix.SIGPROF,
+	"PWR":      unix.SIGPWR,
+	"QUIT":     unix.SIGQUIT,
+	"SEGV":     unix.SIGSEGV,
+	"EMT":      unix.SIGEMT,
+	"STOP":     unix.SIGSTOP,
+	"SYS":      unix.SIGSYS,
+	"TERM":     unix.SIGTERM,
+	"TRAP":     unix.SIGTRAP,
+	"TSTP":     unix.SIGTSTP,
+	"TTIN":     unix.SIGTTIN,
+	"TTOU":     unix.SIGTTOU,
+	"URG":      unix.SIGURG,
+	"USR1":     unix.SIGUSR1,
+	"USR2":     unix.SIGUSR2,
+	"VTALRM":   unix.SIGVTALRM,
+	"WINCH":    unix.SIGWINCH,
+	"XCPU":     unix.SIGXCPU,
+	"XFSZ":     unix.SIGXFSZ,
+	"RTMIN":    sigrtmin,
+	"RTMIN+1":  sigrtmin + 1,
+	"RTMIN+2":  sigrtmin + 2,
+	"RTMIN+3":  sigrtmin + 3,
+	"RTMIN+4":  sigrtmin + 4,
+	"RTMIN+5":  sigrtmin + 5,
+	"RTMIN+6":  sigrtmin + 6,
+	"RTMIN+7":  sigrtmin + 7,
+	"RTMIN+8":  sigrtmin + 8,
+	"RTMIN+9":  sigrtmin + 9,
+	"RTMIN+10": sigrtmin + 10,
+	"RTMIN+11": sigrtmin + 11,
+	"RTMIN+12": sigrtmin + 12,
+	"RTMIN+13": sigrtmin + 13,
+	"RTMIN+14": sigrtmin + 14,
+	"RTMIN+15": sigrtmin + 15,
+	"RTMAX-14": sigrtmax - 14,
+	"RTMAX-13": sigrtmax - 13,
+	"RTMAX-12": sigrtmax - 12,
+	"RTMAX-11": sigrtmax - 11,
+	"RTMAX-10": sigrtmax - 10,
+	"RTMAX-9":  sigrtmax - 9,
+	"RTMAX-8":  sigrtmax - 8,
+	"RTMAX-7":  sigrtmax - 7,
+	"RTMAX-6":  sigrtmax - 6,
+	"RTMAX-5":  sigrtmax - 5,
+	"RTMAX-4":  sigrtmax - 4,
+	"RTMAX-3":  sigrtmax - 3,
+	"RTMAX-2":  sigrtmax - 2,
+	"RTMAX-1":  sigrtmax - 1,
+	"RTMAX":    sigrtmax,
+}
diff --git a/extender/code/vendor/github.com/docker/docker/pkg/signal/signal_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/signal/signal_unix.go
new file mode 100644
index 000000000..a2aa4248f
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/pkg/signal/signal_unix.go
@@ -0,0 +1,21 @@
+// +build !windows
+
+package signal // import "github.com/docker/docker/pkg/signal"
+
+import (
+	"syscall"
+)
+
+// Signals used in cli/command.
+
+const (
+	// SIGCHLD is a signal sent to a process when a child process terminates, is interrupted, or resumes after being interrupted.
+	SIGCHLD = syscall.SIGCHLD
+	// SIGWINCH is a signal sent to a process when its controlling terminal changes its size
+	SIGWINCH = syscall.SIGWINCH
+	// SIGPIPE is a signal sent to a process when a pipe is written to before the other end is open for reading
+	SIGPIPE = syscall.SIGPIPE
+	// DefaultStopSignal is the syscall signal used to stop a container in unix systems.
+	DefaultStopSignal = "SIGTERM"
+)
diff --git a/extender/code/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go b/extender/code/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go
new file mode 100644
index 000000000..1fd25a83c
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go
@@ -0,0 +1,10 @@
+// +build !linux,!darwin,!freebsd,!windows
+
+package signal // import "github.com/docker/docker/pkg/signal"
+
+import (
+	"syscall"
+)
+
+// SignalMap is an empty map of signals for unsupported platforms.
+var SignalMap = map[string]syscall.Signal{} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/signal/signal_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/signal/signal_windows.go new file mode 100644 index 000000000..65752f24a --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/signal/signal_windows.go @@ -0,0 +1,26 @@ +package signal // import "github.com/docker/docker/pkg/signal" + +import ( + "syscall" +) + +// Signals used in cli/command (no windows equivalent, use +// invalid signals so they don't get handled) +const ( + SIGCHLD = syscall.Signal(0xff) + SIGWINCH = syscall.Signal(0xff) + SIGPIPE = syscall.Signal(0xff) + // DefaultStopSignal is the syscall signal used to stop a container in windows systems. + DefaultStopSignal = "15" +) + +// SignalMap is a map of "supported" signals. As per the comment in GOLang's +// ztypes_windows.go: "More invented values for signals". Windows doesn't +// really support signals in any way, shape or form that Unix does. +// +// We have these so that docker kill can be used to gracefully (TERM) and +// forcibly (KILL) terminate a container on Windows. +var SignalMap = map[string]syscall.Signal{ + "KILL": syscall.SIGKILL, + "TERM": syscall.SIGTERM, +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/signal/trap.go b/extender/code/vendor/github.com/docker/docker/pkg/signal/trap.go new file mode 100644 index 000000000..2a6e69fb5 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/signal/trap.go @@ -0,0 +1,104 @@ +package signal // import "github.com/docker/docker/pkg/signal" + +import ( + "fmt" + "os" + gosignal "os/signal" + "path/filepath" + "runtime" + "strings" + "sync/atomic" + "syscall" + "time" + + "github.com/pkg/errors" +) + +// Trap sets up a simplified signal "trap", appropriate for common +// behavior expected from a vanilla unix command-line tool in general +// (and the Docker engine in particular). +// +// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated. +// * If SIGINT or SIGTERM are received 3 times before cleanup is complete, then cleanup is +// skipped and the process is terminated immediately (allows force quit of stuck daemon) +// * A SIGQUIT always causes an exit without cleanup, with a goroutine dump preceding exit. +// * Ignore SIGPIPE events. These are generated by systemd when journald is restarted while +// the docker daemon is not restarted and also running under systemd. +// Fixes https://github.com/docker/docker/issues/19728 +// +func Trap(cleanup func(), logger interface { + Info(args ...interface{}) +}) { + c := make(chan os.Signal, 1) + // we will handle INT, TERM, QUIT, SIGPIPE here + signals := []os.Signal{os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGPIPE} + gosignal.Notify(c, signals...) 
+ go func() { + interruptCount := uint32(0) + for sig := range c { + if sig == syscall.SIGPIPE { + continue + } + + go func(sig os.Signal) { + logger.Info(fmt.Sprintf("Processing signal '%v'", sig)) + switch sig { + case os.Interrupt, syscall.SIGTERM: + if atomic.LoadUint32(&interruptCount) < 3 { + // Initiate the cleanup only once + if atomic.AddUint32(&interruptCount, 1) == 1 { + // Call the provided cleanup handler + cleanup() + os.Exit(0) + } else { + return + } + } else { + // 3 SIGTERM/INT signals received; force exit without cleanup + logger.Info("Forcing docker daemon shutdown without cleanup; 3 interrupts received") + } + case syscall.SIGQUIT: + DumpStacks("") + logger.Info("Forcing docker daemon shutdown without cleanup on SIGQUIT") + } + //for the SIGINT/TERM, and SIGQUIT non-clean shutdown case, exit with 128 + signal # + os.Exit(128 + int(sig.(syscall.Signal))) + }(sig) + } + }() +} + +const stacksLogNameTemplate = "goroutine-stacks-%s.log" + +// DumpStacks appends the runtime stack into file in dir and returns full path +// to that file. +func DumpStacks(dir string) (string, error) { + var ( + buf []byte + stackSize int + ) + bufferLen := 16384 + for stackSize == len(buf) { + buf = make([]byte, bufferLen) + stackSize = runtime.Stack(buf, true) + bufferLen *= 2 + } + buf = buf[:stackSize] + var f *os.File + if dir != "" { + path := filepath.Join(dir, fmt.Sprintf(stacksLogNameTemplate, strings.Replace(time.Now().Format(time.RFC3339), ":", "", -1))) + var err error + f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0666) + if err != nil { + return "", errors.Wrap(err, "failed to open file to write the goroutine stacks") + } + defer f.Close() + defer f.Sync() + } else { + f = os.Stderr + } + if _, err := f.Write(buf); err != nil { + return "", errors.Wrap(err, "failed to write goroutine stacks") + } + return f.Name(), nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go b/extender/code/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go new file mode 100644 index 000000000..b0456e580 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go @@ -0,0 +1,159 @@ +// Package streamformatter provides helper functions to format a stream. +package streamformatter // import "github.com/docker/docker/pkg/streamformatter" + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/progress" +) + +const streamNewline = "\r\n" + +type jsonProgressFormatter struct{} + +func appendNewline(source []byte) []byte { + return append(source, []byte(streamNewline)...) +} + +// FormatStatus formats the specified objects according to the specified format (and id). +func FormatStatus(id, format string, a ...interface{}) []byte { + str := fmt.Sprintf(format, a...) 
+ b, err := json.Marshal(&jsonmessage.JSONMessage{ID: id, Status: str}) + if err != nil { + return FormatError(err) + } + return appendNewline(b) +} + +// FormatError formats the error as a JSON object +func FormatError(err error) []byte { + jsonError, ok := err.(*jsonmessage.JSONError) + if !ok { + jsonError = &jsonmessage.JSONError{Message: err.Error()} + } + if b, err := json.Marshal(&jsonmessage.JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil { + return appendNewline(b) + } + return []byte(`{"error":"format error"}` + streamNewline) +} + +func (sf *jsonProgressFormatter) formatStatus(id, format string, a ...interface{}) []byte { + return FormatStatus(id, format, a...) +} + +// formatProgress formats the progress information for a specified action. +func (sf *jsonProgressFormatter) formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte { + if progress == nil { + progress = &jsonmessage.JSONProgress{} + } + var auxJSON *json.RawMessage + if aux != nil { + auxJSONBytes, err := json.Marshal(aux) + if err != nil { + return nil + } + auxJSON = new(json.RawMessage) + *auxJSON = auxJSONBytes + } + b, err := json.Marshal(&jsonmessage.JSONMessage{ + Status: action, + ProgressMessage: progress.String(), + Progress: progress, + ID: id, + Aux: auxJSON, + }) + if err != nil { + return nil + } + return appendNewline(b) +} + +type rawProgressFormatter struct{} + +func (sf *rawProgressFormatter) formatStatus(id, format string, a ...interface{}) []byte { + return []byte(fmt.Sprintf(format, a...) + streamNewline) +} + +func (sf *rawProgressFormatter) formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte { + if progress == nil { + progress = &jsonmessage.JSONProgress{} + } + endl := "\r" + if progress.String() == "" { + endl += "\n" + } + return []byte(action + " " + progress.String() + endl) +} + +// NewProgressOutput returns a progress.Output object that can be passed to +// progress.NewProgressReader. +func NewProgressOutput(out io.Writer) progress.Output { + return &progressOutput{sf: &rawProgressFormatter{}, out: out, newLines: true} +} + +// NewJSONProgressOutput returns a progress.Output that formats output +// using JSON objects +func NewJSONProgressOutput(out io.Writer, newLines bool) progress.Output { + return &progressOutput{sf: &jsonProgressFormatter{}, out: out, newLines: newLines} +} + +type formatProgress interface { + formatStatus(id, format string, a ...interface{}) []byte + formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte +} + +type progressOutput struct { + sf formatProgress + out io.Writer + newLines bool +} + +// WriteProgress formats progress information from a ProgressReader. 
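+// A minimal wiring sketch (w is any io.Writer supplied by the caller):
+//
+//	out := NewJSONProgressOutput(w, true)
+//	progress.Update(out, "layer", "Extracting")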
+func (out *progressOutput) WriteProgress(prog progress.Progress) error { + var formatted []byte + if prog.Message != "" { + formatted = out.sf.formatStatus(prog.ID, prog.Message) + } else { + jsonProgress := jsonmessage.JSONProgress{Current: prog.Current, Total: prog.Total, HideCounts: prog.HideCounts, Units: prog.Units} + formatted = out.sf.formatProgress(prog.ID, prog.Action, &jsonProgress, prog.Aux) + } + _, err := out.out.Write(formatted) + if err != nil { + return err + } + + if out.newLines && prog.LastUpdate { + _, err = out.out.Write(out.sf.formatStatus("", "")) + return err + } + + return nil +} + +// AuxFormatter is a streamFormatter that writes aux progress messages +type AuxFormatter struct { + io.Writer +} + +// Emit emits the given interface as an aux progress message +func (sf *AuxFormatter) Emit(id string, aux interface{}) error { + auxJSONBytes, err := json.Marshal(aux) + if err != nil { + return err + } + auxJSON := new(json.RawMessage) + *auxJSON = auxJSONBytes + msgJSON, err := json.Marshal(&jsonmessage.JSONMessage{ID: id, Aux: auxJSON}) + if err != nil { + return err + } + msgJSON = appendNewline(msgJSON) + n, err := sf.Writer.Write(msgJSON) + if n != len(msgJSON) { + return io.ErrShortWrite + } + return err +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter.go b/extender/code/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter.go new file mode 100644 index 000000000..1473ed974 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter.go @@ -0,0 +1,47 @@ +package streamformatter // import "github.com/docker/docker/pkg/streamformatter" + +import ( + "encoding/json" + "io" + + "github.com/docker/docker/pkg/jsonmessage" +) + +type streamWriter struct { + io.Writer + lineFormat func([]byte) string +} + +func (sw *streamWriter) Write(buf []byte) (int, error) { + formattedBuf := sw.format(buf) + n, err := sw.Writer.Write(formattedBuf) + if n != len(formattedBuf) { + return n, io.ErrShortWrite + } + return len(buf), err +} + +func (sw *streamWriter) format(buf []byte) []byte { + msg := &jsonmessage.JSONMessage{Stream: sw.lineFormat(buf)} + b, err := json.Marshal(msg) + if err != nil { + return FormatError(err) + } + return appendNewline(b) +} + +// NewStdoutWriter returns a writer which formats the output as json message +// representing stdout lines +func NewStdoutWriter(out io.Writer) io.Writer { + return &streamWriter{Writer: out, lineFormat: func(buf []byte) string { + return string(buf) + }} +} + +// NewStderrWriter returns a writer which formats the output as json message +// representing stderr lines +func NewStderrWriter(out io.Writer) io.Writer { + return &streamWriter{Writer: out, lineFormat: func(buf []byte) string { + return "\033[91m" + string(buf) + "\033[0m" + }} +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/stringid/README.md b/extender/code/vendor/github.com/docker/docker/pkg/stringid/README.md new file mode 100644 index 000000000..37a5098fd --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/stringid/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with string identifiers diff --git a/extender/code/vendor/github.com/docker/docker/pkg/stringid/stringid.go b/extender/code/vendor/github.com/docker/docker/pkg/stringid/stringid.go new file mode 100644 index 000000000..fa7d9166e --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/stringid/stringid.go @@ -0,0 +1,99 @@ +// Package 
stringid provides helper functions for dealing with string identifiers +package stringid // import "github.com/docker/docker/pkg/stringid" + +import ( + cryptorand "crypto/rand" + "encoding/hex" + "fmt" + "io" + "math" + "math/big" + "math/rand" + "regexp" + "strconv" + "strings" + "time" +) + +const shortLen = 12 + +var ( + validShortID = regexp.MustCompile("^[a-f0-9]{12}$") + validHex = regexp.MustCompile(`^[a-f0-9]{64}$`) +) + +// IsShortID determines if an arbitrary string *looks like* a short ID. +func IsShortID(id string) bool { + return validShortID.MatchString(id) +} + +// TruncateID returns a shorthand version of a string identifier for convenience. +// A collision with other shorthands is very unlikely, but possible. +// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller +// will need to use a longer prefix, or the full-length Id. +func TruncateID(id string) string { + if i := strings.IndexRune(id, ':'); i >= 0 { + id = id[i+1:] + } + if len(id) > shortLen { + id = id[:shortLen] + } + return id +} + +func generateID(r io.Reader) string { + b := make([]byte, 32) + for { + if _, err := io.ReadFull(r, b); err != nil { + panic(err) // This shouldn't happen + } + id := hex.EncodeToString(b) + // if we try to parse the truncated for as an int and we don't have + // an error then the value is all numeric and causes issues when + // used as a hostname. ref #3869 + if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil { + continue + } + return id + } +} + +// GenerateRandomID returns a unique id. +func GenerateRandomID() string { + return generateID(cryptorand.Reader) +} + +// GenerateNonCryptoID generates unique id without using cryptographically +// secure sources of random. +// It helps you to save entropy. +func GenerateNonCryptoID() string { + return generateID(readerFunc(rand.Read)) +} + +// ValidateID checks whether an ID string is a valid image ID. +func ValidateID(id string) error { + if ok := validHex.MatchString(id); !ok { + return fmt.Errorf("image ID %q is invalid", id) + } + return nil +} + +func init() { + // safely set the seed globally so we generate random ids. Tries to use a + // crypto seed before falling back to time. + var seed int64 + if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil { + // This should not happen, but worst-case fallback to time-based seed. + seed = time.Now().UnixNano() + } else { + seed = cryptoseed.Int64() + } + + rand.Seed(seed) +} + +type readerFunc func(p []byte) (int, error) + +func (fn readerFunc) Read(p []byte) (int, error) { + return fn(p) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE b/extender/code/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE new file mode 100644 index 000000000..5d80670bc --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014-2018 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/extender/code/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD b/extender/code/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD new file mode 100644 index 000000000..2ee8768d3 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD @@ -0,0 +1,27 @@ +Copyright (c) 2014-2018 The Docker & Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/extender/code/vendor/github.com/docker/docker/pkg/symlink/README.md b/extender/code/vendor/github.com/docker/docker/pkg/symlink/README.md new file mode 100644 index 000000000..8dba54fd0 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/symlink/README.md @@ -0,0 +1,6 @@ +Package symlink implements EvalSymlinksInScope which is an extension of filepath.EvalSymlinks, +as well as a Windows long-path aware version of filepath.EvalSymlinks +from the [Go standard library](https://golang.org/pkg/path/filepath). + +The code from filepath.EvalSymlinks has been adapted in fs.go. +Please read the LICENSE.BSD file that governs fs.go and LICENSE.APACHE for fs_test.go. diff --git a/extender/code/vendor/github.com/docker/docker/pkg/symlink/fs.go b/extender/code/vendor/github.com/docker/docker/pkg/symlink/fs.go new file mode 100644 index 000000000..7b894cde7 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/symlink/fs.go @@ -0,0 +1,144 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.BSD file. + +// This code is a modified version of path/filepath/symlink.go from the Go standard library. + +package symlink // import "github.com/docker/docker/pkg/symlink" + +import ( + "bytes" + "errors" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" +) + +// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an +// absolute path. This function handles paths in a platform-agnostic manner. +func FollowSymlinkInScope(path, root string) (string, error) { + path, err := filepath.Abs(filepath.FromSlash(path)) + if err != nil { + return "", err + } + root, err = filepath.Abs(filepath.FromSlash(root)) + if err != nil { + return "", err + } + return evalSymlinksInScope(path, root) +} + +// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return +// a result guaranteed to be contained within the scope `root`, at the time of the call. +// Symlinks in `root` are not evaluated and left as-is. +// Errors encountered while attempting to evaluate symlinks in path will be returned. +// Non-existing paths are valid and do not constitute an error. +// `path` has to contain `root` as a prefix, or else an error will be returned. 
+// Trying to break out from `root` does not constitute an error.
+//
+// Example:
+// If /foo/bar -> /outside,
+// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/outside"
+//
+// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks
+// are created, and not to subsequently create additional symlinks that could make a
+// previously-safe path unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo")
+// would return "/foo/bar". If one makes /foo/bar a symlink to /baz subsequently, then "/foo/bar" should
+// no longer be considered safely contained in "/foo".
+func evalSymlinksInScope(path, root string) (string, error) {
+	root = filepath.Clean(root)
+	if path == root {
+		return path, nil
+	}
+	if !strings.HasPrefix(path, root) {
+		return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
+	}
+	const maxIter = 255
+	originalPath := path
+	// given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c"
+	path = path[len(root):]
+	if root == string(filepath.Separator) {
+		path = string(filepath.Separator) + path
+	}
+	if !strings.HasPrefix(path, string(filepath.Separator)) {
+		return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
+	}
+	path = filepath.Clean(path)
+	// consume path by taking each frontmost path element,
+	// expanding it if it's a symlink, and appending it to b
+	var b bytes.Buffer
+	// b here will always be considered to be the "current absolute path inside
+	// root" as we append paths to it; we also append a slash and use
+	// filepath.Clean after the loop to trim the trailing slash
+	for n := 0; path != ""; n++ {
+		if n > maxIter {
+			return "", errors.New("evalSymlinksInScope: too many links in " + originalPath)
+		}
+
+		// find next path component, p
+		i := strings.IndexRune(path, filepath.Separator)
+		var p string
+		if i == -1 {
+			p, path = path, ""
+		} else {
+			p, path = path[:i], path[i+1:]
+		}
+
+		if p == "" {
+			continue
+		}
+
+		// this takes a b.String() like "b/../" and a p like "c" and turns it
+		// into "/b/../c" which then gets filepath.Cleaned into "/c" and then
+		// root gets prepended and we Clean again (to remove any trailing slash
+		// if the first Clean gave us just "/")
+		cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p)
+		if isDriveOrRoot(cleanP) {
+			// never Lstat "/" itself, or drive letters on Windows
+			b.Reset()
+			continue
+		}
+		fullP := filepath.Clean(root + cleanP)
+
+		fi, err := os.Lstat(fullP)
+		if os.IsNotExist(err) {
+			// if p does not exist, accept it
+			b.WriteString(p)
+			b.WriteRune(filepath.Separator)
+			continue
+		}
+		if err != nil {
+			return "", err
+		}
+		if fi.Mode()&os.ModeSymlink == 0 {
+			b.WriteString(p)
+			b.WriteRune(filepath.Separator)
+			continue
+		}
+
+		// it's a symlink, put it at the front of path
+		dest, err := os.Readlink(fullP)
+		if err != nil {
+			return "", err
+		}
+		if system.IsAbs(dest) {
+			b.Reset()
+		}
+		path = dest + string(filepath.Separator) + path
+	}
+
+	// see note above on "fullP := ..." for why this is double-cleaned and
+	// what's happening here
+	return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil
+}
+
+// EvalSymlinks returns the path name after the evaluation of any symbolic
+// links.
+// If path is relative the result will be relative to the current directory,
+// unless one of the components is an absolute symbolic link.
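A minimal caller-side sketch of the scoped resolution implemented above (the paths are hypothetical; the import path matches the vendored location):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/symlink"
)

func main() {
	// Assume /data/link is a symlink pointing at /etc. The scoped resolver
	// keeps the result inside the root, yielding /data/etc/passwd rather
	// than letting the lookup escape to /etc/passwd.
	resolved, err := symlink.FollowSymlinkInScope("/data/link/passwd", "/data")
	if err != nil {
		fmt.Println("resolve failed:", err)
		return
	}
	fmt.Println(resolved)
}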
+// This version has been updated to support long paths prepended with `\\?\`. +func EvalSymlinks(path string) (string, error) { + return evalSymlinks(path) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go new file mode 100644 index 000000000..c6dafcb0b --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go @@ -0,0 +1,15 @@ +// +build !windows + +package symlink // import "github.com/docker/docker/pkg/symlink" + +import ( + "path/filepath" +) + +func evalSymlinks(path string) (string, error) { + return filepath.EvalSymlinks(path) +} + +func isDriveOrRoot(p string) bool { + return p == string(filepath.Separator) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go new file mode 100644 index 000000000..754761717 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go @@ -0,0 +1,169 @@ +package symlink // import "github.com/docker/docker/pkg/symlink" + +import ( + "bytes" + "errors" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/longpath" + "golang.org/x/sys/windows" +) + +func toShort(path string) (string, error) { + p, err := windows.UTF16FromString(path) + if err != nil { + return "", err + } + b := p // GetShortPathName says we can reuse buffer + n, err := windows.GetShortPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + if n > uint32(len(b)) { + b = make([]uint16, n) + if _, err = windows.GetShortPathName(&p[0], &b[0], uint32(len(b))); err != nil { + return "", err + } + } + return windows.UTF16ToString(b), nil +} + +func toLong(path string) (string, error) { + p, err := windows.UTF16FromString(path) + if err != nil { + return "", err + } + b := p // GetLongPathName says we can reuse buffer + n, err := windows.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + if n > uint32(len(b)) { + b = make([]uint16, n) + n, err = windows.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + } + b = b[:n] + return windows.UTF16ToString(b), nil +} + +func evalSymlinks(path string) (string, error) { + path, err := walkSymlinks(path) + if err != nil { + return "", err + } + + p, err := toShort(path) + if err != nil { + return "", err + } + p, err = toLong(p) + if err != nil { + return "", err + } + // windows.GetLongPathName does not change the case of the drive letter, + // but the result of EvalSymlinks must be unique, so we have + // EvalSymlinks(`c:\a`) == EvalSymlinks(`C:\a`). + // Make drive letter upper case. + if len(p) >= 2 && p[1] == ':' && 'a' <= p[0] && p[0] <= 'z' { + p = string(p[0]+'A'-'a') + p[1:] + } else if len(p) >= 6 && p[5] == ':' && 'a' <= p[4] && p[4] <= 'z' { + p = p[:3] + string(p[4]+'A'-'a') + p[5:] + } + return filepath.Clean(p), nil +} + +const utf8RuneSelf = 0x80 + +func walkSymlinks(path string) (string, error) { + const maxIter = 255 + originalPath := path + // consume path by taking each frontmost path element, + // expanding it if it's a symlink, and appending it to b + var b bytes.Buffer + for n := 0; path != ""; n++ { + if n > maxIter { + return "", errors.New("EvalSymlinks: too many links in " + originalPath) + } + + // A path beginning with `\\?\` represents the root, so automatically + // skip that part and begin processing the next segment. 
+ if strings.HasPrefix(path, longpath.Prefix) { + b.WriteString(longpath.Prefix) + path = path[4:] + continue + } + + // find next path component, p + var i = -1 + for j, c := range path { + if c < utf8RuneSelf && os.IsPathSeparator(uint8(c)) { + i = j + break + } + } + var p string + if i == -1 { + p, path = path, "" + } else { + p, path = path[:i], path[i+1:] + } + + if p == "" { + if b.Len() == 0 { + // must be absolute path + b.WriteRune(filepath.Separator) + } + continue + } + + // If this is the first segment after the long path prefix, accept the + // current segment as a volume root or UNC share and move on to the next. + if b.String() == longpath.Prefix { + b.WriteString(p) + b.WriteRune(filepath.Separator) + continue + } + + fi, err := os.Lstat(b.String() + p) + if err != nil { + return "", err + } + if fi.Mode()&os.ModeSymlink == 0 { + b.WriteString(p) + if path != "" || (b.Len() == 2 && len(p) == 2 && p[1] == ':') { + b.WriteRune(filepath.Separator) + } + continue + } + + // it's a symlink, put it at the front of path + dest, err := os.Readlink(b.String() + p) + if err != nil { + return "", err + } + if filepath.IsAbs(dest) || os.IsPathSeparator(dest[0]) { + b.Reset() + } + path = dest + string(filepath.Separator) + path + } + return filepath.Clean(b.String()), nil +} + +func isDriveOrRoot(p string) bool { + if p == string(filepath.Separator) { + return true + } + + length := len(p) + if length >= 2 { + if p[length-1] == ':' && (('a' <= p[length-2] && p[length-2] <= 'z') || ('A' <= p[length-2] && p[length-2] <= 'Z')) { + return true + } + } + return false +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/sysinfo/README.md b/extender/code/vendor/github.com/docker/docker/pkg/sysinfo/README.md new file mode 100644 index 000000000..c1530cef0 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/sysinfo/README.md @@ -0,0 +1 @@ +SysInfo stores information about which features a kernel supports. diff --git a/extender/code/vendor/github.com/docker/docker/pkg/sysinfo/numcpu.go b/extender/code/vendor/github.com/docker/docker/pkg/sysinfo/numcpu.go new file mode 100644 index 000000000..eea2d25bf --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/sysinfo/numcpu.go @@ -0,0 +1,12 @@ +// +build !linux,!windows + +package sysinfo // import "github.com/docker/docker/pkg/sysinfo" + +import ( + "runtime" +) + +// NumCPU returns the number of CPUs +func NumCPU() int { + return runtime.NumCPU() +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_linux.go b/extender/code/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_linux.go new file mode 100644 index 000000000..5f6c6df8c --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_linux.go @@ -0,0 +1,42 @@ +package sysinfo // import "github.com/docker/docker/pkg/sysinfo" + +import ( + "runtime" + "unsafe" + + "golang.org/x/sys/unix" +) + +// numCPU queries the system for the count of threads available +// for use to this process. +// +// Issues two syscalls. +// Returns 0 on errors. Use |runtime.NumCPU| in that case. +func numCPU() int { + // Gets the affinity mask for a process: The very one invoking this function. + pid, _, _ := unix.RawSyscall(unix.SYS_GETPID, 0, 0, 0) + + var mask [1024 / 64]uintptr + _, _, err := unix.RawSyscall(unix.SYS_SCHED_GETAFFINITY, pid, uintptr(len(mask)*8), uintptr(unsafe.Pointer(&mask[0]))) + if err != 0 { + return 0 + } + + // For every available thread a bit is set in the mask. 
+ ncpu := 0 + for _, e := range mask { + if e == 0 { + continue + } + ncpu += int(popcnt(uint64(e))) + } + return ncpu +} + +// NumCPU returns the number of CPUs which are currently online +func NumCPU() int { + if ncpu := numCPU(); ncpu > 0 { + return ncpu + } + return runtime.NumCPU() +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_windows.go new file mode 100644 index 000000000..13523f671 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_windows.go @@ -0,0 +1,35 @@ +package sysinfo // import "github.com/docker/docker/pkg/sysinfo" + +import ( + "runtime" + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + kernel32 = windows.NewLazySystemDLL("kernel32.dll") + getCurrentProcess = kernel32.NewProc("GetCurrentProcess") + getProcessAffinityMask = kernel32.NewProc("GetProcessAffinityMask") +) + +func numCPU() int { + // Gets the affinity mask for a process + var mask, sysmask uintptr + currentProcess, _, _ := getCurrentProcess.Call() + ret, _, _ := getProcessAffinityMask.Call(currentProcess, uintptr(unsafe.Pointer(&mask)), uintptr(unsafe.Pointer(&sysmask))) + if ret == 0 { + return 0 + } + // For every available thread a bit is set in the mask. + ncpu := int(popcnt(uint64(mask))) + return ncpu +} + +// NumCPU returns the number of CPUs which are currently online +func NumCPU() int { + if ncpu := numCPU(); ncpu > 0 { + return ncpu + } + return runtime.NumCPU() +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go b/extender/code/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go new file mode 100644 index 000000000..5fa5a5628 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go @@ -0,0 +1,155 @@ +package sysinfo // import "github.com/docker/docker/pkg/sysinfo" + +import "github.com/docker/docker/pkg/parsers" + +// SysInfo stores information about which features a kernel supports. +// TODO Windows: Factor out platform specific capabilities. 
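The NumCPU helpers above count set bits in the process affinity mask rather than trusting runtime.NumCPU, so a process pinned to a subset of cores reports only those cores. A standalone sketch of the same counting idea, using the standard library's math/bits in place of the package-private popcnt:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// Affinity mask with bits 0, 1, 4, 5 and 7 set: five usable CPUs.
	var mask uint64 = 0b10110011
	fmt.Println(bits.OnesCount64(mask)) // 5
}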
+type SysInfo struct { + // Whether the kernel supports AppArmor or not + AppArmor bool + // Whether the kernel supports Seccomp or not + Seccomp bool + + cgroupMemInfo + cgroupCPUInfo + cgroupBlkioInfo + cgroupCpusetInfo + cgroupPids + + // Whether IPv4 forwarding is supported or not, if this was disabled, networking will not work + IPv4ForwardingDisabled bool + + // Whether bridge-nf-call-iptables is supported or not + BridgeNFCallIPTablesDisabled bool + + // Whether bridge-nf-call-ip6tables is supported or not + BridgeNFCallIP6TablesDisabled bool + + // Whether the cgroup has the mountpoint of "devices" or not + CgroupDevicesEnabled bool +} + +type cgroupMemInfo struct { + // Whether memory limit is supported or not + MemoryLimit bool + + // Whether swap limit is supported or not + SwapLimit bool + + // Whether soft limit is supported or not + MemoryReservation bool + + // Whether OOM killer disable is supported or not + OomKillDisable bool + + // Whether memory swappiness is supported or not + MemorySwappiness bool + + // Whether kernel memory limit is supported or not + KernelMemory bool + + // Whether kernel memory TCP limit is supported or not + KernelMemoryTCP bool +} + +type cgroupCPUInfo struct { + // Whether CPU shares is supported or not + CPUShares bool + + // Whether CPU CFS(Completely Fair Scheduler) period is supported or not + CPUCfsPeriod bool + + // Whether CPU CFS(Completely Fair Scheduler) quota is supported or not + CPUCfsQuota bool + + // Whether CPU real-time period is supported or not + CPURealtimePeriod bool + + // Whether CPU real-time runtime is supported or not + CPURealtimeRuntime bool +} + +type cgroupBlkioInfo struct { + // Whether Block IO weight is supported or not + BlkioWeight bool + + // Whether Block IO weight_device is supported or not + BlkioWeightDevice bool + + // Whether Block IO read limit in bytes per second is supported or not + BlkioReadBpsDevice bool + + // Whether Block IO write limit in bytes per second is supported or not + BlkioWriteBpsDevice bool + + // Whether Block IO read limit in IO per second is supported or not + BlkioReadIOpsDevice bool + + // Whether Block IO write limit in IO per second is supported or not + BlkioWriteIOpsDevice bool +} + +type cgroupCpusetInfo struct { + // Whether Cpuset is supported or not + Cpuset bool + + // Available Cpuset's cpus + Cpus string + + // Available Cpuset's memory nodes + Mems string +} + +type cgroupPids struct { + // Whether Pids Limit is supported or not + PidsLimit bool +} + +// IsCpusetCpusAvailable returns `true` if the provided string set is contained +// in cgroup's cpuset.cpus set, `false` otherwise. +// If error is not nil a parsing error occurred. +func (c cgroupCpusetInfo) IsCpusetCpusAvailable(provided string) (bool, error) { + return isCpusetListAvailable(provided, c.Cpus) +} + +// IsCpusetMemsAvailable returns `true` if the provided string set is contained +// in cgroup's cpuset.mems set, `false` otherwise. +// If error is not nil a parsing error occurred. +func (c cgroupCpusetInfo) IsCpusetMemsAvailable(provided string) (bool, error) { + return isCpusetListAvailable(provided, c.Mems) +} + +func isCpusetListAvailable(provided, available string) (bool, error) { + parsedAvailable, err := parsers.ParseUintList(available) + if err != nil { + return false, err + } + // 8192 is the normal maximum number of CPUs in Linux, so accept numbers up to this + // or more if we actually have more CPUs. 
+ max := 8192 + for m := range parsedAvailable { + if m > max { + max = m + } + } + parsedProvided, err := parsers.ParseUintListMaximum(provided, max) + if err != nil { + return false, err + } + for k := range parsedProvided { + if !parsedAvailable[k] { + return false, nil + } + } + return true, nil +} + +// Returns bit count of 1, used by NumCPU +func popcnt(x uint64) (n byte) { + x -= (x >> 1) & 0x5555555555555555 + x = (x>>2)&0x3333333333333333 + x&0x3333333333333333 + x += x >> 4 + x &= 0x0f0f0f0f0f0f0f0f + x *= 0x0101010101010101 + return byte(x >> 56) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go b/extender/code/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go new file mode 100644 index 000000000..7b36d6d13 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go @@ -0,0 +1,277 @@ +package sysinfo // import "github.com/docker/docker/pkg/sysinfo" + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strings" + + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +func findCgroupMountpoints() (map[string]string, error) { + cgMounts, err := cgroups.GetCgroupMounts(false) + if err != nil { + return nil, fmt.Errorf("Failed to parse cgroup information: %v", err) + } + mps := make(map[string]string) + for _, m := range cgMounts { + for _, ss := range m.Subsystems { + mps[ss] = m.Mountpoint + } + } + return mps, nil +} + +type infoCollector func(info *SysInfo, cgMounts map[string]string) (warnings []string) + +// New returns a new SysInfo, using the filesystem to detect which features +// the kernel supports. If `quiet` is `false` warnings are printed in logs +// whenever an error occurs or misconfigurations are present. +func New(quiet bool) *SysInfo { + var ops []infoCollector + var warnings []string + sysInfo := &SysInfo{} + cgMounts, err := findCgroupMountpoints() + if err != nil { + logrus.Warn(err) + } else { + ops = append(ops, []infoCollector{ + applyMemoryCgroupInfo, + applyCPUCgroupInfo, + applyBlkioCgroupInfo, + applyCPUSetCgroupInfo, + applyPIDSCgroupInfo, + applyDevicesCgroupInfo, + }...) + } + + ops = append(ops, []infoCollector{ + applyNetworkingInfo, + applyAppArmorInfo, + applySeccompInfo, + }...) + + for _, o := range ops { + w := o(sysInfo, cgMounts) + warnings = append(warnings, w...) + } + if !quiet { + for _, w := range warnings { + logrus.Warn(w) + } + } + return sysInfo +} + +// applyMemoryCgroupInfo reads the memory information from the memory cgroup mount point. 
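A hedged usage sketch of the cpuset checks defined above: a request is considered available only when every CPU it names appears in the cgroup's own cpuset (the values here are illustrative):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/sysinfo"
)

func main() {
	info := sysinfo.New(true)
	// On Linux, the available set holds the contents of cpuset.cpus, e.g. "0-3".
	if ok, err := info.IsCpusetCpusAvailable("0-1"); err == nil && !ok {
		fmt.Println("CPUs 0-1 are not all available to this process")
	}
}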
+func applyMemoryCgroupInfo(info *SysInfo, cgMounts map[string]string) []string { + var warnings []string + mountPoint, ok := cgMounts["memory"] + if !ok { + warnings = append(warnings, "Your kernel does not support cgroup memory limit") + return warnings + } + info.MemoryLimit = ok + + info.SwapLimit = cgroupEnabled(mountPoint, "memory.memsw.limit_in_bytes") + if !info.SwapLimit { + warnings = append(warnings, "Your kernel does not support swap memory limit") + } + info.MemoryReservation = cgroupEnabled(mountPoint, "memory.soft_limit_in_bytes") + if !info.MemoryReservation { + warnings = append(warnings, "Your kernel does not support memory reservation") + } + info.OomKillDisable = cgroupEnabled(mountPoint, "memory.oom_control") + if !info.OomKillDisable { + warnings = append(warnings, "Your kernel does not support oom control") + } + info.MemorySwappiness = cgroupEnabled(mountPoint, "memory.swappiness") + if !info.MemorySwappiness { + warnings = append(warnings, "Your kernel does not support memory swappiness") + } + info.KernelMemory = cgroupEnabled(mountPoint, "memory.kmem.limit_in_bytes") + if !info.KernelMemory { + warnings = append(warnings, "Your kernel does not support kernel memory limit") + } + info.KernelMemoryTCP = cgroupEnabled(mountPoint, "memory.kmem.tcp.limit_in_bytes") + if !info.KernelMemoryTCP { + warnings = append(warnings, "Your kernel does not support kernel memory TCP limit") + } + + return warnings +} + +// applyCPUCgroupInfo reads the cpu information from the cpu cgroup mount point. +func applyCPUCgroupInfo(info *SysInfo, cgMounts map[string]string) []string { + var warnings []string + mountPoint, ok := cgMounts["cpu"] + if !ok { + warnings = append(warnings, "Unable to find cpu cgroup in mounts") + return warnings + } + + info.CPUShares = cgroupEnabled(mountPoint, "cpu.shares") + if !info.CPUShares { + warnings = append(warnings, "Your kernel does not support cgroup cpu shares") + } + + info.CPUCfsPeriod = cgroupEnabled(mountPoint, "cpu.cfs_period_us") + if !info.CPUCfsPeriod { + warnings = append(warnings, "Your kernel does not support cgroup cfs period") + } + + info.CPUCfsQuota = cgroupEnabled(mountPoint, "cpu.cfs_quota_us") + if !info.CPUCfsQuota { + warnings = append(warnings, "Your kernel does not support cgroup cfs quotas") + } + + info.CPURealtimePeriod = cgroupEnabled(mountPoint, "cpu.rt_period_us") + if !info.CPURealtimePeriod { + warnings = append(warnings, "Your kernel does not support cgroup rt period") + } + + info.CPURealtimeRuntime = cgroupEnabled(mountPoint, "cpu.rt_runtime_us") + if !info.CPURealtimeRuntime { + warnings = append(warnings, "Your kernel does not support cgroup rt runtime") + } + + return warnings +} + +// applyBlkioCgroupInfo reads the blkio information from the blkio cgroup mount point. 
+func applyBlkioCgroupInfo(info *SysInfo, cgMounts map[string]string) []string { + var warnings []string + mountPoint, ok := cgMounts["blkio"] + if !ok { + warnings = append(warnings, "Unable to find blkio cgroup in mounts") + return warnings + } + + info.BlkioWeight = cgroupEnabled(mountPoint, "blkio.weight") + if !info.BlkioWeight { + warnings = append(warnings, "Your kernel does not support cgroup blkio weight") + } + + info.BlkioWeightDevice = cgroupEnabled(mountPoint, "blkio.weight_device") + if !info.BlkioWeightDevice { + warnings = append(warnings, "Your kernel does not support cgroup blkio weight_device") + } + + info.BlkioReadBpsDevice = cgroupEnabled(mountPoint, "blkio.throttle.read_bps_device") + if !info.BlkioReadBpsDevice { + warnings = append(warnings, "Your kernel does not support cgroup blkio throttle.read_bps_device") + } + + info.BlkioWriteBpsDevice = cgroupEnabled(mountPoint, "blkio.throttle.write_bps_device") + if !info.BlkioWriteBpsDevice { + warnings = append(warnings, "Your kernel does not support cgroup blkio throttle.write_bps_device") + } + info.BlkioReadIOpsDevice = cgroupEnabled(mountPoint, "blkio.throttle.read_iops_device") + if !info.BlkioReadIOpsDevice { + warnings = append(warnings, "Your kernel does not support cgroup blkio throttle.read_iops_device") + } + + info.BlkioWriteIOpsDevice = cgroupEnabled(mountPoint, "blkio.throttle.write_iops_device") + if !info.BlkioWriteIOpsDevice { + warnings = append(warnings, "Your kernel does not support cgroup blkio throttle.write_iops_device") + } + + return warnings +} + +// applyCPUSetCgroupInfo reads the cpuset information from the cpuset cgroup mount point. +func applyCPUSetCgroupInfo(info *SysInfo, cgMounts map[string]string) []string { + var warnings []string + mountPoint, ok := cgMounts["cpuset"] + if !ok { + warnings = append(warnings, "Unable to find cpuset cgroup in mounts") + return warnings + } + info.Cpuset = ok + + var err error + + cpus, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.cpus")) + if err != nil { + return warnings + } + info.Cpus = strings.TrimSpace(string(cpus)) + + mems, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.mems")) + if err != nil { + return warnings + } + info.Mems = strings.TrimSpace(string(mems)) + + return warnings +} + +// applyPIDSCgroupInfo reads the pids information from the pids cgroup mount point. +func applyPIDSCgroupInfo(info *SysInfo, _ map[string]string) []string { + var warnings []string + _, err := cgroups.FindCgroupMountpoint("", "pids") + if err != nil { + warnings = append(warnings, err.Error()) + return warnings + } + info.PidsLimit = true + return warnings +} + +// applyDevicesCgroupInfo reads the pids information from the devices cgroup mount point. +func applyDevicesCgroupInfo(info *SysInfo, cgMounts map[string]string) []string { + var warnings []string + _, ok := cgMounts["devices"] + info.CgroupDevicesEnabled = ok + return warnings +} + +// applyNetworkingInfo adds networking information to the info. +func applyNetworkingInfo(info *SysInfo, _ map[string]string) []string { + var warnings []string + info.IPv4ForwardingDisabled = !readProcBool("/proc/sys/net/ipv4/ip_forward") + info.BridgeNFCallIPTablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-iptables") + info.BridgeNFCallIP6TablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-ip6tables") + return warnings +} + +// applyAppArmorInfo adds AppArmor information to the info. 
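Taken together, these collectors feed sysinfo.New, which a caller might use roughly as follows (a sketch; passing quiet=true suppresses the logrus warnings the collectors accumulate):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/sysinfo"
)

func main() {
	si := sysinfo.New(true)
	if !si.MemoryLimit {
		fmt.Println("kernel lacks cgroup memory limit support; skipping memory caps")
	}
	if si.IPv4ForwardingDisabled {
		fmt.Println("net.ipv4.ip_forward is off; container networking will not work")
	}
}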
+func applyAppArmorInfo(info *SysInfo, _ map[string]string) []string { + var warnings []string + if _, err := os.Stat("/sys/kernel/security/apparmor"); !os.IsNotExist(err) { + if _, err := ioutil.ReadFile("/sys/kernel/security/apparmor/profiles"); err == nil { + info.AppArmor = true + } + } + return warnings +} + +// applySeccompInfo checks if Seccomp is supported, via CONFIG_SECCOMP. +func applySeccompInfo(info *SysInfo, _ map[string]string) []string { + var warnings []string + // Check if Seccomp is supported, via CONFIG_SECCOMP. + if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL { + // Make sure the kernel has CONFIG_SECCOMP_FILTER. + if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL { + info.Seccomp = true + } + } + return warnings +} + +func cgroupEnabled(mountPoint, name string) bool { + _, err := os.Stat(path.Join(mountPoint, name)) + return err == nil +} + +func readProcBool(path string) bool { + val, err := ioutil.ReadFile(path) + if err != nil { + return false + } + return strings.TrimSpace(string(val)) == "1" +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_unix.go new file mode 100644 index 000000000..23cc695fb --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_unix.go @@ -0,0 +1,9 @@ +// +build !linux,!windows + +package sysinfo // import "github.com/docker/docker/pkg/sysinfo" + +// New returns an empty SysInfo for non linux for now. +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + return sysInfo +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_windows.go new file mode 100644 index 000000000..5f68524e7 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_windows.go @@ -0,0 +1,7 @@ +package sysinfo // import "github.com/docker/docker/pkg/sysinfo" + +// New returns an empty SysInfo for windows for now. +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + return sysInfo +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/chtimes.go b/extender/code/vendor/github.com/docker/docker/pkg/system/chtimes.go new file mode 100644 index 000000000..c26a4e24b --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/chtimes.go @@ -0,0 +1,31 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "time" +) + +// Chtimes changes the access time and modified time of a file at the given path +func Chtimes(name string, atime time.Time, mtime time.Time) error { + unixMinTime := time.Unix(0, 0) + unixMaxTime := maxTime + + // If the modified time is prior to the Unix Epoch, or after the + // end of Unix Time, os.Chtimes has undefined behavior + // default to Unix Epoch in this case, just in case + + if atime.Before(unixMinTime) || atime.After(unixMaxTime) { + atime = unixMinTime + } + + if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { + mtime = unixMinTime + } + + if err := os.Chtimes(name, atime, mtime); err != nil { + return err + } + + // Take platform specific action for setting create time. 
+ return setCTime(name, mtime) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go new file mode 100644 index 000000000..259138a45 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go @@ -0,0 +1,14 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "time" +) + +//setCTime will set the create time on a file. On Unix, the create +//time is updated as a side effect of setting the modified time, so +//no action is required. +func setCTime(path string, ctime time.Time) error { + return nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go new file mode 100644 index 000000000..d3a115ff4 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go @@ -0,0 +1,26 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "time" + + "golang.org/x/sys/windows" +) + +//setCTime will set the create time on a file. On Windows, this requires +//calling SetFileTime and explicitly including the create time. +func setCTime(path string, ctime time.Time) error { + ctimespec := windows.NsecToTimespec(ctime.UnixNano()) + pathp, e := windows.UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := windows.CreateFile(pathp, + windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil, + windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer windows.Close(h) + c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec)) + return windows.SetFileTime(h, &c, nil, nil) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/errors.go b/extender/code/vendor/github.com/docker/docker/pkg/system/errors.go new file mode 100644 index 000000000..2573d7162 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/errors.go @@ -0,0 +1,13 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "errors" +) + +var ( + // ErrNotSupportedPlatform means the platform is not supported. + ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") + + // ErrNotSupportedOperatingSystem means the operating system is not supported. + ErrNotSupportedOperatingSystem = errors.New("operating system is not supported") +) diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/exitcode.go b/extender/code/vendor/github.com/docker/docker/pkg/system/exitcode.go new file mode 100644 index 000000000..4ba8fe35b --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/exitcode.go @@ -0,0 +1,19 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "fmt" + "os/exec" + "syscall" +) + +// GetExitCode returns the ExitStatus of the specified error if its type is +// exec.ExitError, returns 0 and an error otherwise. 
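The Chtimes wrapper vendored above clamps out-of-range timestamps to the Unix epoch before calling os.Chtimes; a sketch of why a caller might rely on that (the path is hypothetical):

package main

import (
	"time"

	"github.com/docker/docker/pkg/system"
)

func main() {
	// A bogus far-future mtime (e.g. from an untrusted tar header) is clamped
	// to the epoch instead of hitting os.Chtimes' undefined behavior.
	farFuture := time.Date(3000, 1, 1, 0, 0, 0, 0, time.UTC)
	_ = system.Chtimes("/tmp/example", time.Unix(0, 0), farFuture)
}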
+func GetExitCode(err error) (int, error) { + exitCode := 0 + if exiterr, ok := err.(*exec.ExitError); ok { + if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { + return procExit.ExitStatus(), nil + } + } + return exitCode, fmt.Errorf("failed to get exit code") +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/filesys.go b/extender/code/vendor/github.com/docker/docker/pkg/system/filesys.go new file mode 100644 index 000000000..adeb16305 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/filesys.go @@ -0,0 +1,67 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "io/ioutil" + "os" + "path/filepath" +) + +// MkdirAllWithACL is a wrapper for MkdirAll on unix systems. +func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { + return MkdirAll(path, perm, sddl) +} + +// MkdirAll creates a directory named path along with any necessary parents, +// with permission specified by attribute perm for all dir created. +func MkdirAll(path string, perm os.FileMode, sddl string) error { + return os.MkdirAll(path, perm) +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. +func IsAbs(path string) bool { + return filepath.IsAbs(path) +} + +// The functions below here are wrappers for the equivalents in the os and ioutils packages. +// They are passthrough on Unix platforms, and only relevant on Windows. + +// CreateSequential creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func CreateSequential(name string) (*os.File, error) { + return os.Create(name) +} + +// OpenSequential opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func OpenSequential(name string) (*os.File, error) { + return os.Open(name) +} + +// OpenFileSequential is the generalized open call; most users will use Open +// or Create instead. It opens the named file with specified flag +// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, +// methods on the returned File can be used for I/O. +// If there is an error, it will be of type *PathError. +func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { + return os.OpenFile(name, flag, perm) +} + +// TempFileSequential creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. 
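GetExitCode above unwraps an *exec.ExitError into its numeric status; for example:

package main

import (
	"fmt"
	"os/exec"

	"github.com/docker/docker/pkg/system"
)

func main() {
	err := exec.Command("sh", "-c", "exit 3").Run()
	if code, cerr := system.GetExitCode(err); cerr == nil {
		fmt.Println("exit code:", code) // 3
	}
}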
+func TempFileSequential(dir, prefix string) (f *os.File, err error) {
+	return ioutil.TempFile(dir, prefix)
+}
diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/system/filesys_windows.go
new file mode 100644
index 000000000..a1f6013f1
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/docker/pkg/system/filesys_windows.go
@@ -0,0 +1,296 @@
+package system // import "github.com/docker/docker/pkg/system"
+
+import (
+	"os"
+	"path/filepath"
+	"regexp"
+	"strconv"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+	"unsafe"
+
+	winio "github.com/Microsoft/go-winio"
+	"golang.org/x/sys/windows"
+)
+
+const (
+	// SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System
+	SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
+	// SddlNtvmAdministratorsLocalSystem is NT VIRTUAL MACHINE\Virtual Machines plus local administrators plus NT AUTHORITY\System
+	SddlNtvmAdministratorsLocalSystem = "D:P(A;OICI;GA;;;S-1-5-83-0)(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
+)
+
+// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory
+// with an appropriate SDDL defined ACL.
+func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
+	return mkdirall(path, true, sddl)
+}
+
+// MkdirAll implementation that is volume path aware for Windows.
+func MkdirAll(path string, _ os.FileMode, sddl string) error {
+	return mkdirall(path, false, sddl)
+}
+
+// mkdirall is a custom version of os.MkdirAll modified for use on Windows
+// so that it is both volume path aware, and can create a directory with
+// a DACL.
+func mkdirall(path string, applyACL bool, sddl string) error {
+	if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) {
+		return nil
+	}
+
+	// The rest of this method is largely copied from os.MkdirAll and should be kept
+	// as-is to ensure compatibility.
+
+	// Fast path: if we can tell whether path is a directory or file, stop with success or error.
+	dir, err := os.Stat(path)
+	if err == nil {
+		if dir.IsDir() {
+			return nil
+		}
+		return &os.PathError{
+			Op:   "mkdir",
+			Path: path,
+			Err:  syscall.ENOTDIR,
+		}
+	}
+
+	// Slow path: make sure parent exists and then call Mkdir for path.
+	i := len(path)
+	for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
+		i--
+	}
+
+	j := i
+	for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
+		j--
+	}
+
+	if j > 1 {
+		// Create parent
+		err = mkdirall(path[0:j-1], false, sddl)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result.
+	if applyACL {
+		err = mkdirWithACL(path, sddl)
+	} else {
+		err = os.Mkdir(path, 0)
+	}
+
+	if err != nil {
+		// Handle arguments like "foo/." by
+		// double-checking that directory doesn't exist.
+		dir, err1 := os.Lstat(path)
+		if err1 == nil && dir.IsDir() {
+			return nil
+		}
+		return err
+	}
+	return nil
+}
+
+// mkdirWithACL creates a new directory. If there is an error, it will be of
+// type *PathError.
+//
+// This is a modified and combined version of os.Mkdir and windows.Mkdir
+// in golang to cater for creating a directory with an ACL permitting full
+// access, with inheritance, to any subfolder/file for Built-in Administrators
+// and Local System.
+func mkdirWithACL(name string, sddl string) error { + sa := windows.SecurityAttributes{Length: 0} + sd, err := winio.SddlToSecurityDescriptor(sddl) + if err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0])) + + namep, err := windows.UTF16PtrFromString(name) + if err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + + e := windows.CreateDirectory(namep, &sa) + if e != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: e} + } + return nil +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, +// golang filepath.IsAbs does not consider a path \windows\system32 as absolute +// as it doesn't start with a drive-letter/colon combination. However, in +// docker we need to verify things such as WORKDIR /windows/system32 in +// a Dockerfile (which gets translated to \windows\system32 when being processed +// by the daemon. This SHOULD be treated as absolute from a docker processing +// perspective. +func IsAbs(path string) bool { + if !filepath.IsAbs(path) { + if !strings.HasPrefix(path, string(os.PathSeparator)) { + return false + } + } + return true +} + +// The origin of the functions below here are the golang OS and windows packages, +// slightly modified to only cope with files, not directories due to the +// specific use case. +// +// The alteration is to allow a file on Windows to be opened with +// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating +// the standby list, particularly when accessing large files such as layer.tar. + +// CreateSequential creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func CreateSequential(name string) (*os.File, error) { + return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) +} + +// OpenSequential opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func OpenSequential(name string) (*os.File, error) { + return OpenFileSequential(name, os.O_RDONLY, 0) +} + +// OpenFileSequential is the generalized open call; most users will use Open +// or Create instead. +// If there is an error, it will be of type *PathError. 
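A sketch of the ACL-aware directory creation defined above; it compiles only for GOOS=windows, since the SDDL constants live in filesys_windows.go, and the path is hypothetical:

package main

import "github.com/docker/docker/pkg/system"

func main() {
	// Creates the directory with a DACL granting full access, with
	// inheritance, to Built-in Administrators and Local System.
	_ = system.MkdirAllWithACL(`C:\ProgramData\example`, 0, system.SddlAdministratorsLocalSystem)
}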
+func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) { + if name == "" { + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} + } + r, errf := windowsOpenFileSequential(name, flag, 0) + if errf == nil { + return r, nil + } + return nil, &os.PathError{Op: "open", Path: name, Err: errf} +} + +func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { + r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0) + if e != nil { + return nil, e + } + return os.NewFile(uintptr(r), name), nil +} + +func makeInheritSa() *windows.SecurityAttributes { + var sa windows.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { + if len(path) == 0 { + return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND + } + pathp, err := windows.UTF16PtrFromString(path) + if err != nil { + return windows.InvalidHandle, err + } + var access uint32 + switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { + case windows.O_RDONLY: + access = windows.GENERIC_READ + case windows.O_WRONLY: + access = windows.GENERIC_WRITE + case windows.O_RDWR: + access = windows.GENERIC_READ | windows.GENERIC_WRITE + } + if mode&windows.O_CREAT != 0 { + access |= windows.GENERIC_WRITE + } + if mode&windows.O_APPEND != 0 { + access &^= windows.GENERIC_WRITE + access |= windows.FILE_APPEND_DATA + } + sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) + var sa *windows.SecurityAttributes + if mode&windows.O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): + createmode = windows.CREATE_NEW + case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): + createmode = windows.CREATE_ALWAYS + case mode&windows.O_CREAT == windows.O_CREAT: + createmode = windows.OPEN_ALWAYS + case mode&windows.O_TRUNC == windows.O_TRUNC: + createmode = windows.TRUNCATE_EXISTING + default: + createmode = windows.OPEN_EXISTING + } + // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. + //https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx + const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN + h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) + return h, e +} + +// Helpers for TempFileSequential +var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} +func nextSuffix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// TempFileSequential is a copy of ioutil.TempFile, modified to use sequential +// file access. Below is the original comment from golang: +// TempFile creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. 
It is the caller's responsibility +// to remove the file when no longer needed. +func TempFileSequential(dir, prefix string) (f *os.File, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+nextSuffix()) + f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + break + } + return +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/init.go b/extender/code/vendor/github.com/docker/docker/pkg/system/init.go new file mode 100644 index 000000000..a17597aab --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/init.go @@ -0,0 +1,22 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "syscall" + "time" + "unsafe" +) + +// Used by chtimes +var maxTime time.Time + +func init() { + // chtimes initialization + if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { + // This is a 64 bit timespec + // os.Chtimes limits time to the following + maxTime = time.Unix(0, 1<<63-1) + } else { + // This is a 32 bit timespec + maxTime = time.Unix(1<<31-1, 0) + } +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/init_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/system/init_unix.go new file mode 100644 index 000000000..4996a67c1 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/init_unix.go @@ -0,0 +1,7 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +// InitLCOW does nothing since LCOW is a windows only feature +func InitLCOW(experimental bool) { +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/init_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/system/init_windows.go new file mode 100644 index 000000000..4910ff69d --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/init_windows.go @@ -0,0 +1,12 @@ +package system // import "github.com/docker/docker/pkg/system" + +// lcowSupported determines if Linux Containers on Windows are supported. +var lcowSupported = false + +// InitLCOW sets whether LCOW is supported or not +func InitLCOW(experimental bool) { + v := GetOSVersion() + if experimental && v.Build >= 16299 { + lcowSupported = true + } +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/lcow.go b/extender/code/vendor/github.com/docker/docker/pkg/system/lcow.go new file mode 100644 index 000000000..5be3e2182 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/lcow.go @@ -0,0 +1,32 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "runtime" + "strings" + + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// IsOSSupported determines if an operating system is supported by the host +func IsOSSupported(os string) bool { + if strings.EqualFold(runtime.GOOS, os) { + return true + } + if LCOWSupported() && strings.EqualFold(os, "linux") { + return true + } + return false +} + +// ValidatePlatform determines if a platform structure is valid. 
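IsOSSupported above is the LCOW-aware compatibility gate; a hypothetical call site before unpacking an image layer built for another OS:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/system"
)

func main() {
	imageOS := "linux" // normally taken from the image config
	if !system.IsOSSupported(imageOS) {
		fmt.Printf("image OS %q cannot run on this host\n", imageOS)
	}
}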
+// TODO This is a temporary windows-only function, should be replaced by +// comparison of worker capabilities +func ValidatePlatform(platform specs.Platform) error { + if runtime.GOOS == "windows" { + if !(platform.OS == runtime.GOOS || (LCOWSupported() && platform.OS == "linux")) { + return errors.Errorf("unsupported os %s", platform.OS) + } + } + return nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/lcow_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/system/lcow_unix.go new file mode 100644 index 000000000..26397fb8a --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/lcow_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +// LCOWSupported returns true if Linux containers on Windows are supported. +func LCOWSupported() bool { + return false +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/lcow_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/system/lcow_windows.go new file mode 100644 index 000000000..f0139df8f --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/lcow_windows.go @@ -0,0 +1,6 @@ +package system // import "github.com/docker/docker/pkg/system" + +// LCOWSupported returns true if Linux containers on Windows are supported. +func LCOWSupported() bool { + return lcowSupported +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/lstat_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/system/lstat_unix.go new file mode 100644 index 000000000..de5a1c0fb --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/lstat_unix.go @@ -0,0 +1,20 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "syscall" +) + +// Lstat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Lstat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Lstat(path, s); err != nil { + return nil, &os.PathError{Op: "Lstat", Path: path, Err: err} + } + return fromStatT(s) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/lstat_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/system/lstat_windows.go new file mode 100644 index 000000000..359c791d9 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/lstat_windows.go @@ -0,0 +1,14 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "os" + +// Lstat calls os.Lstat to get a fileinfo interface back. +// This is then copied into our own locally defined structure. +func Lstat(path string) (*StatT, error) { + fi, err := os.Lstat(path) + if err != nil { + return nil, err + } + + return fromStatT(&fi) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/meminfo.go b/extender/code/vendor/github.com/docker/docker/pkg/system/meminfo.go new file mode 100644 index 000000000..6667eb84d --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/meminfo.go @@ -0,0 +1,17 @@ +package system // import "github.com/docker/docker/pkg/system" + +// MemInfo contains memory statistics of the host system. +type MemInfo struct { + // Total usable RAM (i.e. physical RAM minus a few reserved bits and the + // kernel binary code). + MemTotal int64 + + // Amount of free memory. + MemFree int64 + + // Total amount of swap space available. 
+ SwapTotal int64 + + // Amount of swap space that is currently unused. + SwapFree int64 +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/extender/code/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go new file mode 100644 index 000000000..d79e8b076 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go @@ -0,0 +1,65 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "bufio" + "io" + "os" + "strconv" + "strings" + + "github.com/docker/go-units" +) + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + file, err := os.Open("/proc/meminfo") + if err != nil { + return nil, err + } + defer file.Close() + return parseMemInfo(file) +} + +// parseMemInfo parses the /proc/meminfo file into +// a MemInfo object given an io.Reader to the file. +// Throws error if there are problems reading from the file +func parseMemInfo(reader io.Reader) (*MemInfo, error) { + meminfo := &MemInfo{} + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + // Expected format: ["MemTotal:", "1234", "kB"] + parts := strings.Fields(scanner.Text()) + + // Sanity checks: Skip malformed entries. + if len(parts) < 3 || parts[2] != "kB" { + continue + } + + // Convert to bytes. + size, err := strconv.Atoi(parts[1]) + if err != nil { + continue + } + bytes := int64(size) * units.KiB + + switch parts[0] { + case "MemTotal:": + meminfo.MemTotal = bytes + case "MemFree:": + meminfo.MemFree = bytes + case "SwapTotal:": + meminfo.SwapTotal = bytes + case "SwapFree:": + meminfo.SwapFree = bytes + } + + } + + // Handle errors that may have occurred during the reading of the file. + if err := scanner.Err(); err != nil { + return nil, err + } + + return meminfo, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/extender/code/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go new file mode 100644 index 000000000..56f449426 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go @@ -0,0 +1,8 @@ +// +build !linux,!windows + +package system // import "github.com/docker/docker/pkg/system" + +// ReadMemInfo is not supported on platforms other than linux and windows. 
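On Linux, ReadMemInfo parses /proc/meminfo as shown above; a minimal consumer might look like this:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/system"
)

func main() {
	mi, err := system.ReadMemInfo()
	if err != nil {
		fmt.Println("meminfo unavailable on this platform:", err)
		return
	}
	fmt.Printf("RAM %d/%d bytes free, swap %d/%d bytes free\n",
		mi.MemFree, mi.MemTotal, mi.SwapFree, mi.SwapTotal)
}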
+func ReadMemInfo() (*MemInfo, error) { + return nil, ErrNotSupportedPlatform +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go new file mode 100644 index 000000000..6ed93f2fe --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go @@ -0,0 +1,45 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") +) + +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx +type memorystatusex struct { + dwLength uint32 + dwMemoryLoad uint32 + ullTotalPhys uint64 + ullAvailPhys uint64 + ullTotalPageFile uint64 + ullAvailPageFile uint64 + ullTotalVirtual uint64 + ullAvailVirtual uint64 + ullAvailExtendedVirtual uint64 +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + msi := &memorystatusex{ + dwLength: 64, + } + r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) + if r1 == 0 { + return &MemInfo{}, nil + } + return &MemInfo{ + MemTotal: int64(msi.ullTotalPhys), + MemFree: int64(msi.ullAvailPhys), + SwapTotal: int64(msi.ullTotalPageFile), + SwapFree: int64(msi.ullAvailPageFile), + }, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/mknod.go b/extender/code/vendor/github.com/docker/docker/pkg/system/mknod.go new file mode 100644 index 000000000..b132482e0 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/mknod.go @@ -0,0 +1,22 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "golang.org/x/sys/unix" +) + +// Mknod creates a filesystem node (file, device special file or named pipe) named path +// with attributes specified by mode and dev. +func Mknod(path string, mode uint32, dev int) error { + return unix.Mknod(path, mode, dev) +} + +// Mkdev is used to build the value of linux devices (in /dev/) which specifies major +// and minor number of the newly created device special file. +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. +// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor. +func Mkdev(major int64, minor int64) uint32 { + return uint32(unix.Mkdev(uint32(major), uint32(minor))) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/mknod_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/system/mknod_windows.go new file mode 100644 index 000000000..ec89d7a15 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/mknod_windows.go @@ -0,0 +1,11 @@ +package system // import "github.com/docker/docker/pkg/system" + +// Mknod is not implemented on Windows. +func Mknod(path string, mode uint32, dev int) error { + return ErrNotSupportedPlatform +} + +// Mkdev is not implemented on Windows. 
+func Mkdev(major int64, minor int64) uint32 { + panic("Mkdev not implemented on Windows.") +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/path.go b/extender/code/vendor/github.com/docker/docker/pkg/system/path.go new file mode 100644 index 000000000..a3d957afa --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/path.go @@ -0,0 +1,60 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "fmt" + "path/filepath" + "runtime" + "strings" + + "github.com/containerd/continuity/pathdriver" +) + +const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +// DefaultPathEnv is a unix style list of directories to search for +// executables. Each directory is separated from the next by a colon +// ':' character. +func DefaultPathEnv(os string) string { + if runtime.GOOS == "windows" { + if os != runtime.GOOS { + return defaultUnixPathEnv + } + // Deliberately empty on Windows containers on Windows as the default path will be set by + // the container. Docker has no context of what the default path should be. + return "" + } + return defaultUnixPathEnv + +} + +// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, +// is the system drive. +// On Linux: this is a no-op. +// On Windows: this verifies and manipulates the path as follows. +// This is used, for example, when validating a user provided path in docker cp. +// If a drive letter is supplied, it must be the system drive. The drive letter +// is always removed. Also, it translates the path to OS semantics (in other words, / to \). We +// need the path in this syntax so that it can ultimately be concatenated with +// a Windows long-path which doesn't support drive-letters. Examples: +// C: --> Fail +// C:\ --> \ +// a --> a +// /a --> \a +// d:\ --> Fail +func CheckSystemDriveAndRemoveDriveLetter(path string, driver pathdriver.PathDriver) (string, error) { + if runtime.GOOS != "windows" || LCOWSupported() { + return path, nil + } + + if len(path) == 2 && string(path[1]) == ":" { + return "", fmt.Errorf("No relative path specified in %q", path) + } + if !driver.IsAbs(path) || len(path) < 2 { + return filepath.FromSlash(path), nil + } + if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { + return "", fmt.Errorf("The specified path is not on the system drive (C:)") + } + return filepath.FromSlash(path[2:]), nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/path_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/system/path_unix.go new file mode 100644 index 000000000..b0b93196a --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/path_unix.go @@ -0,0 +1,10 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +// GetLongPathName converts Windows short pathnames to full pathnames. +// For example C:\Users\ADMIN~1 --> C:\Users\Administrator.
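To make the drive-letter examples above concrete, a short sketch of a hypothetical caller; `pathdriver.LocalPathDriver` is the stock driver from the containerd package this file already imports, and the paths are made up:

```go
package main

import (
	"fmt"

	"github.com/containerd/continuity/pathdriver"
	"github.com/docker/docker/pkg/system"
)

func main() {
	// On Windows the drive letter is stripped; on Linux this is a no-op.
	p, err := system.CheckSystemDriveAndRemoveDriveLetter(`C:\somepath`, pathdriver.LocalPathDriver)
	fmt.Println(p, err) // `\somepath` <nil> on Windows

	// A non-system drive letter is rejected.
	_, err = system.CheckSystemDriveAndRemoveDriveLetter(`d:\`, pathdriver.LocalPathDriver)
	fmt.Println(err) // "The specified path is not on the system drive (C:)"
}
```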
+// It is a no-op on non-Windows platforms +func GetLongPathName(path string) (string, error) { + return path, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/path_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/system/path_windows.go new file mode 100644 index 000000000..188f2c295 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/path_windows.go @@ -0,0 +1,24 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// GetLongPathName converts Windows short pathnames to full pathnames. +// For example C:\Users\ADMIN~1 --> C:\Users\Administrator. +// It is a no-op on non-Windows platforms +func GetLongPathName(path string) (string, error) { + // See https://groups.google.com/forum/#!topic/golang-dev/1tufzkruoTg + p := syscall.StringToUTF16(path) + b := p // GetLongPathName says we can reuse buffer + n, err := syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + if n > uint32(len(b)) { + b = make([]uint16, n) + _, err = syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + } + return syscall.UTF16ToString(b), nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/process_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/system/process_unix.go new file mode 100644 index 000000000..0195a891b --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/process_unix.go @@ -0,0 +1,24 @@ +// +build linux freebsd darwin + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// IsProcessAlive returns true if process with a given pid is running. +func IsProcessAlive(pid int) bool { + err := unix.Kill(pid, syscall.Signal(0)) + if err == nil || err == unix.EPERM { + return true + } + + return false +} + +// KillProcess force-stops a process. +func KillProcess(pid int) { + unix.Kill(pid, unix.SIGKILL) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/process_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/system/process_windows.go new file mode 100644 index 000000000..4e70c97b1 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/process_windows.go @@ -0,0 +1,18 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "os" + +// IsProcessAlive returns true if process with a given pid is running. +func IsProcessAlive(pid int) bool { + _, err := os.FindProcess(pid) + + return err == nil +} + +// KillProcess force-stops a process. +func KillProcess(pid int) { + p, err := os.FindProcess(pid) + if err == nil { + p.Kill() + } +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/rm.go b/extender/code/vendor/github.com/docker/docker/pkg/system/rm.go new file mode 100644 index 000000000..b31099180 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/rm.go @@ -0,0 +1,80 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "syscall" + "time" + + "github.com/docker/docker/pkg/mount" + "github.com/pkg/errors" +) + +// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can +// often be remedied. +// Only use `EnsureRemoveAll` if you really want to make every effort to remove +// a directory. 
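Ahead of the implementation details below, a sketch of the intended call-site pattern: a cleanup path where the directory may still contain mounts. The directory name is hypothetical; only the vendored `system` package is assumed:

```go
package main

import (
	"log"

	"github.com/docker/docker/pkg/system"
)

func main() {
	// Unlike a bare os.RemoveAll, this first unmounts anything under the
	// directory and retries transient EBUSY failures before giving up.
	if err := system.EnsureRemoveAll("/var/lib/scratch"); err != nil {
		log.Fatalf("cleanup failed: %v", err)
	}
}
```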
+// +// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there +// can be a race between reading directory entries and then actually attempting +// to remove everything in the directory. +// These types of errors do not need to be returned, since it's OK for the dir to +// be gone; we can just retry the remove operation. +// +// This should not return an `os.ErrNotExist` kind of error under any circumstances +func EnsureRemoveAll(dir string) error { + notExistErr := make(map[string]bool) + + // track retries + exitOnErr := make(map[string]int) + maxRetry := 50 + + // Attempt to unmount anything beneath this dir first + mount.RecursiveUnmount(dir) + + for { + err := os.RemoveAll(dir) + if err == nil { + return nil + } + + pe, ok := err.(*os.PathError) + if !ok { + return err + } + + if os.IsNotExist(err) { + if notExistErr[pe.Path] { + return err + } + notExistErr[pe.Path] = true + + // There is a race where some subdir can be removed after the parent + // dir entries have been read. + // So the path could be from `os.Remove(subdir)` + // If the reported non-existent path is not the passed in `dir` we + // should just retry, but otherwise return with no error. + if pe.Path == dir { + return nil + } + continue + } + + if pe.Err != syscall.EBUSY { + return err + } + + if mounted, _ := mount.Mounted(pe.Path); mounted { + if e := mount.Unmount(pe.Path); e != nil { + if mounted, _ := mount.Mounted(pe.Path); mounted { + return errors.Wrapf(e, "error while removing %s", dir) + } + } + } + + if exitOnErr[pe.Path] == maxRetry { + return err + } + exitOnErr[pe.Path]++ + time.Sleep(100 * time.Millisecond) + } +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/stat_darwin.go b/extender/code/vendor/github.com/docker/docker/pkg/system/stat_darwin.go new file mode 100644 index 000000000..c1c0ee9f3 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/stat_darwin.go @@ -0,0 +1,13 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go b/extender/code/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go new file mode 100644 index 000000000..c1c0ee9f3 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go @@ -0,0 +1,13 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/extender/code/vendor/github.com/docker/docker/pkg/system/stat_linux.go new file mode 100644 index 000000000..98c9eb18d --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/stat_linux.go @@ -0,0 +1,19 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: s.Mode, + uid: s.Uid,
+ gid: s.Gid, + rdev: s.Rdev, + mtim: s.Mtim}, nil +} + +// FromStatT converts a syscall.Stat_t type to a system.Stat_t type +// This is exposed on Linux as pkg/archive/changes uses it. +func FromStatT(s *syscall.Stat_t) (*StatT, error) { + return fromStatT(s) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go b/extender/code/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go new file mode 100644 index 000000000..756b92d1e --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go @@ -0,0 +1,13 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/extender/code/vendor/github.com/docker/docker/pkg/system/stat_solaris.go new file mode 100644 index 000000000..756b92d1e --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/stat_solaris.go @@ -0,0 +1,13 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/stat_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/system/stat_unix.go new file mode 100644 index 000000000..86bb6dd55 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/stat_unix.go @@ -0,0 +1,66 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "syscall" +) + +// StatT type contains status of a file. It contains metadata +// like permission, owner, group, size, etc about a file. +type StatT struct { + mode uint32 + uid uint32 + gid uint32 + rdev uint64 + size int64 + mtim syscall.Timespec +} + +// Mode returns file's permission mode. +func (s StatT) Mode() uint32 { + return s.mode +} + +// UID returns file's user id of owner. +func (s StatT) UID() uint32 { + return s.uid +} + +// GID returns file's group id of owner. +func (s StatT) GID() uint32 { + return s.gid +} + +// Rdev returns file's device ID (if it's special file). +func (s StatT) Rdev() uint64 { + return s.rdev +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() syscall.Timespec { + return s.mtim +} + +// IsDir reports whether s describes a directory. +func (s StatT) IsDir() bool { + return s.mode&syscall.S_IFDIR != 0 +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. 
+// +// It returns an error if the file does not exist. +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, &os.PathError{Op: "Stat", Path: path, Err: err} + } + return fromStatT(s) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/system/stat_windows.go new file mode 100644 index 000000000..b2456cb88 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/stat_windows.go @@ -0,0 +1,49 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "time" +) + +// StatT type contains status of a file. It contains metadata +// like permission, size, etc about a file. +type StatT struct { + mode os.FileMode + size int64 + mtim time.Time +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mode returns file's permission mode. +func (s StatT) Mode() os.FileMode { + return os.FileMode(s.mode) +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() time.Time { + return time.Time(s.mtim) +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// It returns an error if the file does not exist. +func Stat(path string) (*StatT, error) { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + return fromStatT(&fi) +} + +// fromStatT converts an os.FileInfo type to a system.StatT type +func fromStatT(fi *os.FileInfo) (*StatT, error) { + return &StatT{ + size: (*fi).Size(), + mode: (*fi).Mode(), + mtim: (*fi).ModTime()}, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/syscall_unix.go b/extender/code/vendor/github.com/docker/docker/pkg/system/syscall_unix.go new file mode 100644 index 000000000..919a412a7 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/syscall_unix.go @@ -0,0 +1,17 @@ +// +build linux freebsd + +package system // import "github.com/docker/docker/pkg/system" + +import "golang.org/x/sys/unix" + +// Unmount is a platform-specific helper function to call +// the unmount syscall. +func Unmount(dest string) error { + return unix.Unmount(dest, 0) +} + +// CommandLineToArgv should not be used on Unix. +// It simply returns commandLine as the only element in the returned array.
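A brief sketch of the platform-neutral surface the two `StatT` implementations above share; the path is arbitrary, and only methods defined on both variants are used:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/system"
)

func main() {
	st, err := system.Stat("/etc/hosts")
	if err != nil {
		panic(err)
	}
	// On unix these come from syscall.Stat_t; on Windows from os.FileInfo.
	fmt.Println("size:", st.Size(), "mode:", st.Mode())
}
```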
+func CommandLineToArgv(commandLine string) ([]string, error) { + return []string{commandLine}, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/system/syscall_windows.go new file mode 100644 index 000000000..4ae92fa6c --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/syscall_windows.go @@ -0,0 +1,193 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "fmt" + "syscall" + "unsafe" + + "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" +) + +const ( + OWNER_SECURITY_INFORMATION = 0x00000001 + GROUP_SECURITY_INFORMATION = 0x00000002 + DACL_SECURITY_INFORMATION = 0x00000004 + SACL_SECURITY_INFORMATION = 0x00000008 + LABEL_SECURITY_INFORMATION = 0x00000010 + ATTRIBUTE_SECURITY_INFORMATION = 0x00000020 + SCOPE_SECURITY_INFORMATION = 0x00000040 + PROCESS_TRUST_LABEL_SECURITY_INFORMATION = 0x00000080 + ACCESS_FILTER_SECURITY_INFORMATION = 0x00000100 + BACKUP_SECURITY_INFORMATION = 0x00010000 + PROTECTED_DACL_SECURITY_INFORMATION = 0x80000000 + PROTECTED_SACL_SECURITY_INFORMATION = 0x40000000 + UNPROTECTED_DACL_SECURITY_INFORMATION = 0x20000000 + UNPROTECTED_SACL_SECURITY_INFORMATION = 0x10000000 +) + +const ( + SE_UNKNOWN_OBJECT_TYPE = iota + SE_FILE_OBJECT + SE_SERVICE + SE_PRINTER + SE_REGISTRY_KEY + SE_LMSHARE + SE_KERNEL_OBJECT + SE_WINDOW_OBJECT + SE_DS_OBJECT + SE_DS_OBJECT_ALL + SE_PROVIDER_DEFINED_OBJECT + SE_WMIGUID_OBJECT + SE_REGISTRY_WOW64_32KEY +) + +const ( + SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" +) + +const ( + ContainerAdministratorSidString = "S-1-5-93-2-1" + ContainerUserSidString = "S-1-5-93-2-2" +) + +var ( + ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + procGetVersionExW = modkernel32.NewProc("GetVersionExW") + procGetProductInfo = modkernel32.NewProc("GetProductInfo") + procSetNamedSecurityInfo = modadvapi32.NewProc("SetNamedSecurityInfoW") + procGetSecurityDescriptorDacl = modadvapi32.NewProc("GetSecurityDescriptorDacl") +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx +type osVersionInfoEx struct { + OSVersionInfoSize uint32 + MajorVersion uint32 + MinorVersion uint32 + BuildNumber uint32 + PlatformID uint32 + CSDVersion [128]uint16 + ServicePackMajor uint16 + ServicePackMinor uint16 + SuiteMask uint16 + ProductType byte + Reserve byte +} + +// GetOSVersion gets the operating system version on Windows. Note that +// docker.exe must be manifested to get the correct version information. +func GetOSVersion() OSVersion { + var err error + osv := OSVersion{} + osv.Version, err = windows.GetVersion() + if err != nil { + // GetVersion never fails. + panic(err) + } + osv.MajorVersion = uint8(osv.Version & 0xFF) + osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) + osv.Build = uint16(osv.Version >> 16) + return osv +} + +func (osv OSVersion) ToString() string { + return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build) +} + +// IsWindowsClient returns true if the SKU is client +// @engine maintainers - this function should not be removed or modified as it +// is used to enforce licensing restrictions on Windows. 
+func IsWindowsClient() bool { + osviex := &osVersionInfoEx{OSVersionInfoSize: 284} + r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex))) + if r1 == 0 { + logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err) + return false + } + const verNTWorkstation = 0x00000001 + return osviex.ProductType == verNTWorkstation +} + +// IsIoTCore returns true if the currently running image is based off of +// Windows 10 IoT Core. +// @engine maintainers - this function should not be removed or modified as it +// is used to enforce licensing restrictions on Windows. +func IsIoTCore() bool { + var returnedProductType uint32 + r1, _, err := procGetProductInfo.Call(6, 1, 0, 0, uintptr(unsafe.Pointer(&returnedProductType))) + if r1 == 0 { + logrus.Warnf("GetProductInfo failed - assuming this is not IoT: %v", err) + return false + } + const productIoTUAP = 0x0000007B + const productIoTUAPCommercial = 0x00000083 + return returnedProductType == productIoTUAP || returnedProductType == productIoTUAPCommercial +} + +// Unmount is a platform-specific helper function to call +// the unmount syscall. Not supported on Windows +func Unmount(dest string) error { + return nil +} + +// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array. +func CommandLineToArgv(commandLine string) ([]string, error) { + var argc int32 + + argsPtr, err := windows.UTF16PtrFromString(commandLine) + if err != nil { + return nil, err + } + + argv, err := windows.CommandLineToArgv(argsPtr, &argc) + if err != nil { + return nil, err + } + defer windows.LocalFree(windows.Handle(uintptr(unsafe.Pointer(argv)))) + + newArgs := make([]string, argc) + for i, v := range (*argv)[:argc] { + newArgs[i] = string(windows.UTF16ToString((*v)[:])) + } + + return newArgs, nil +} + +// HasWin32KSupport determines whether containers that depend on win32k can +// run on this machine. Win32k is the driver used to implement windowing. +func HasWin32KSupport() bool { + // For now, check for ntuser API support on the host. In the future, a host + // may support win32k in containers even if the host does not support ntuser + // APIs. 
+ return ntuserApiset.Load() == nil +} + +func SetNamedSecurityInfo(objectName *uint16, objectType uint32, securityInformation uint32, sidOwner *windows.SID, sidGroup *windows.SID, dacl *byte, sacl *byte) (result error) { + r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfo.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(sidOwner)), uintptr(unsafe.Pointer(sidGroup)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + if r0 != 0 { + result = syscall.Errno(r0) + } + return +} + +func GetSecurityDescriptorDacl(securityDescriptor *byte, daclPresent *uint32, dacl **byte, daclDefaulted *uint32) (result error) { + r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(securityDescriptor)), uintptr(unsafe.Pointer(daclPresent)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclDefaulted)), 0, 0) + if r1 == 0 { + if e1 != 0 { + result = syscall.Errno(e1) + } else { + result = syscall.EINVAL + } + } + return +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/umask.go b/extender/code/vendor/github.com/docker/docker/pkg/system/umask.go new file mode 100644 index 000000000..9912a2bab --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/umask.go @@ -0,0 +1,13 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "golang.org/x/sys/unix" +) + +// Umask sets the current process's file mode creation mask to newmask +// and returns oldmask. +func Umask(newmask int) (oldmask int, err error) { + return unix.Umask(newmask), nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/umask_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/system/umask_windows.go new file mode 100644 index 000000000..fc62388c3 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/umask_windows.go @@ -0,0 +1,7 @@ +package system // import "github.com/docker/docker/pkg/system" + +// Umask is not supported on the windows platform. +func Umask(newmask int) (oldmask int, err error) { + // should not be called on cli code path + return 0, ErrNotSupportedPlatform +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go b/extender/code/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go new file mode 100644 index 000000000..ed1b9fad5 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go @@ -0,0 +1,24 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/unix" +) + +// LUtimesNano is used to change access and modification time of the specified path. +// It's used for symlink files because unix.UtimesNano doesn't support a NOFOLLOW flag at the moment.
+func LUtimesNano(path string, ts []syscall.Timespec) error { + var _path *byte + _path, err := unix.BytePtrFromString(path) + if err != nil { + return err + } + + if _, _, err := unix.Syscall(unix.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != unix.ENOSYS { + return err + } + + return nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/utimes_linux.go b/extender/code/vendor/github.com/docker/docker/pkg/system/utimes_linux.go new file mode 100644 index 000000000..0afe85458 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/utimes_linux.go @@ -0,0 +1,25 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/unix" +) + +// LUtimesNano is used to change access and modification time of the specified path. +// It's used for symlink files because unix.UtimesNano doesn't support a NOFOLLOW flag at the moment. +func LUtimesNano(path string, ts []syscall.Timespec) error { + atFdCwd := unix.AT_FDCWD + + var _path *byte + _path, err := unix.BytePtrFromString(path) + if err != nil { + return err + } + if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS { + return err + } + + return nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go b/extender/code/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go new file mode 100644 index 000000000..095e072e1 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go @@ -0,0 +1,10 @@ +// +build !linux,!freebsd + +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// LUtimesNano is only supported on linux and freebsd. +func LUtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotSupportedPlatform +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/extender/code/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go new file mode 100644 index 000000000..66d4895b2 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go @@ -0,0 +1,29 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "golang.org/x/sys/unix" + +// Lgetxattr retrieves the value of the extended attribute identified by attr +// and associated with the given path in the file system. +// It returns a nil slice and nil error if the xattr is not set. +func Lgetxattr(path string, attr string) ([]byte, error) { + dest := make([]byte, 128) + sz, errno := unix.Lgetxattr(path, attr, dest) + if errno == unix.ENODATA { + return nil, nil + } + if errno == unix.ERANGE { + dest = make([]byte, sz) + sz, errno = unix.Lgetxattr(path, attr, dest) + } + if errno != nil { + return nil, errno + } + + return dest[:sz], nil +} + +// Lsetxattr sets the value of the extended attribute identified by attr +// and associated with the given path in the file system.
+func Lsetxattr(path string, attr string, data []byte, flags int) error { + return unix.Lsetxattr(path, attr, data, flags) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/extender/code/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go new file mode 100644 index 000000000..d780a90cd --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package system // import "github.com/docker/docker/pkg/system" + +// Lgetxattr is not supported on platforms other than linux. +func Lgetxattr(path string, attr string) ([]byte, error) { + return nil, ErrNotSupportedPlatform +} + +// Lsetxattr is not supported on platforms other than linux. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + return ErrNotSupportedPlatform +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go b/extender/code/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go new file mode 100644 index 000000000..c82fe603f --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go @@ -0,0 +1,222 @@ +// Package tailfile provides helper functions to read the last n lines of any +// ReadSeeker. +package tailfile // import "github.com/docker/docker/pkg/tailfile" + +import ( + "bufio" + "bytes" + "context" + "errors" + "io" + "os" +) + +const blockSize = 1024 + +var eol = []byte("\n") + +// ErrNonPositiveLinesNumber is an error returned if the requested number of lines is not positive. +var ErrNonPositiveLinesNumber = errors.New("The number of lines to extract from the file must be positive") + +// TailFile returns the last n lines of the passed in file. +func TailFile(f *os.File, n int) ([][]byte, error) { + size, err := f.Seek(0, io.SeekEnd) + if err != nil { + return nil, err + } + + rAt := io.NewSectionReader(f, 0, size) + r, nLines, err := NewTailReader(context.Background(), rAt, n) + if err != nil { + return nil, err + } + + buf := make([][]byte, 0, nLines) + scanner := bufio.NewScanner(r) + + for scanner.Scan() { + buf = append(buf, scanner.Bytes()) + } + return buf, nil +} + +// SizeReaderAt is an interface used to get a ReaderAt as well as the size of the underlying reader. +// Note that the size of the underlying reader should not change when using this interface. +type SizeReaderAt interface { + io.ReaderAt + Size() int64 +} + +// NewTailReader scopes the passed in reader to just the last N lines passed in +func NewTailReader(ctx context.Context, r SizeReaderAt, reqLines int) (io.Reader, int, error) { + return NewTailReaderWithDelimiter(ctx, r, reqLines, eol) +} + +// NewTailReaderWithDelimiter scopes the passed in reader to just the last N lines passed in +// In this case a "line" is defined by the passed in delimiter.
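Before the delimiter variant is fleshed out below, a quick sketch of the common entry point `TailFile` above; the log path is hypothetical and only the vendored import path is assumed:

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/tailfile"
)

func main() {
	f, err := os.Open("/var/log/app.log") // hypothetical log file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	lines, err := tailfile.TailFile(f, 10) // last 10 lines
	if err != nil {
		panic(err)
	}
	for _, l := range lines {
		fmt.Println(string(l))
	}
}
```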
+// +// Delimiter lengths should be generally small, no more than 12 bytes +func NewTailReaderWithDelimiter(ctx context.Context, r SizeReaderAt, reqLines int, delimiter []byte) (io.Reader, int, error) { + if reqLines < 1 { + return nil, 0, ErrNonPositiveLinesNumber + } + if len(delimiter) == 0 { + return nil, 0, errors.New("must provide a delimiter") + } + var ( + size = r.Size() + tailStart int64 + tailEnd = size + found int + ) + + if int64(len(delimiter)) >= size { + return bytes.NewReader(nil), 0, nil + } + + scanner := newScanner(r, delimiter) + for scanner.Scan(ctx) { + if err := scanner.Err(); err != nil { + return nil, 0, scanner.Err() + } + + found++ + if found == 1 { + tailEnd = scanner.End() + } + if found == reqLines { + break + } + } + + tailStart = scanner.Start(ctx) + + if found == 0 { + return bytes.NewReader(nil), 0, nil + } + + if found < reqLines && tailStart != 0 { + tailStart = 0 + } + return io.NewSectionReader(r, tailStart, tailEnd-tailStart), found, nil +} + +func newScanner(r SizeReaderAt, delim []byte) *scanner { + size := r.Size() + readSize := blockSize + if readSize > int(size) { + readSize = int(size) + } + // silly case... + if len(delim) >= readSize/2 { + readSize = len(delim)*2 + 2 + } + + return &scanner{ + r: r, + pos: size, + buf: make([]byte, readSize), + delim: delim, + } +} + +type scanner struct { + r SizeReaderAt + pos int64 + buf []byte + delim []byte + err error + idx int + done bool +} + +func (s *scanner) Start(ctx context.Context) int64 { + if s.idx > 0 { + idx := bytes.LastIndex(s.buf[:s.idx], s.delim) + if idx >= 0 { + return s.pos + int64(idx) + int64(len(s.delim)) + } + } + + // slow path + buf := make([]byte, len(s.buf)) + copy(buf, s.buf) + + readAhead := &scanner{ + r: s.r, + pos: s.pos, + delim: s.delim, + idx: s.idx, + buf: buf, + } + + if !readAhead.Scan(ctx) { + return 0 + } + return readAhead.End() +} + +func (s *scanner) End() int64 { + return s.pos + int64(s.idx) + int64(len(s.delim)) +} + +func (s *scanner) Err() error { + return s.err +} + +func (s *scanner) Scan(ctx context.Context) bool { + if s.err != nil { + return false + } + + for { + select { + case <-ctx.Done(): + s.err = ctx.Err() + return false + default: + } + + idx := s.idx - len(s.delim) + if idx < 0 { + readSize := int(s.pos) + if readSize > len(s.buf) { + readSize = len(s.buf) + } + + if readSize < len(s.delim) { + return false + } + + offset := s.pos - int64(readSize) + n, err := s.r.ReadAt(s.buf[:readSize], offset) + if err != nil && err != io.EOF { + s.err = err + return false + } + + s.pos -= int64(n) + idx = n + } + + s.idx = bytes.LastIndex(s.buf[:idx], s.delim) + if s.idx >= 0 { + return true + } + + if len(s.delim) > 1 && s.pos > 0 { + // in this case, there may be a partial delimiter at the front of the buffer, so set the position forward + // up to the maximum size partial that could be there so it can be read again in the next iteration with any + // potential remainder. + // An example where delimiter is `####`: + // [##asdfqwerty] + // ^ + // This resets the position to where the arrow is pointing. + // It could actually check if a partial exists and at the front, but that is pretty similar to the indexing + // code above though a bit more complex since each byte has to be checked (`len(delimiter)-1`) factorial). + // It's much simpler and cleaner to just re-read `len(delimiter)-1` bytes again. 
+ s.pos += int64(len(s.delim)) - 1 + } + + } +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go b/extender/code/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go new file mode 100644 index 000000000..bc7d84df4 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go @@ -0,0 +1,21 @@ +package tarsum // import "github.com/docker/docker/pkg/tarsum" + +// BuilderContext is an interface extending TarSum by adding the Remove method. +// In general there was concern about adding this method to TarSum itself +// so instead it is being added just to "BuilderContext" which will then +// only be used during the .dockerignore file processing +// - see builder/evaluator.go +type BuilderContext interface { + TarSum + Remove(string) +} + +func (bc *tarSum) Remove(filename string) { + for i, fis := range bc.sums { + if fis.Name() == filename { + bc.sums = append(bc.sums[:i], bc.sums[i+1:]...) + // Note, we don't just return because there could be + // more than one with this name + } + } +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go b/extender/code/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go new file mode 100644 index 000000000..01d4ed59b --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go @@ -0,0 +1,133 @@ +package tarsum // import "github.com/docker/docker/pkg/tarsum" + +import ( + "runtime" + "sort" + "strings" +) + +// FileInfoSumInterface provides an interface for accessing file checksum +// information within a tar file. This info is accessed through an interface +// so the actual name and sum cannot be meddled with. +type FileInfoSumInterface interface { + // File name + Name() string + // Checksum of this particular file and its headers + Sum() string + // Position of file in the tar + Pos() int64 +} + +type fileInfoSum struct { + name string + sum string + pos int64 +} + +func (fis fileInfoSum) Name() string { + return fis.name +} +func (fis fileInfoSum) Sum() string { + return fis.sum +} +func (fis fileInfoSum) Pos() int64 { + return fis.pos +} + +// FileInfoSums provides a list of FileInfoSumInterfaces. +type FileInfoSums []FileInfoSumInterface + +// GetFile returns the first FileInfoSumInterface with a matching name. +func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface { + // We do case insensitive matching on Windows as c:\APP and c:\app are + // the same. See issue #33107. + for i := range fis { + if (runtime.GOOS == "windows" && strings.EqualFold(fis[i].Name(), name)) || + (runtime.GOOS != "windows" && fis[i].Name() == name) { + return fis[i] + } + } + return nil +} + +// GetAllFile returns a FileInfoSums with all matching names. +func (fis FileInfoSums) GetAllFile(name string) FileInfoSums { + f := FileInfoSums{} + for i := range fis { + if fis[i].Name() == name { + f = append(f, fis[i]) + } + } + return f +} + +// GetDuplicatePaths returns a FileInfoSums with all duplicated paths. +func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) { + seen := make(map[string]int, len(fis)) // allocate early; no need to grow this map. + for i := range fis { + f := fis[i] + if _, ok := seen[f.Name()]; ok { + dups = append(dups, f) + } else { + seen[f.Name()] = 0 + } + } + return dups +} + +// Len returns the size of the FileInfoSums. +func (fis FileInfoSums) Len() int { return len(fis) } + +// Swap swaps two FileInfoSum values in a FileInfoSums list.
+func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] } + +// SortByPos sorts FileInfoSums content by position. +func (fis FileInfoSums) SortByPos() { + sort.Sort(byPos{fis}) +} + +// SortByNames sorts FileInfoSums content by name. +func (fis FileInfoSums) SortByNames() { + sort.Sort(byName{fis}) +} + +// SortBySums sorts FileInfoSums content by sums. +func (fis FileInfoSums) SortBySums() { + dups := fis.GetDuplicatePaths() + if len(dups) > 0 { + sort.Sort(bySum{fis, dups}) + } else { + sort.Sort(bySum{fis, nil}) + } +} + +// byName is a sort.Sort helper for sorting by file names. +// If names are the same, order them by their appearance in the tar archive +type byName struct{ FileInfoSums } + +func (bn byName) Less(i, j int) bool { + if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() { + return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos() + } + return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name() +} + +// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive +type bySum struct { + FileInfoSums + dups FileInfoSums +} + +func (bs bySum) Less(i, j int) bool { + if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() { + return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos() + } + return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum() +} + +// byPos is a sort.Sort helper for sorting by the sums of all the fileinfos by their original order +type byPos struct{ FileInfoSums } + +func (bp byPos) Less(i, j int) bool { + return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos() +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go b/extender/code/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go new file mode 100644 index 000000000..5542e1b2c --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go @@ -0,0 +1,301 @@ +// Package tarsum provides algorithms to perform checksum calculation on +// filesystem layers. +// +// The transportation of filesystems, regarding Docker, is done with tar(1) +// archives. There are a variety of tar serialization formats [2], and a key +// concern here is ensuring a repeatable checksum given a set of inputs from a +// generic tar archive. Types of transportation include distribution to and from a +// registry endpoint, saving and loading through commands or Docker daemon APIs, +// transferring the build context from client to Docker daemon, and committing the +// filesystem of a container to become an image. +// +// As tar archives are used for transit, but not preserved in many situations, the +// focus of the algorithm is to ensure the integrity of the preserved filesystem, +// while maintaining a deterministic accountability. This includes neither +// constraining the ordering or manipulation of the files during the creation or +// unpacking of the archive, nor include additional metadata state about the file +// system attributes. +package tarsum // import "github.com/docker/docker/pkg/tarsum" + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto" + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "hash" + "io" + "path" + "strings" +) + +const ( + buf8K = 8 * 1024 + buf16K = 16 * 1024 + buf32K = 32 * 1024 +) + +// NewTarSum creates a new interface for calculating a fixed time checksum of a +// tar archive. 
+// +// This is used for calculating checksums of layers of an image, in some cases +// including the byte payload of the image's json metadata as well, and for +// calculating the checksums for buildcache. +func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) { + return NewTarSumHash(r, dc, v, DefaultTHash) +} + +// NewTarSumHash creates a new TarSum, providing a THash to use rather than +// the DefaultTHash. +func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) { + headerSelector, err := getTarHeaderSelector(v) + if err != nil { + return nil, err + } + ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash} + err = ts.initTarSum() + return ts, err +} + +// NewTarSumForLabel creates a new TarSum using the provided TarSum version+hash label. +func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) { + parts := strings.SplitN(label, "+", 2) + if len(parts) != 2 { + return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}") + } + + versionName, hashName := parts[0], parts[1] + + version, ok := tarSumVersionsByName[versionName] + if !ok { + return nil, fmt.Errorf("unknown TarSum version name: %q", versionName) + } + + hashConfig, ok := standardHashConfigs[hashName] + if !ok { + return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName) + } + + tHash := NewTHash(hashConfig.name, hashConfig.hash.New) + + return NewTarSumHash(r, disableCompression, version, tHash) +} + +// TarSum is the generic interface for calculating fixed time +// checksums of a tar archive. +type TarSum interface { + io.Reader + GetSums() FileInfoSums + Sum([]byte) string + Version() Version + Hash() THash +} + +// tarSum struct is the structure for a Version0 checksum calculation. +type tarSum struct { + io.Reader + tarR *tar.Reader + tarW *tar.Writer + writer writeCloseFlusher + bufTar *bytes.Buffer + bufWriter *bytes.Buffer + bufData []byte + h hash.Hash + tHash THash + sums FileInfoSums + fileCounter int64 + currentFile string + finished bool + first bool + DisableCompression bool // false by default. When false, the output gzip compressed. + tarSumVersion Version // this field is not exported so it can not be mutated during use + headerSelector tarHeaderSelector // handles selecting and ordering headers for files in the archive +} + +func (ts tarSum) Hash() THash { + return ts.tHash +} + +func (ts tarSum) Version() Version { + return ts.tarSumVersion +} + +// THash provides a hash.Hash type generator and its name. +type THash interface { + Hash() hash.Hash + Name() string +} + +// NewTHash is a convenience method for creating a THash. +func NewTHash(name string, h func() hash.Hash) THash { + return simpleTHash{n: name, h: h} +} + +type tHashConfig struct { + name string + hash crypto.Hash +} + +var ( + // NOTE: DO NOT include MD5 or SHA1, which are considered insecure. + standardHashConfigs = map[string]tHashConfig{ + "sha256": {name: "sha256", hash: crypto.SHA256}, + "sha512": {name: "sha512", hash: crypto.SHA512}, + } +) + +// DefaultTHash is default TarSum hashing algorithm - "sha256". 
+var DefaultTHash = NewTHash("sha256", sha256.New) + +type simpleTHash struct { + n string + h func() hash.Hash +} + +func (sth simpleTHash) Name() string { return sth.n } +func (sth simpleTHash) Hash() hash.Hash { return sth.h() } + +func (ts *tarSum) encodeHeader(h *tar.Header) error { + for _, elem := range ts.headerSelector.selectHeaders(h) { + // Ignore these headers to be compatible with versions + // before go 1.10 + if elem[0] == "gname" || elem[0] == "uname" { + elem[1] = "" + } + if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { + return err + } + } + return nil +} + +func (ts *tarSum) initTarSum() error { + ts.bufTar = bytes.NewBuffer([]byte{}) + ts.bufWriter = bytes.NewBuffer([]byte{}) + ts.tarR = tar.NewReader(ts.Reader) + ts.tarW = tar.NewWriter(ts.bufTar) + if !ts.DisableCompression { + ts.writer = gzip.NewWriter(ts.bufWriter) + } else { + ts.writer = &nopCloseFlusher{Writer: ts.bufWriter} + } + if ts.tHash == nil { + ts.tHash = DefaultTHash + } + ts.h = ts.tHash.Hash() + ts.h.Reset() + ts.first = true + ts.sums = FileInfoSums{} + return nil +} + +func (ts *tarSum) Read(buf []byte) (int, error) { + if ts.finished { + return ts.bufWriter.Read(buf) + } + if len(ts.bufData) < len(buf) { + switch { + case len(buf) <= buf8K: + ts.bufData = make([]byte, buf8K) + case len(buf) <= buf16K: + ts.bufData = make([]byte, buf16K) + case len(buf) <= buf32K: + ts.bufData = make([]byte, buf32K) + default: + ts.bufData = make([]byte, len(buf)) + } + } + buf2 := ts.bufData[:len(buf)] + + n, err := ts.tarR.Read(buf2) + if err != nil { + if err == io.EOF { + if _, err := ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + if !ts.first { + ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter}) + ts.fileCounter++ + ts.h.Reset() + } else { + ts.first = false + } + + if _, err := ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + + currentHeader, err := ts.tarR.Next() + if err != nil { + if err == io.EOF { + if err := ts.tarW.Close(); err != nil { + return 0, err + } + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + if err := ts.writer.Close(); err != nil { + return 0, err + } + ts.finished = true + return ts.bufWriter.Read(buf) + } + return 0, err + } + + ts.currentFile = path.Join(".", path.Join("/", currentHeader.Name)) + if err := ts.encodeHeader(currentHeader); err != nil { + return 0, err + } + if err := ts.tarW.WriteHeader(currentHeader); err != nil { + return 0, err + } + + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + ts.writer.Flush() + + return ts.bufWriter.Read(buf) + } + return 0, err + } + + // Filling the hash buffer + if _, err = ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + + // Filling the tar writer + if _, err = ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + + // Filling the output writer + if _, err = io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + ts.writer.Flush() + + return ts.bufWriter.Read(buf) +} + +func (ts *tarSum) Sum(extra []byte) string { + ts.sums.SortBySums() + h := ts.tHash.Hash() + if extra != nil { + h.Write(extra) + } + for _, fis := range ts.sums { + h.Write([]byte(fis.Sum())) + } + checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil)) + return checksum +} + +func (ts *tarSum) GetSums() FileInfoSums { + return ts.sums +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md 
b/extender/code/vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md new file mode 100644 index 000000000..89b2e49f9 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md @@ -0,0 +1,230 @@ +page_title: TarSum checksum specification +page_description: Documentation for algorithms used in the TarSum checksum calculation +page_keywords: docker, checksum, validation, tarsum + +# TarSum Checksum Specification + +## Abstract + +This document describes the algorithms used in performing the TarSum checksum +calculation on filesystem layers, the need for this method over existing +methods, and the versioning of this calculation. + +## Warning + +This checksum algorithm is for best-effort comparison of file trees with fuzzy logic. + +This is _not_ a cryptographic attestation, and should not be considered secure. + +## Introduction + +The transportation of filesystems, regarding Docker, is done with tar(1) +archives. There are a variety of tar serialization formats [2], and a key +concern here is ensuring a repeatable checksum given a set of inputs from a +generic tar archive. Types of transportation include distribution to and from a +registry endpoint, saving and loading through commands or Docker daemon APIs, +transferring the build context from client to Docker daemon, and committing the +filesystem of a container to become an image. + +As tar archives are used for transit, but not preserved in many situations, the +focus of the algorithm is to ensure the integrity of the preserved filesystem, +while maintaining a deterministic accountability. This includes neither +constraining the ordering or manipulation of the files during the creation or +unpacking of the archive, nor include additional metadata state about the file +system attributes. + +## Intended Audience + +This document is outlining the methods used for consistent checksum calculation +for filesystems transported via tar archives. + +Auditing these methodologies is an open and iterative process. This document +should accommodate the review of source code. Ultimately, this document should +be the starting point of further refinements to the algorithm and its future +versions. + +## Concept + +The checksum mechanism must ensure the integrity and assurance of the +filesystem payload. + +## Checksum Algorithm Profile + +A checksum mechanism must define the following operations and attributes: + +* Associated hashing cipher - used to checksum each file payload and attribute + information. +* Checksum list - each file of the filesystem archive has its checksum + calculated from the payload and attributes of the file. The final checksum is + calculated from this list, with specific ordering. +* Version - as the algorithm adapts to requirements, there are behaviors of the + algorithm to manage by versioning. +* Archive being calculated - the tar archive having its checksum calculated + +## Elements of TarSum checksum + +The calculated sum output is a text string. The elements included in the output +of the calculated sum comprise the information needed for validation of the sum +(TarSum version and hashing cipher used) and the expected checksum in hexadecimal +form. 
+ +There are two delimiters used: +* '+' separates TarSum version from hashing cipher +* ':' separates calculation mechanics from expected hash + +Example: + +``` + "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e" + | | \ | + | | \ | + |_version_|_cipher__|__ | + | \ | + |_calculation_mechanics_|______________________expected_sum_______________________| +``` + +## Versioning + +Versioning was introduced [0] to accommodate differences in calculation needed, +and the ability to maintain backward compatibility. + +The general algorithm is described further in the 'Calculation' section. + +### Version0 + +This is the initial version of TarSum. + +Its element in the TarSum checksum string is `tarsum`. + +### Version1 + +Its element in the TarSum checksum is `tarsum.v1`. + +The notable changes in this version: +* Exclusion of file `mtime` from the file information headers, in each file + checksum calculation +* Inclusion of extended attributes (`xattrs`. Also seen as `SCHILY.xattr.` prefixed Pax + tar file info headers) keys and values in each file checksum calculation + +### VersionDev + +*Do not use unless validating refinements to the checksum algorithm* + +Its element in the TarSum checksum is `tarsum.dev`. + +This is a floating placeholder for the next version and grounds for testing +changes. The methods used for calculation are subject to change without notice, +and this version is for testing and not for production use. + +## Ciphers + +The official default and standard hashing cipher used in the calculation mechanic +is `sha256`. This refers to the SHA256 hash algorithm as defined in FIPS 180-4. + +Though the TarSum algorithm itself is not exclusively bound to the single +hashing cipher `sha256`, support for alternate hashing ciphers was later added +[1]. Use cases for an alternate cipher could include future-proofing the TarSum +checksum format and using faster cipher hashes for tar filesystem checksums. + +## Calculation + +### Requirement + +As mentioned earlier, the calculation takes into consideration the lifecycle of +the tar archive, in that the tar archive is not an immutable, permanent +artifact. Otherwise options like relying on a known hashing cipher checksum of +the archive itself would be reliable enough. The tar archive of the filesystem +is used as a transportation medium for Docker images, and the archive is +discarded once its contents are extracted. Therefore, for consistent +validation, items such as order of files in the tar archive and time stamps are +subject to change once an image is received. + +### Process + +The method is typically iterative due to reading tar info headers from the +archive stream, though this is not a strict requirement. + +#### Files + +Each file in the tar archive has its contents (headers and body) checksummed +individually using the designated associated hashing cipher. The ordered +headers of the file are written to the checksum calculation first, and then the +payload of the file body. + +The resulting checksum of the file is appended to the list of file sums. The +sum is encoded as a string of the hexadecimal digest. Additionally, the file +name and position in the archive are kept as reference for special ordering.
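To ground the spec in the vendored Go API, a sketch of driving a full calculation; the archive name is hypothetical, and note the sum is only final once the `TarSum` reader has been fully consumed:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"

	"github.com/docker/docker/pkg/tarsum"
)

func main() {
	f, err := os.Open("layer.tar") // hypothetical tar archive
	if err != nil {
		panic(err)
	}
	defer f.Close()

	ts, err := tarsum.NewTarSum(f, true, tarsum.Version1)
	if err != nil {
		panic(err)
	}
	// TarSum is an io.Reader; the checksum is complete only after the
	// whole archive has been read through it.
	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
		panic(err)
	}
	fmt.Println(ts.Sum(nil)) // e.g. tarsum.v1+sha256:<hex digest>
}
```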
+ +#### Headers + +The following headers are read, in this +order (with the corresponding representation of each value): +* 'name' - string +* 'mode' - string of the base10 integer +* 'uid' - string of the integer +* 'gid' - string of the integer +* 'size' - string of the integer +* 'mtime' (_Version0 only_) - string of integer of the seconds since 1970-01-01 00:00:00 UTC +* 'typeflag' - string of the char +* 'linkname' - string +* 'uname' - string +* 'gname' - string +* 'devmajor' - string of the integer +* 'devminor' - string of the integer + +For >= Version1, the extended attribute headers ("SCHILY.xattr." prefixed pax +headers) are included after the above list. These xattr key/value pairs are +first sorted by key. + +#### Header Format + +The ordered headers are written to the hash in the format of + + "{.key}{.value}" + +with no newline. + +#### Body + +After the ordered headers of the file have been added to the checksum for the +file, the body of the file is written to the hash. + +#### List of file sums + +The list of file sums is sorted by the string of the hexadecimal digest. + +If there are two files in the tar with matching paths, the order of occurrence +for that path is reflected for the sums of the corresponding file header and +body. + +#### Final Checksum + +Begin with a fresh or initial state of the associated hash cipher. If there is +additional payload to include in the TarSum calculation for the archive, it is +written first. Then each checksum from the ordered list of file sums is written +to the hash. + +The resulting digest is formatted per the Elements of TarSum checksum, +including the TarSum version, the associated hash cipher and the hexadecimal +encoded checksum digest. + +## Security Considerations + +The initial version of TarSum has undergone one update that could invalidate +handcrafted tar archives. The tar archive format supports appending of files +with same names as prior files in the archive. The latter file will clobber the +prior file of the same path. Due to this the algorithm now accounts for files +with matching paths, and orders the list of file sums accordingly [3]. + +## Footnotes + +* [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0 +* [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e +* [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29 +* [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31 + +## Acknowledgments + +Joffrey F (shin-) and Guillaume J. Charmes (creack) on the initial work of the +TarSum calculation. + diff --git a/extender/code/vendor/github.com/docker/docker/pkg/tarsum/versioning.go b/extender/code/vendor/github.com/docker/docker/pkg/tarsum/versioning.go new file mode 100644 index 000000000..aa1f17186 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/tarsum/versioning.go @@ -0,0 +1,158 @@ +package tarsum // import "github.com/docker/docker/pkg/tarsum" + +import ( + "archive/tar" + "errors" + "io" + "sort" + "strconv" + "strings" +) + +// Version is used for versioning of the TarSum algorithm +// based on the prefix of the hash used +// i.e.
"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b" +type Version int + +// Prefix of "tarsum" +const ( + Version0 Version = iota + Version1 + // VersionDev this constant will be either the latest or an unsettled next-version of the TarSum calculation + VersionDev +) + +// WriteV1Header writes a tar header to a writer in V1 tarsum format. +func WriteV1Header(h *tar.Header, w io.Writer) { + for _, elem := range v1TarHeaderSelect(h) { + w.Write([]byte(elem[0] + elem[1])) + } +} + +// VersionLabelForChecksum returns the label for the given tarsum +// checksum, i.e., everything before the first `+` character in +// the string or an empty string if no label separator is found. +func VersionLabelForChecksum(checksum string) string { + // Checksums are in the form: {versionLabel}+{hashID}:{hex} + sepIndex := strings.Index(checksum, "+") + if sepIndex < 0 { + return "" + } + return checksum[:sepIndex] +} + +// GetVersions gets a list of all known tarsum versions. +func GetVersions() []Version { + v := []Version{} + for k := range tarSumVersions { + v = append(v, k) + } + return v +} + +var ( + tarSumVersions = map[Version]string{ + Version0: "tarsum", + Version1: "tarsum.v1", + VersionDev: "tarsum.dev", + } + tarSumVersionsByName = map[string]Version{ + "tarsum": Version0, + "tarsum.v1": Version1, + "tarsum.dev": VersionDev, + } +) + +func (tsv Version) String() string { + return tarSumVersions[tsv] +} + +// GetVersionFromTarsum returns the Version from the provided string. +func GetVersionFromTarsum(tarsum string) (Version, error) { + tsv := tarsum + if strings.Contains(tarsum, "+") { + tsv = strings.SplitN(tarsum, "+", 2)[0] + } + for v, s := range tarSumVersions { + if s == tsv { + return v, nil + } + } + return -1, ErrNotVersion +} + +// Errors that may be returned by functions in this package +var ( + ErrNotVersion = errors.New("string does not include a TarSum Version") + ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented") +) + +// tarHeaderSelector is the interface which different versions +// of tarsum should use for selecting and ordering tar headers +// for each item in the archive. +type tarHeaderSelector interface { + selectHeaders(h *tar.Header) (orderedHeaders [][2]string) +} + +type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string) + +func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) { + return f(h) +} + +func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + return [][2]string{ + {"name", h.Name}, + {"mode", strconv.FormatInt(h.Mode, 10)}, + {"uid", strconv.Itoa(h.Uid)}, + {"gid", strconv.Itoa(h.Gid)}, + {"size", strconv.FormatInt(h.Size, 10)}, + {"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)}, + {"typeflag", string([]byte{h.Typeflag})}, + {"linkname", h.Linkname}, + {"uname", h.Uname}, + {"gname", h.Gname}, + {"devmajor", strconv.FormatInt(h.Devmajor, 10)}, + {"devminor", strconv.FormatInt(h.Devminor, 10)}, + } +} + +func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + // Get extended attributes. + xAttrKeys := make([]string, len(h.Xattrs)) + for k := range h.Xattrs { + xAttrKeys = append(xAttrKeys, k) + } + sort.Strings(xAttrKeys) + + // Make the slice with enough capacity to hold the 11 basic headers + // we want from the v0 selector plus however many xattrs we have. + orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys)) + + // Copy all headers from v0 excluding the 'mtime' header (the 5th element). 
+ v0headers := v0TarHeaderSelect(h) + orderedHeaders = append(orderedHeaders, v0headers[0:5]...) + orderedHeaders = append(orderedHeaders, v0headers[6:]...) + + // Finally, append the sorted xattrs. + for _, k := range xAttrKeys { + orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]}) + } + + return +} + +var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{ + Version0: v0TarHeaderSelect, + Version1: v1TarHeaderSelect, + VersionDev: v1TarHeaderSelect, +} + +func getTarHeaderSelector(v Version) (tarHeaderSelector, error) { + headerSelector, ok := registeredHeaderSelectors[v] + if !ok { + return nil, ErrVersionNotImplemented + } + + return headerSelector, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go b/extender/code/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go new file mode 100644 index 000000000..c4c45a35e --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go @@ -0,0 +1,22 @@ +package tarsum // import "github.com/docker/docker/pkg/tarsum" + +import ( + "io" +) + +type writeCloseFlusher interface { + io.WriteCloser + Flush() error +} + +type nopCloseFlusher struct { + io.Writer +} + +func (n *nopCloseFlusher) Close() error { + return nil +} + +func (n *nopCloseFlusher) Flush() error { + return nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/term/ascii.go b/extender/code/vendor/github.com/docker/docker/pkg/term/ascii.go new file mode 100644 index 000000000..87bca8d4a --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/term/ascii.go @@ -0,0 +1,66 @@ +package term // import "github.com/docker/docker/pkg/term" + +import ( + "fmt" + "strings" +) + +// ASCII list the possible supported ASCII key sequence +var ASCII = []string{ + "ctrl-@", + "ctrl-a", + "ctrl-b", + "ctrl-c", + "ctrl-d", + "ctrl-e", + "ctrl-f", + "ctrl-g", + "ctrl-h", + "ctrl-i", + "ctrl-j", + "ctrl-k", + "ctrl-l", + "ctrl-m", + "ctrl-n", + "ctrl-o", + "ctrl-p", + "ctrl-q", + "ctrl-r", + "ctrl-s", + "ctrl-t", + "ctrl-u", + "ctrl-v", + "ctrl-w", + "ctrl-x", + "ctrl-y", + "ctrl-z", + "ctrl-[", + "ctrl-\\", + "ctrl-]", + "ctrl-^", + "ctrl-_", +} + +// ToBytes converts a string representing a suite of key-sequence to the corresponding ASCII code. +func ToBytes(keys string) ([]byte, error) { + codes := []byte{} +next: + for _, key := range strings.Split(keys, ",") { + if len(key) != 1 { + for code, ctrl := range ASCII { + if ctrl == key { + codes = append(codes, byte(code)) + continue next + } + } + if key == "DEL" { + codes = append(codes, 127) + } else { + return nil, fmt.Errorf("Unknown character: '%s'", key) + } + } else { + codes = append(codes, key[0]) + } + } + return codes, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/term/proxy.go b/extender/code/vendor/github.com/docker/docker/pkg/term/proxy.go new file mode 100644 index 000000000..da733e584 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/term/proxy.go @@ -0,0 +1,78 @@ +package term // import "github.com/docker/docker/pkg/term" + +import ( + "io" +) + +// EscapeError is special error which returned by a TTY proxy reader's Read() +// method in case its detach escape sequence is read. +type EscapeError struct{} + +func (EscapeError) Error() string { + return "read escape sequence" +} + +// escapeProxy is used only for attaches with a TTY. 
It is used to proxy +// stdin keypresses from the underlying reader and look for the passed in +// escape key sequence to signal a detach. +type escapeProxy struct { + escapeKeys []byte + escapeKeyPos int + r io.Reader +} + +// NewEscapeProxy returns a new TTY proxy reader which wraps the given reader +// and detects when the specified escape keys are read, in which case the Read +// method will return an error of type EscapeError. +func NewEscapeProxy(r io.Reader, escapeKeys []byte) io.Reader { + return &escapeProxy{ + escapeKeys: escapeKeys, + r: r, + } +} + +func (r *escapeProxy) Read(buf []byte) (int, error) { + nr, err := r.r.Read(buf) + + if len(r.escapeKeys) == 0 { + return nr, err + } + + preserve := func() { + // this preserves the original key presses in the passed in buffer + nr += r.escapeKeyPos + preserve := make([]byte, 0, r.escapeKeyPos+len(buf)) + preserve = append(preserve, r.escapeKeys[:r.escapeKeyPos]...) + preserve = append(preserve, buf...) + r.escapeKeyPos = 0 + copy(buf[0:nr], preserve) + } + + if nr != 1 || err != nil { + if r.escapeKeyPos > 0 { + preserve() + } + return nr, err + } + + if buf[0] != r.escapeKeys[r.escapeKeyPos] { + if r.escapeKeyPos > 0 { + preserve() + } + return nr, nil + } + + if r.escapeKeyPos == len(r.escapeKeys)-1 { + return 0, EscapeError{} + } + + // Looks like we've got an escape key, but we need to match again on the next + // read. + // Store the current escape key we found so we can look for the next one on + // the next read. + // Since this is an escape key, make sure we don't let the caller read it + // If later on we find that this is not the escape sequence, we'll add the + // keys back + r.escapeKeyPos++ + return nr - r.escapeKeyPos, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/term/tc.go b/extender/code/vendor/github.com/docker/docker/pkg/term/tc.go new file mode 100644 index 000000000..01bcaa8ab --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/term/tc.go @@ -0,0 +1,20 @@ +// +build !windows + +package term // import "github.com/docker/docker/pkg/term" + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/unix" +) + +func tcget(fd uintptr, p *Termios) syscall.Errno { + _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p))) + return err +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p))) + return err +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/term/term.go b/extender/code/vendor/github.com/docker/docker/pkg/term/term.go new file mode 100644 index 000000000..0589a9551 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/term/term.go @@ -0,0 +1,124 @@ +// +build !windows + +// Package term provides structures and helper functions to work with +// terminal (state, sizes). +package term // import "github.com/docker/docker/pkg/term" + +import ( + "errors" + "fmt" + "io" + "os" + "os/signal" + + "golang.org/x/sys/unix" +) + +var ( + // ErrInvalidState is returned if the state of the terminal is invalid. + ErrInvalidState = errors.New("Invalid terminal state") +) + +// State represents the state of the terminal. +type State struct { + termios Termios +} + +// Winsize represents the size of the terminal window. +type Winsize struct { + Height uint16 + Width uint16 + x uint16 + y uint16 +} + +// StdStreams returns the standard streams (stdin, stdout, stderr). 
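+// On Unix the process streams are returned as-is; no emulation layer is
+// needed.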
+func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + return os.Stdin, os.Stdout, os.Stderr +} + +// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. +func GetFdInfo(in interface{}) (uintptr, bool) { + var inFd uintptr + var isTerminalIn bool + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminalIn = IsTerminal(inFd) + } + return inFd, isTerminalIn +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + var termios Termios + return tcget(fd, &termios) == 0 +} + +// RestoreTerminal restores the terminal connected to the given file descriptor +// to a previous state. +func RestoreTerminal(fd uintptr, state *State) error { + if state == nil { + return ErrInvalidState + } + if err := tcset(fd, &state.termios); err != 0 { + return err + } + return nil +} + +// SaveState saves the state of the terminal connected to the given file descriptor. +func SaveState(fd uintptr) (*State, error) { + var oldState State + if err := tcget(fd, &oldState.termios); err != 0 { + return nil, err + } + + return &oldState, nil +} + +// DisableEcho applies the specified state to the terminal connected to the file +// descriptor, with echo disabled. +func DisableEcho(fd uintptr, state *State) error { + newState := state.termios + newState.Lflag &^= unix.ECHO + + if err := tcset(fd, &newState); err != 0 { + return err + } + handleInterrupt(fd, state) + return nil +} + +// SetRawTerminal puts the terminal connected to the given file descriptor into +// raw mode and returns the previous state. On UNIX, this puts both the input +// and output into raw mode. On Windows, it only puts the input into raw mode. +func SetRawTerminal(fd uintptr) (*State, error) { + oldState, err := MakeRaw(fd) + if err != nil { + return nil, err + } + handleInterrupt(fd, oldState) + return oldState, err +} + +// SetRawTerminalOutput puts the output of terminal connected to the given file +// descriptor into raw mode. On UNIX, this does nothing and returns nil for the +// state. On Windows, it disables LF -> CRLF translation. +func SetRawTerminalOutput(fd uintptr) (*State, error) { + return nil, nil +} + +func handleInterrupt(fd uintptr, state *State) { + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, os.Interrupt) + go func() { + for range sigchan { + // quit cleanly and the new terminal item is on a new line + fmt.Println() + signal.Stop(sigchan) + close(sigchan) + RestoreTerminal(fd, state) + os.Exit(1) + } + }() +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/term/term_windows.go b/extender/code/vendor/github.com/docker/docker/pkg/term/term_windows.go new file mode 100644 index 000000000..a3c3db131 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/term/term_windows.go @@ -0,0 +1,221 @@ +package term // import "github.com/docker/docker/pkg/term" + +import ( + "io" + "os" + "os/signal" + "syscall" // used for STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and STD_ERROR_HANDLE + + "github.com/Azure/go-ansiterm/winterm" + "github.com/docker/docker/pkg/term/windows" +) + +// State holds the console mode for the terminal. +type State struct { + mode uint32 +} + +// Winsize is used for window size. +type Winsize struct { + Height uint16 + Width uint16 +} + +// vtInputSupported is true if winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported by the console +var vtInputSupported bool + +// StdStreams returns the standard streams (stdin, stdout, stderr). 
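+// On Windows the returned streams may be ANSI-emulation wrappers rather
+// than the raw os.Std* files when the console lacks native virtual
+// terminal support; see the emulate* flags below.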
+func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + // Turn on VT handling on all std handles, if possible. This might + // fail, in which case we will fall back to terminal emulation. + var emulateStdin, emulateStdout, emulateStderr bool + fd := os.Stdin.Fd() + if mode, err := winterm.GetConsoleMode(fd); err == nil { + // Validate that winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it. + if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_INPUT); err != nil { + emulateStdin = true + } else { + vtInputSupported = true + } + // Unconditionally set the console mode back even on failure because SetConsoleMode + // remembers invalid bits on input handles. + winterm.SetConsoleMode(fd, mode) + } + + fd = os.Stdout.Fd() + if mode, err := winterm.GetConsoleMode(fd); err == nil { + // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. + if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil { + emulateStdout = true + } else { + winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING) + } + } + + fd = os.Stderr.Fd() + if mode, err := winterm.GetConsoleMode(fd); err == nil { + // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. + if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil { + emulateStderr = true + } else { + winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING) + } + } + + // Temporarily use STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and + // STD_ERROR_HANDLE from syscall rather than x/sys/windows as long as + // go-ansiterm hasn't switch to x/sys/windows. + // TODO: switch back to x/sys/windows once go-ansiterm has switched + if emulateStdin { + stdIn = windowsconsole.NewAnsiReader(syscall.STD_INPUT_HANDLE) + } else { + stdIn = os.Stdin + } + + if emulateStdout { + stdOut = windowsconsole.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE) + } else { + stdOut = os.Stdout + } + + if emulateStderr { + stdErr = windowsconsole.NewAnsiWriter(syscall.STD_ERROR_HANDLE) + } else { + stdErr = os.Stderr + } + + return +} + +// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. +func GetFdInfo(in interface{}) (uintptr, bool) { + return windowsconsole.GetHandleInfo(in) +} + +// GetWinsize returns the window size based on the specified file descriptor. +func GetWinsize(fd uintptr) (*Winsize, error) { + info, err := winterm.GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil, err + } + + winsize := &Winsize{ + Width: uint16(info.Window.Right - info.Window.Left + 1), + Height: uint16(info.Window.Bottom - info.Window.Top + 1), + } + + return winsize, nil +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + return windowsconsole.IsConsole(fd) +} + +// RestoreTerminal restores the terminal connected to the given file descriptor +// to a previous state. +func RestoreTerminal(fd uintptr, state *State) error { + return winterm.SetConsoleMode(fd, state.mode) +} + +// SaveState saves the state of the terminal connected to the given file descriptor. +func SaveState(fd uintptr) (*State, error) { + mode, e := winterm.GetConsoleMode(fd) + if e != nil { + return nil, e + } + + return &State{mode: mode}, nil +} + +// DisableEcho disables echo for the terminal connected to the given file descriptor. 
+// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx +func DisableEcho(fd uintptr, state *State) error { + mode := state.mode + mode &^= winterm.ENABLE_ECHO_INPUT + mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT + err := winterm.SetConsoleMode(fd, mode) + if err != nil { + return err + } + + // Register an interrupt handler to catch and restore prior state + restoreAtInterrupt(fd, state) + return nil +} + +// SetRawTerminal puts the terminal connected to the given file descriptor into +// raw mode and returns the previous state. On UNIX, this puts both the input +// and output into raw mode. On Windows, it only puts the input into raw mode. +func SetRawTerminal(fd uintptr) (*State, error) { + state, err := MakeRaw(fd) + if err != nil { + return nil, err + } + + // Register an interrupt handler to catch and restore prior state + restoreAtInterrupt(fd, state) + return state, err +} + +// SetRawTerminalOutput puts the output of terminal connected to the given file +// descriptor into raw mode. On UNIX, this does nothing and returns nil for the +// state. On Windows, it disables LF -> CRLF translation. +func SetRawTerminalOutput(fd uintptr) (*State, error) { + state, err := SaveState(fd) + if err != nil { + return nil, err + } + + // Ignore failures, since winterm.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this + // version of Windows. + winterm.SetConsoleMode(fd, state.mode|winterm.DISABLE_NEWLINE_AUTO_RETURN) + return state, err +} + +// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be restored. +func MakeRaw(fd uintptr) (*State, error) { + state, err := SaveState(fd) + if err != nil { + return nil, err + } + + mode := state.mode + + // See + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx + + // Disable these modes + mode &^= winterm.ENABLE_ECHO_INPUT + mode &^= winterm.ENABLE_LINE_INPUT + mode &^= winterm.ENABLE_MOUSE_INPUT + mode &^= winterm.ENABLE_WINDOW_INPUT + mode &^= winterm.ENABLE_PROCESSED_INPUT + + // Enable these modes + mode |= winterm.ENABLE_EXTENDED_FLAGS + mode |= winterm.ENABLE_INSERT_MODE + mode |= winterm.ENABLE_QUICK_EDIT_MODE + if vtInputSupported { + mode |= winterm.ENABLE_VIRTUAL_TERMINAL_INPUT + } + + err = winterm.SetConsoleMode(fd, mode) + if err != nil { + return nil, err + } + return state, nil +} + +func restoreAtInterrupt(fd uintptr, state *State) { + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, os.Interrupt) + + go func() { + _ = <-sigchan + RestoreTerminal(fd, state) + os.Exit(0) + }() +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/term/termios_bsd.go b/extender/code/vendor/github.com/docker/docker/pkg/term/termios_bsd.go new file mode 100644 index 000000000..48b16f520 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/term/termios_bsd.go @@ -0,0 +1,42 @@ +// +build darwin freebsd openbsd netbsd + +package term // import "github.com/docker/docker/pkg/term" + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +const ( + getTermios = unix.TIOCGETA + setTermios = unix.TIOCSETA +) + +// Termios is the Unix API for terminal I/O. 
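+// The BSDs use the TIOCGETA/TIOCSETA ioctls declared above; the Linux
+// variant in termios_linux.go uses TCGETS/TCSETS instead.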
+type Termios unix.Termios + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) + newState.Oflag &^= unix.OPOST + newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) + newState.Cflag &^= (unix.CSIZE | unix.PARENB) + newState.Cflag |= unix.CS8 + newState.Cc[unix.VMIN] = 1 + newState.Cc[unix.VTIME] = 0 + + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + + return &oldState, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/term/termios_linux.go b/extender/code/vendor/github.com/docker/docker/pkg/term/termios_linux.go new file mode 100644 index 000000000..6d4c63fdb --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/term/termios_linux.go @@ -0,0 +1,39 @@ +package term // import "github.com/docker/docker/pkg/term" + +import ( + "golang.org/x/sys/unix" +) + +const ( + getTermios = unix.TCGETS + setTermios = unix.TCSETS +) + +// Termios is the Unix API for terminal I/O. +type Termios unix.Termios + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + termios, err := unix.IoctlGetTermios(int(fd), getTermios) + if err != nil { + return nil, err + } + + var oldState State + oldState.termios = Termios(*termios) + + termios.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) + termios.Oflag &^= unix.OPOST + termios.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) + termios.Cflag &^= (unix.CSIZE | unix.PARENB) + termios.Cflag |= unix.CS8 + termios.Cc[unix.VMIN] = 1 + termios.Cc[unix.VTIME] = 0 + + if err := unix.IoctlSetTermios(int(fd), setTermios, termios); err != nil { + return nil, err + } + return &oldState, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go b/extender/code/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go new file mode 100644 index 000000000..1d7c452cc --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go @@ -0,0 +1,263 @@ +// +build windows + +package windowsconsole // import "github.com/docker/docker/pkg/term/windows" + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "strings" + "unsafe" + + ansiterm "github.com/Azure/go-ansiterm" + "github.com/Azure/go-ansiterm/winterm" +) + +const ( + escapeSequence = ansiterm.KEY_ESC_CSI +) + +// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. +type ansiReader struct { + file *os.File + fd uintptr + buffer []byte + cbBuffer int + command []byte +} + +// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a +// Windows console input handle. 
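+// Callers pass a standard handle constant, e.g.
+// NewAnsiReader(syscall.STD_INPUT_HANDLE), as term.StdStreams does above.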
+func NewAnsiReader(nFile int) io.ReadCloser { + initLogger() + file, fd := winterm.GetStdFile(nFile) + return &ansiReader{ + file: file, + fd: fd, + command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), + buffer: make([]byte, 0), + } +} + +// Close closes the wrapped file. +func (ar *ansiReader) Close() (err error) { + return ar.file.Close() +} + +// Fd returns the file descriptor of the wrapped file. +func (ar *ansiReader) Fd() uintptr { + return ar.fd +} + +// Read reads up to len(p) bytes of translated input events into p. +func (ar *ansiReader) Read(p []byte) (int, error) { + if len(p) == 0 { + return 0, nil + } + + // Previously read bytes exist, read as much as we can and return + if len(ar.buffer) > 0 { + logger.Debugf("Reading previously cached bytes") + + originalLength := len(ar.buffer) + copiedLength := copy(p, ar.buffer) + + if copiedLength == originalLength { + ar.buffer = make([]byte, 0, len(p)) + } else { + ar.buffer = ar.buffer[copiedLength:] + } + + logger.Debugf("Read from cache p[%d]: % x", copiedLength, p) + return copiedLength, nil + } + + // Read and translate key events + events, err := readInputEvents(ar.fd, len(p)) + if err != nil { + return 0, err + } else if len(events) == 0 { + logger.Debug("No input events detected") + return 0, nil + } + + keyBytes := translateKeyEvents(events, []byte(escapeSequence)) + + // Save excess bytes and right-size keyBytes + if len(keyBytes) > len(p) { + logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p)) + ar.buffer = keyBytes[len(p):] + keyBytes = keyBytes[:len(p)] + } else if len(keyBytes) == 0 { + logger.Debug("No key bytes returned from the translator") + return 0, nil + } + + copiedLength := copy(p, keyBytes) + if copiedLength != len(keyBytes) { + return 0, errors.New("unexpected copy length encountered") + } + + logger.Debugf("Read p[%d]: % x", copiedLength, p) + logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes) + return copiedLength, nil +} + +// readInputEvents polls until at least one event is available. +func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) { + // Determine the maximum number of records to retrieve + // -- Cast around the type system to obtain the size of a single INPUT_RECORD. + // unsafe.Sizeof requires an expression vs. a type-reference; the casting + // tricks the type system into believing it has such an expression. 
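+	// The pointer is never dereferenced: unsafe.Sizeof only inspects its
+	// static type, so casting &maxBytes here is safe.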
+ recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) + countRecords := maxBytes / recordSize + if countRecords > ansiterm.MAX_INPUT_EVENTS { + countRecords = ansiterm.MAX_INPUT_EVENTS + } else if countRecords == 0 { + countRecords = 1 + } + logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize) + + // Wait for and read input events + events := make([]winterm.INPUT_RECORD, countRecords) + nEvents := uint32(0) + eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE) + if err != nil { + return nil, err + } + + if eventsExist { + err = winterm.ReadConsoleInput(fd, events, &nEvents) + if err != nil { + return nil, err + } + } + + // Return a slice restricted to the number of returned records + logger.Debugf("[windows] readInputEvents: Read %v events", nEvents) + return events[:nEvents], nil +} + +// KeyEvent Translation Helpers + +var arrowKeyMapPrefix = map[uint16]string{ + winterm.VK_UP: "%s%sA", + winterm.VK_DOWN: "%s%sB", + winterm.VK_RIGHT: "%s%sC", + winterm.VK_LEFT: "%s%sD", +} + +var keyMapPrefix = map[uint16]string{ + winterm.VK_UP: "\x1B[%sA", + winterm.VK_DOWN: "\x1B[%sB", + winterm.VK_RIGHT: "\x1B[%sC", + winterm.VK_LEFT: "\x1B[%sD", + winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 + winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4 + winterm.VK_INSERT: "\x1B[2%s~", + winterm.VK_DELETE: "\x1B[3%s~", + winterm.VK_PRIOR: "\x1B[5%s~", + winterm.VK_NEXT: "\x1B[6%s~", + winterm.VK_F1: "", + winterm.VK_F2: "", + winterm.VK_F3: "\x1B[13%s~", + winterm.VK_F4: "\x1B[14%s~", + winterm.VK_F5: "\x1B[15%s~", + winterm.VK_F6: "\x1B[17%s~", + winterm.VK_F7: "\x1B[18%s~", + winterm.VK_F8: "\x1B[19%s~", + winterm.VK_F9: "\x1B[20%s~", + winterm.VK_F10: "\x1B[21%s~", + winterm.VK_F11: "\x1B[23%s~", + winterm.VK_F12: "\x1B[24%s~", +} + +// translateKeyEvents converts the input events into the appropriate ANSI string. +func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte { + var buffer bytes.Buffer + for _, event := range events { + if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 { + buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence)) + } + } + + return buffer.Bytes() +} + +// keyToString maps the given input event record to the corresponding string. +func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string { + if keyEvent.UnicodeChar == 0 { + return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence) + } + + _, alt, control := getControlKeys(keyEvent.ControlKeyState) + if control { + // TODO(azlinux): Implement following control sequences + // -D Signals the end of input from the keyboard; also exits current shell. + // -H Deletes the first character to the left of the cursor. Also called the ERASE key. + // -Q Restarts printing after it has been stopped with -s. + // -S Suspends printing on the screen (does not stop the program). + // -U Deletes all characters on the current line. Also called the KILL key. + // -E Quits current command and creates a core + + } + + // +Key generates ESC N Key + if !control && alt { + return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar)) + } + + return string(keyEvent.UnicodeChar) +} + +// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string. 
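+// An unmodified up arrow, for instance, yields the cursor-up sequence
+// built from arrowKeyMapPrefix above (escape prefix + "A").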
+func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string { + shift, alt, control := getControlKeys(controlState) + modifier := getControlKeysModifier(shift, alt, control) + + if format, ok := arrowKeyMapPrefix[key]; ok { + return fmt.Sprintf(format, escapeSequence, modifier) + } + + if format, ok := keyMapPrefix[key]; ok { + return fmt.Sprintf(format, modifier) + } + + return "" +} + +// getControlKeys extracts the shift, alt, and ctrl key states. +func getControlKeys(controlState uint32) (shift, alt, control bool) { + shift = 0 != (controlState & winterm.SHIFT_PRESSED) + alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED)) + control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED)) + return shift, alt, control +} + +// getControlKeysModifier returns the ANSI modifier for the given combination of control keys. +func getControlKeysModifier(shift, alt, control bool) string { + if shift && alt && control { + return ansiterm.KEY_CONTROL_PARAM_8 + } + if alt && control { + return ansiterm.KEY_CONTROL_PARAM_7 + } + if shift && control { + return ansiterm.KEY_CONTROL_PARAM_6 + } + if control { + return ansiterm.KEY_CONTROL_PARAM_5 + } + if shift && alt { + return ansiterm.KEY_CONTROL_PARAM_4 + } + if alt { + return ansiterm.KEY_CONTROL_PARAM_3 + } + if shift { + return ansiterm.KEY_CONTROL_PARAM_2 + } + return "" +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go b/extender/code/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go new file mode 100644 index 000000000..7799a03fc --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go @@ -0,0 +1,64 @@ +// +build windows + +package windowsconsole // import "github.com/docker/docker/pkg/term/windows" + +import ( + "io" + "os" + + ansiterm "github.com/Azure/go-ansiterm" + "github.com/Azure/go-ansiterm/winterm" +) + +// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation. +type ansiWriter struct { + file *os.File + fd uintptr + infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO + command []byte + escapeSequence []byte + inAnsiSequence bool + parser *ansiterm.AnsiParser +} + +// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a +// Windows console output handle. +func NewAnsiWriter(nFile int) io.Writer { + initLogger() + file, fd := winterm.GetStdFile(nFile) + info, err := winterm.GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil + } + + parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) + logger.Infof("newAnsiWriter: parser %p", parser) + + aw := &ansiWriter{ + file: file, + fd: fd, + infoReset: info, + command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), + escapeSequence: []byte(ansiterm.KEY_ESC_CSI), + parser: parser, + } + + logger.Infof("newAnsiWriter: aw.parser %p", aw.parser) + logger.Infof("newAnsiWriter: %v", aw) + return aw +} + +func (aw *ansiWriter) Fd() uintptr { + return aw.fd +} + +// Write writes len(p) bytes from p to the underlying data stream. 
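+// The bytes are not written to the file directly; they are handed to the
+// ANSI parser, which replays them as Windows console API calls.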
+func (aw *ansiWriter) Write(p []byte) (total int, err error) { + if len(p) == 0 { + return 0, nil + } + + logger.Infof("Write: % x", p) + logger.Infof("Write: %s", string(p)) + return aw.parser.Parse(p) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/term/windows/console.go b/extender/code/vendor/github.com/docker/docker/pkg/term/windows/console.go new file mode 100644 index 000000000..527401975 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/term/windows/console.go @@ -0,0 +1,35 @@ +// +build windows + +package windowsconsole // import "github.com/docker/docker/pkg/term/windows" + +import ( + "os" + + "github.com/Azure/go-ansiterm/winterm" +) + +// GetHandleInfo returns file descriptor and bool indicating whether the file is a console. +func GetHandleInfo(in interface{}) (uintptr, bool) { + switch t := in.(type) { + case *ansiReader: + return t.Fd(), true + case *ansiWriter: + return t.Fd(), true + } + + var inFd uintptr + var isTerminal bool + + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminal = IsConsole(inFd) + } + return inFd, isTerminal +} + +// IsConsole returns true if the given file descriptor is a Windows Console. +// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. +func IsConsole(fd uintptr) bool { + _, e := winterm.GetConsoleMode(fd) + return e == nil +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/term/windows/windows.go b/extender/code/vendor/github.com/docker/docker/pkg/term/windows/windows.go new file mode 100644 index 000000000..3e5593ca6 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/term/windows/windows.go @@ -0,0 +1,33 @@ +// These files implement ANSI-aware input and output streams for use by the Docker Windows client. +// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create +// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. + +package windowsconsole // import "github.com/docker/docker/pkg/term/windows" + +import ( + "io/ioutil" + "os" + "sync" + + "github.com/Azure/go-ansiterm" + "github.com/sirupsen/logrus" +) + +var logger *logrus.Logger +var initOnce sync.Once + +func initLogger() { + initOnce.Do(func() { + logFile := ioutil.Discard + + if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { + logFile, _ = os.Create("ansiReaderWriter.log") + } + + logger = &logrus.Logger{ + Out: logFile, + Formatter: new(logrus.TextFormatter), + Level: logrus.DebugLevel, + } + }) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/term/winsize.go b/extender/code/vendor/github.com/docker/docker/pkg/term/winsize.go new file mode 100644 index 000000000..a19663ad8 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/term/winsize.go @@ -0,0 +1,20 @@ +// +build !windows + +package term // import "github.com/docker/docker/pkg/term" + +import ( + "golang.org/x/sys/unix" +) + +// GetWinsize returns the window size based on the specified file descriptor. +func GetWinsize(fd uintptr) (*Winsize, error) { + uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ) + ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel} + return ws, err +} + +// SetWinsize tries to set the specified window size for the specified file descriptor. 
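+// This is the TIOCSWINSZ counterpart of GetWinsize above.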
+func SetWinsize(fd uintptr, ws *Winsize) error { + uws := &unix.Winsize{Row: ws.Height, Col: ws.Width, Xpixel: ws.x, Ypixel: ws.y} + return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, uws) +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go b/extender/code/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go new file mode 100644 index 000000000..9cf348c72 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go @@ -0,0 +1,52 @@ +// Package urlutil provides helper function to check urls kind. +// It supports http urls, git urls and transport url (tcp://, …) +package urlutil // import "github.com/docker/docker/pkg/urlutil" + +import ( + "regexp" + "strings" +) + +var ( + validPrefixes = map[string][]string{ + "url": {"http://", "https://"}, + + // The github.com/ prefix is a special case used to treat context-paths + // starting with `github.com` as a git URL if the given path does not + // exist locally. The "github.com/" prefix is kept for backward compatibility, + // and is a legacy feature. + // + // Going forward, no additional prefixes should be added, and users should + // be encouraged to use explicit URLs (https://github.com/user/repo.git) instead. + "git": {"git://", "github.com/", "git@"}, + "transport": {"tcp://", "tcp+tls://", "udp://", "unix://", "unixgram://"}, + } + urlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$") +) + +// IsURL returns true if the provided str is an HTTP(S) URL. +func IsURL(str string) bool { + return checkURL(str, "url") +} + +// IsGitURL returns true if the provided str is a git repository URL. +func IsGitURL(str string) bool { + if IsURL(str) && urlPathWithFragmentSuffix.MatchString(str) { + return true + } + return checkURL(str, "git") +} + +// IsTransportURL returns true if the provided str is a transport (tcp, tcp+tls, udp, unix) URL. +func IsTransportURL(str string) bool { + return checkURL(str, "transport") +} + +func checkURL(str, kind string) bool { + for _, prefix := range validPrefixes[kind] { + if strings.HasPrefix(str, prefix) { + return true + } + } + return false +} diff --git a/extender/code/vendor/github.com/docker/docker/pkg/useragent/README.md b/extender/code/vendor/github.com/docker/docker/pkg/useragent/README.md new file mode 100644 index 000000000..d9cb367d1 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/useragent/README.md @@ -0,0 +1 @@ +This package provides helper functions to pack version information into a single User-Agent header. diff --git a/extender/code/vendor/github.com/docker/docker/pkg/useragent/useragent.go b/extender/code/vendor/github.com/docker/docker/pkg/useragent/useragent.go new file mode 100644 index 000000000..22db82129 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/pkg/useragent/useragent.go @@ -0,0 +1,55 @@ +// Package useragent provides helper functions to pack +// version information into a single User-Agent header. +package useragent // import "github.com/docker/docker/pkg/useragent" + +import ( + "strings" +) + +// VersionInfo is used to model UserAgent versions. 
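+// Name and Version must be non-empty and contain no whitespace or '/';
+// entries that fail this check are skipped by AppendVersions (see isValid).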
+type VersionInfo struct { + Name string + Version string +} + +func (vi *VersionInfo) isValid() bool { + const stopChars = " \t\r\n/" + name := vi.Name + vers := vi.Version + if len(name) == 0 || strings.ContainsAny(name, stopChars) { + return false + } + if len(vers) == 0 || strings.ContainsAny(vers, stopChars) { + return false + } + return true +} + +// AppendVersions converts versions to a string and appends the string to the string base. +// +// Each VersionInfo will be converted to a string in the format of +// "product/version", where the "product" is get from the name field, while +// version is get from the version field. Several pieces of version information +// will be concatenated and separated by space. +// +// Example: +// AppendVersions("base", VersionInfo{"foo", "1.0"}, VersionInfo{"bar", "2.0"}) +// results in "base foo/1.0 bar/2.0". +func AppendVersions(base string, versions ...VersionInfo) string { + if len(versions) == 0 { + return base + } + + verstrs := make([]string, 0, 1+len(versions)) + if len(base) > 0 { + verstrs = append(verstrs, base) + } + + for _, v := range versions { + if !v.isValid() { + continue + } + verstrs = append(verstrs, v.Name+"/"+v.Version) + } + return strings.Join(verstrs, " ") +} diff --git a/extender/code/vendor/github.com/docker/docker/plugin/v2/plugin.go b/extender/code/vendor/github.com/docker/docker/plugin/v2/plugin.go new file mode 100644 index 000000000..6852511c5 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/plugin/v2/plugin.go @@ -0,0 +1,311 @@ +package v2 // import "github.com/docker/docker/plugin/v2" + +import ( + "fmt" + "net" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/runtime-spec/specs-go" +) + +// Plugin represents an individual plugin. +type Plugin struct { + mu sync.RWMutex + PluginObj types.Plugin `json:"plugin"` // todo: embed struct + pClient *plugins.Client + refCount int + Rootfs string // TODO: make private + + Config digest.Digest + Blobsums []digest.Digest + + modifyRuntimeSpec func(*specs.Spec) + + SwarmServiceID string + timeout time.Duration + addr net.Addr +} + +const defaultPluginRuntimeDestination = "/run/docker/plugins" + +// ErrInadequateCapability indicates that the plugin did not have the requested capability. +type ErrInadequateCapability struct { + cap string +} + +func (e ErrInadequateCapability) Error() string { + return fmt.Sprintf("plugin does not provide %q capability", e.cap) +} + +// ScopedPath returns the path scoped to the plugin rootfs +func (p *Plugin) ScopedPath(s string) string { + if p.PluginObj.Config.PropagatedMount != "" && strings.HasPrefix(s, p.PluginObj.Config.PropagatedMount) { + // re-scope to the propagated mount path on the host + return filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount", strings.TrimPrefix(s, p.PluginObj.Config.PropagatedMount)) + } + return filepath.Join(p.Rootfs, s) +} + +// Client returns the plugin client. +// Deprecated: use p.Addr() and manually create the client +func (p *Plugin) Client() *plugins.Client { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.pClient +} + +// SetPClient set the plugin client. +// Deprecated: Hardcoded plugin client is deprecated +func (p *Plugin) SetPClient(client *plugins.Client) { + p.mu.Lock() + defer p.mu.Unlock() + + p.pClient = client +} + +// IsV1 returns true for V1 plugins and false otherwise. 
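+// Plugins in this package implement the v2 plugin model, so this is
+// always false.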
+func (p *Plugin) IsV1() bool { + return false +} + +// Name returns the plugin name. +func (p *Plugin) Name() string { + return p.PluginObj.Name +} + +// FilterByCap query the plugin for a given capability. +func (p *Plugin) FilterByCap(capability string) (*Plugin, error) { + capability = strings.ToLower(capability) + for _, typ := range p.PluginObj.Config.Interface.Types { + if typ.Capability == capability && typ.Prefix == "docker" { + return p, nil + } + } + return nil, ErrInadequateCapability{capability} +} + +// InitEmptySettings initializes empty settings for a plugin. +func (p *Plugin) InitEmptySettings() { + p.PluginObj.Settings.Mounts = make([]types.PluginMount, len(p.PluginObj.Config.Mounts)) + copy(p.PluginObj.Settings.Mounts, p.PluginObj.Config.Mounts) + p.PluginObj.Settings.Devices = make([]types.PluginDevice, len(p.PluginObj.Config.Linux.Devices)) + copy(p.PluginObj.Settings.Devices, p.PluginObj.Config.Linux.Devices) + p.PluginObj.Settings.Env = make([]string, 0, len(p.PluginObj.Config.Env)) + for _, env := range p.PluginObj.Config.Env { + if env.Value != nil { + p.PluginObj.Settings.Env = append(p.PluginObj.Settings.Env, fmt.Sprintf("%s=%s", env.Name, *env.Value)) + } + } + p.PluginObj.Settings.Args = make([]string, len(p.PluginObj.Config.Args.Value)) + copy(p.PluginObj.Settings.Args, p.PluginObj.Config.Args.Value) +} + +// Set is used to pass arguments to the plugin. +func (p *Plugin) Set(args []string) error { + p.mu.Lock() + defer p.mu.Unlock() + + if p.PluginObj.Enabled { + return fmt.Errorf("cannot set on an active plugin, disable plugin before setting") + } + + sets, err := newSettables(args) + if err != nil { + return err + } + + // TODO(vieux): lots of code duplication here, needs to be refactored. + +next: + for _, s := range sets { + // range over all the envs in the config + for _, env := range p.PluginObj.Config.Env { + // found the env in the config + if env.Name == s.name { + // is it settable ? + if ok, err := s.isSettable(allowedSettableFieldsEnv, env.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + // is it, so lets update the settings in memory + updateSettingsEnv(&p.PluginObj.Settings.Env, &s) + continue next + } + } + + // range over all the mounts in the config + for _, mount := range p.PluginObj.Config.Mounts { + // found the mount in the config + if mount.Name == s.name { + // is it settable ? + if ok, err := s.isSettable(allowedSettableFieldsMounts, mount.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + + // it is, so lets update the settings in memory + if mount.Source == nil { + return fmt.Errorf("Plugin config has no mount source") + } + *mount.Source = s.value + continue next + } + } + + // range over all the devices in the config + for _, device := range p.PluginObj.Config.Linux.Devices { + // found the device in the config + if device.Name == s.name { + // is it settable ? + if ok, err := s.isSettable(allowedSettableFieldsDevices, device.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + + // it is, so lets update the settings in memory + if device.Path == nil { + return fmt.Errorf("Plugin config has no device path") + } + *device.Path = s.value + continue next + } + } + + // found the name in the config + if p.PluginObj.Config.Args.Name == s.name { + // is it settable ? 
+ if ok, err := s.isSettable(allowedSettableFieldsArgs, p.PluginObj.Config.Args.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + + // it is, so lets update the settings in memory + p.PluginObj.Settings.Args = strings.Split(s.value, " ") + continue next + } + + return fmt.Errorf("setting %q not found in the plugin configuration", s.name) + } + + return nil +} + +// IsEnabled returns the active state of the plugin. +func (p *Plugin) IsEnabled() bool { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.Enabled +} + +// GetID returns the plugin's ID. +func (p *Plugin) GetID() string { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.ID +} + +// GetSocket returns the plugin socket. +func (p *Plugin) GetSocket() string { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.Config.Interface.Socket +} + +// GetTypes returns the interface types of a plugin. +func (p *Plugin) GetTypes() []types.PluginInterfaceType { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.Config.Interface.Types +} + +// GetRefCount returns the reference count. +func (p *Plugin) GetRefCount() int { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.refCount +} + +// AddRefCount adds to reference count. +func (p *Plugin) AddRefCount(count int) { + p.mu.Lock() + defer p.mu.Unlock() + + p.refCount += count +} + +// Acquire increments the plugin's reference count +// This should be followed up by `Release()` when the plugin is no longer in use. +func (p *Plugin) Acquire() { + p.AddRefCount(plugingetter.Acquire) +} + +// Release decrements the plugin's reference count +// This should only be called when the plugin is no longer in use, e.g. with +// via `Acquire()` or getter.Get("name", "type", plugingetter.Acquire) +func (p *Plugin) Release() { + p.AddRefCount(plugingetter.Release) +} + +// SetSpecOptModifier sets the function to use to modify the generated +// runtime spec. +func (p *Plugin) SetSpecOptModifier(f func(*specs.Spec)) { + p.mu.Lock() + p.modifyRuntimeSpec = f + p.mu.Unlock() +} + +// Timeout gets the currently configured connection timeout. +// This should be used when dialing the plugin. +func (p *Plugin) Timeout() time.Duration { + p.mu.RLock() + t := p.timeout + p.mu.RUnlock() + return t +} + +// SetTimeout sets the timeout to use for dialing. +func (p *Plugin) SetTimeout(t time.Duration) { + p.mu.Lock() + p.timeout = t + p.mu.Unlock() +} + +// Addr returns the net.Addr to use to connect to the plugin socket +func (p *Plugin) Addr() net.Addr { + p.mu.RLock() + addr := p.addr + p.mu.RUnlock() + return addr +} + +// SetAddr sets the plugin address which can be used for dialing the plugin. +func (p *Plugin) SetAddr(addr net.Addr) { + p.mu.Lock() + p.addr = addr + p.mu.Unlock() +} + +// Protocol is the protocol that should be used for interacting with the plugin. 
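+// It falls back to plugins.ProtocolSchemeHTTPV1 when the plugin config
+// does not declare a protocol scheme.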
+func (p *Plugin) Protocol() string { + if p.PluginObj.Config.Interface.ProtocolScheme != "" { + return p.PluginObj.Config.Interface.ProtocolScheme + } + return plugins.ProtocolSchemeHTTPV1 +} diff --git a/extender/code/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go b/extender/code/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go new file mode 100644 index 000000000..58c432fcd --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go @@ -0,0 +1,141 @@ +package v2 // import "github.com/docker/docker/plugin/v2" + +import ( + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/oci" + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +// InitSpec creates an OCI spec from the plugin's config. +func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { + s := oci.DefaultSpec() + + s.Root = &specs.Root{ + Path: p.Rootfs, + Readonly: false, // TODO: all plugins should be readonly? settable in config? + } + + userMounts := make(map[string]struct{}, len(p.PluginObj.Settings.Mounts)) + for _, m := range p.PluginObj.Settings.Mounts { + userMounts[m.Destination] = struct{}{} + } + + execRoot = filepath.Join(execRoot, p.PluginObj.ID) + if err := os.MkdirAll(execRoot, 0700); err != nil { + return nil, errors.WithStack(err) + } + + if p.PluginObj.Config.PropagatedMount != "" { + pRoot := filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount") + s.Mounts = append(s.Mounts, specs.Mount{ + Source: pRoot, + Destination: p.PluginObj.Config.PropagatedMount, + Type: "bind", + Options: []string{"rbind", "rw", "rshared"}, + }) + s.Linux.RootfsPropagation = "rshared" + } + + mounts := append(p.PluginObj.Config.Mounts, types.PluginMount{ + Source: &execRoot, + Destination: defaultPluginRuntimeDestination, + Type: "bind", + Options: []string{"rbind", "rshared"}, + }) + + if p.PluginObj.Config.Network.Type != "" { + // TODO: if net == bridge, use libnetwork controller to create a new plugin-specific bridge, bind mount /etc/hosts and /etc/resolv.conf look at the docker code (allocateNetwork, initialize) + if p.PluginObj.Config.Network.Type == "host" { + oci.RemoveNamespace(&s, specs.LinuxNamespaceType("network")) + } + etcHosts := "/etc/hosts" + resolvConf := "/etc/resolv.conf" + mounts = append(mounts, + types.PluginMount{ + Source: &etcHosts, + Destination: etcHosts, + Type: "bind", + Options: []string{"rbind", "ro"}, + }, + types.PluginMount{ + Source: &resolvConf, + Destination: resolvConf, + Type: "bind", + Options: []string{"rbind", "ro"}, + }) + } + if p.PluginObj.Config.PidHost { + oci.RemoveNamespace(&s, specs.LinuxNamespaceType("pid")) + } + + if p.PluginObj.Config.IpcHost { + oci.RemoveNamespace(&s, specs.LinuxNamespaceType("ipc")) + } + + for _, mnt := range mounts { + m := specs.Mount{ + Destination: mnt.Destination, + Type: mnt.Type, + Options: mnt.Options, + } + if mnt.Source == nil { + return nil, errors.New("mount source is not specified") + } + m.Source = *mnt.Source + s.Mounts = append(s.Mounts, m) + } + + for i, m := range s.Mounts { + if strings.HasPrefix(m.Destination, "/dev/") { + if _, ok := userMounts[m.Destination]; ok { + s.Mounts = append(s.Mounts[:i], s.Mounts[i+1:]...) 
+ } + } + } + + if p.PluginObj.Config.Linux.AllowAllDevices { + s.Linux.Resources.Devices = []specs.LinuxDeviceCgroup{{Allow: true, Access: "rwm"}} + } + for _, dev := range p.PluginObj.Settings.Devices { + path := *dev.Path + d, dPermissions, err := oci.DevicesFromPath(path, path, "rwm") + if err != nil { + return nil, errors.WithStack(err) + } + s.Linux.Devices = append(s.Linux.Devices, d...) + s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, dPermissions...) + } + + envs := make([]string, 1, len(p.PluginObj.Settings.Env)+1) + envs[0] = "PATH=" + system.DefaultPathEnv(runtime.GOOS) + envs = append(envs, p.PluginObj.Settings.Env...) + + args := append(p.PluginObj.Config.Entrypoint, p.PluginObj.Settings.Args...) + cwd := p.PluginObj.Config.WorkDir + if len(cwd) == 0 { + cwd = "/" + } + s.Process.Terminal = false + s.Process.Args = args + s.Process.Cwd = cwd + s.Process.Env = envs + + caps := s.Process.Capabilities + caps.Bounding = append(caps.Bounding, p.PluginObj.Config.Linux.Capabilities...) + caps.Permitted = append(caps.Permitted, p.PluginObj.Config.Linux.Capabilities...) + caps.Inheritable = append(caps.Inheritable, p.PluginObj.Config.Linux.Capabilities...) + caps.Effective = append(caps.Effective, p.PluginObj.Config.Linux.Capabilities...) + + if p.modifyRuntimeSpec != nil { + p.modifyRuntimeSpec(&s) + } + + return &s, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go b/extender/code/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go new file mode 100644 index 000000000..5242fe124 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go @@ -0,0 +1,14 @@ +// +build !linux + +package v2 // import "github.com/docker/docker/plugin/v2" + +import ( + "errors" + + "github.com/opencontainers/runtime-spec/specs-go" +) + +// InitSpec creates an OCI spec from the plugin's config. 
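+// Plugin runtime specs are only implemented on Linux (plugin_linux.go);
+// this stub always errors on other platforms.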
+func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { + return nil, errors.New("not supported") +} diff --git a/extender/code/vendor/github.com/docker/docker/plugin/v2/settable.go b/extender/code/vendor/github.com/docker/docker/plugin/v2/settable.go new file mode 100644 index 000000000..efda56470 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/plugin/v2/settable.go @@ -0,0 +1,102 @@ +package v2 // import "github.com/docker/docker/plugin/v2" + +import ( + "errors" + "fmt" + "strings" +) + +type settable struct { + name string + field string + value string +} + +var ( + allowedSettableFieldsEnv = []string{"value"} + allowedSettableFieldsArgs = []string{"value"} + allowedSettableFieldsDevices = []string{"path"} + allowedSettableFieldsMounts = []string{"source"} + + errMultipleFields = errors.New("multiple fields are settable, one must be specified") + errInvalidFormat = errors.New("invalid format, must be [.][=]") +) + +func newSettables(args []string) ([]settable, error) { + sets := make([]settable, 0, len(args)) + for _, arg := range args { + set, err := newSettable(arg) + if err != nil { + return nil, err + } + sets = append(sets, set) + } + return sets, nil +} + +func newSettable(arg string) (settable, error) { + var set settable + if i := strings.Index(arg, "="); i == 0 { + return set, errInvalidFormat + } else if i < 0 { + set.name = arg + } else { + set.name = arg[:i] + set.value = arg[i+1:] + } + + if i := strings.LastIndex(set.name, "."); i > 0 { + set.field = set.name[i+1:] + set.name = arg[:i] + } + + return set, nil +} + +// prettyName return name.field if there is a field, otherwise name. +func (set *settable) prettyName() string { + if set.field != "" { + return fmt.Sprintf("%s.%s", set.name, set.field) + } + return set.name +} + +func (set *settable) isSettable(allowedSettableFields []string, settable []string) (bool, error) { + if set.field == "" { + if len(settable) == 1 { + // if field is not specified and there only one settable, default to it. + set.field = settable[0] + } else if len(settable) > 1 { + return false, errMultipleFields + } + } + + isAllowed := false + for _, allowedSettableField := range allowedSettableFields { + if set.field == allowedSettableField { + isAllowed = true + break + } + } + + if isAllowed { + for _, settableField := range settable { + if set.field == settableField { + return true, nil + } + } + } + + return false, nil +} + +func updateSettingsEnv(env *[]string, set *settable) { + for i, e := range *env { + if parts := strings.SplitN(e, "=", 2); parts[0] == set.name { + (*env)[i] = fmt.Sprintf("%s=%s", set.name, set.value) + return + } + } + + *env = append(*env, fmt.Sprintf("%s=%s", set.name, set.value)) +} diff --git a/extender/code/vendor/github.com/docker/docker/restartmanager/restartmanager.go b/extender/code/vendor/github.com/docker/docker/restartmanager/restartmanager.go new file mode 100644 index 000000000..6468ccf7e --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/restartmanager/restartmanager.go @@ -0,0 +1,133 @@ +package restartmanager // import "github.com/docker/docker/restartmanager" + +import ( + "errors" + "fmt" + "sync" + "time" + + "github.com/docker/docker/api/types/container" +) + +const ( + backoffMultiplier = 2 + defaultTimeout = 100 * time.Millisecond + maxRestartTimeout = 1 * time.Minute +) + +// ErrRestartCanceled is returned when the restart manager has been +// canceled and will no longer restart the container. 
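+// Callers that get (true, ch, nil) from ShouldRestart should wait on ch:
+// it yields ErrRestartCanceled if Cancel is invoked during the backoff
+// window, and is closed once the backoff elapses.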
+var ErrRestartCanceled = errors.New("restart canceled") + +// RestartManager defines object that controls container restarting rules. +type RestartManager interface { + Cancel() error + ShouldRestart(exitCode uint32, hasBeenManuallyStopped bool, executionDuration time.Duration) (bool, chan error, error) +} + +type restartManager struct { + sync.Mutex + sync.Once + policy container.RestartPolicy + restartCount int + timeout time.Duration + active bool + cancel chan struct{} + canceled bool +} + +// New returns a new restartManager based on a policy. +func New(policy container.RestartPolicy, restartCount int) RestartManager { + return &restartManager{policy: policy, restartCount: restartCount, cancel: make(chan struct{})} +} + +func (rm *restartManager) SetPolicy(policy container.RestartPolicy) { + rm.Lock() + rm.policy = policy + rm.Unlock() +} + +func (rm *restartManager) ShouldRestart(exitCode uint32, hasBeenManuallyStopped bool, executionDuration time.Duration) (bool, chan error, error) { + if rm.policy.IsNone() { + return false, nil, nil + } + rm.Lock() + unlockOnExit := true + defer func() { + if unlockOnExit { + rm.Unlock() + } + }() + + if rm.canceled { + return false, nil, ErrRestartCanceled + } + + if rm.active { + return false, nil, fmt.Errorf("invalid call on an active restart manager") + } + // if the container ran for more than 10s, regardless of status and policy reset the + // the timeout back to the default. + if executionDuration.Seconds() >= 10 { + rm.timeout = 0 + } + switch { + case rm.timeout == 0: + rm.timeout = defaultTimeout + case rm.timeout < maxRestartTimeout: + rm.timeout *= backoffMultiplier + } + if rm.timeout > maxRestartTimeout { + rm.timeout = maxRestartTimeout + } + + var restart bool + switch { + case rm.policy.IsAlways(): + restart = true + case rm.policy.IsUnlessStopped() && !hasBeenManuallyStopped: + restart = true + case rm.policy.IsOnFailure(): + // the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count + if max := rm.policy.MaximumRetryCount; max == 0 || rm.restartCount < max { + restart = exitCode != 0 + } + } + + if !restart { + rm.active = false + return false, nil, nil + } + + rm.restartCount++ + + unlockOnExit = false + rm.active = true + rm.Unlock() + + ch := make(chan error) + go func() { + select { + case <-rm.cancel: + ch <- ErrRestartCanceled + close(ch) + case <-time.After(rm.timeout): + rm.Lock() + close(ch) + rm.active = false + rm.Unlock() + } + }() + + return true, ch, nil +} + +func (rm *restartManager) Cancel() error { + rm.Do(func() { + rm.Lock() + rm.canceled = true + close(rm.cancel) + rm.Unlock() + }) + return nil +} diff --git a/extender/code/vendor/github.com/docker/docker/runconfig/config.go b/extender/code/vendor/github.com/docker/docker/runconfig/config.go new file mode 100644 index 000000000..cbacf47df --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/runconfig/config.go @@ -0,0 +1,81 @@ +package runconfig // import "github.com/docker/docker/runconfig" + +import ( + "encoding/json" + "io" + + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/pkg/sysinfo" +) + +// ContainerDecoder implements httputils.ContainerDecoder +// calling DecodeContainerConfig. 
+type ContainerDecoder struct{} + +// DecodeConfig makes ContainerDecoder to implement httputils.ContainerDecoder +func (r ContainerDecoder) DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { + return decodeContainerConfig(src) +} + +// DecodeHostConfig makes ContainerDecoder to implement httputils.ContainerDecoder +func (r ContainerDecoder) DecodeHostConfig(src io.Reader) (*container.HostConfig, error) { + return decodeHostConfig(src) +} + +// decodeContainerConfig decodes a json encoded config into a ContainerConfigWrapper +// struct and returns both a Config and a HostConfig struct +// Be aware this function is not checking whether the resulted structs are nil, +// it's your business to do so +func decodeContainerConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { + var w ContainerConfigWrapper + + decoder := json.NewDecoder(src) + if err := decoder.Decode(&w); err != nil { + return nil, nil, nil, err + } + + hc := w.getHostConfig() + + // Perform platform-specific processing of Volumes and Binds. + if w.Config != nil && hc != nil { + + // Initialize the volumes map if currently nil + if w.Config.Volumes == nil { + w.Config.Volumes = make(map[string]struct{}) + } + } + + // Certain parameters need daemon-side validation that cannot be done + // on the client, as only the daemon knows what is valid for the platform. + if err := validateNetMode(w.Config, hc); err != nil { + return nil, nil, nil, err + } + + // Validate isolation + if err := validateIsolation(hc); err != nil { + return nil, nil, nil, err + } + + // Validate QoS + if err := validateQoS(hc); err != nil { + return nil, nil, nil, err + } + + // Validate Resources + if err := validateResources(hc, sysinfo.New(true)); err != nil { + return nil, nil, nil, err + } + + // Validate Privileged + if err := validatePrivileged(hc); err != nil { + return nil, nil, nil, err + } + + // Validate ReadonlyRootfs + if err := validateReadonlyRootfs(hc); err != nil { + return nil, nil, nil, err + } + + return w.Config, hc, w.NetworkingConfig, nil +} diff --git a/extender/code/vendor/github.com/docker/docker/runconfig/config_unix.go b/extender/code/vendor/github.com/docker/docker/runconfig/config_unix.go new file mode 100644 index 000000000..65e8d6fcd --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/runconfig/config_unix.go @@ -0,0 +1,59 @@ +// +build !windows + +package runconfig // import "github.com/docker/docker/runconfig" + +import ( + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" +) + +// ContainerConfigWrapper is a Config wrapper that holds the container Config (portable) +// and the corresponding HostConfig (non-portable). +type ContainerConfigWrapper struct { + *container.Config + InnerHostConfig *container.HostConfig `json:"HostConfig,omitempty"` + Cpuset string `json:",omitempty"` // Deprecated. Exported for backwards compatibility. + NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"` + *container.HostConfig // Deprecated. Exported to read attributes from json that are not in the inner host config structure. +} + +// getHostConfig gets the HostConfig of the Config. 
+// It's mostly there to handle Deprecated fields of the ContainerConfigWrapper +func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig { + hc := w.HostConfig + + if hc == nil && w.InnerHostConfig != nil { + hc = w.InnerHostConfig + } else if w.InnerHostConfig != nil { + if hc.Memory != 0 && w.InnerHostConfig.Memory == 0 { + w.InnerHostConfig.Memory = hc.Memory + } + if hc.MemorySwap != 0 && w.InnerHostConfig.MemorySwap == 0 { + w.InnerHostConfig.MemorySwap = hc.MemorySwap + } + if hc.CPUShares != 0 && w.InnerHostConfig.CPUShares == 0 { + w.InnerHostConfig.CPUShares = hc.CPUShares + } + if hc.CpusetCpus != "" && w.InnerHostConfig.CpusetCpus == "" { + w.InnerHostConfig.CpusetCpus = hc.CpusetCpus + } + + if hc.VolumeDriver != "" && w.InnerHostConfig.VolumeDriver == "" { + w.InnerHostConfig.VolumeDriver = hc.VolumeDriver + } + + hc = w.InnerHostConfig + } + + if hc != nil { + if w.Cpuset != "" && hc.CpusetCpus == "" { + hc.CpusetCpus = w.Cpuset + } + } + + // Make sure NetworkMode has an acceptable value. We do this to ensure + // backwards compatible API behavior. + SetDefaultNetModeIfBlank(hc) + + return hc +} diff --git a/extender/code/vendor/github.com/docker/docker/runconfig/config_windows.go b/extender/code/vendor/github.com/docker/docker/runconfig/config_windows.go new file mode 100644 index 000000000..cced59d4d --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/runconfig/config_windows.go @@ -0,0 +1,19 @@ +package runconfig // import "github.com/docker/docker/runconfig" + +import ( + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" +) + +// ContainerConfigWrapper is a Config wrapper that holds the container Config (portable) +// and the corresponding HostConfig (non-portable). +type ContainerConfigWrapper struct { + *container.Config + HostConfig *container.HostConfig `json:"HostConfig,omitempty"` + NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"` +} + +// getHostConfig gets the HostConfig of the Config. +func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig { + return w.HostConfig +} diff --git a/extender/code/vendor/github.com/docker/docker/runconfig/errors.go b/extender/code/vendor/github.com/docker/docker/runconfig/errors.go new file mode 100644 index 000000000..038fe3966 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/runconfig/errors.go @@ -0,0 +1,42 @@ +package runconfig // import "github.com/docker/docker/runconfig" + +const ( + // ErrConflictContainerNetworkAndLinks conflict between --net=container and links + ErrConflictContainerNetworkAndLinks validationError = "conflicting options: container type network can't be used with links. This would result in undefined behavior" + // ErrConflictSharedNetwork conflict between private and other networks + ErrConflictSharedNetwork validationError = "container sharing network namespace with another container or host cannot be connected to any other network" + // ErrConflictHostNetwork conflict from being disconnected from host network or connected to host network. 
+ ErrConflictHostNetwork validationError = "container cannot be disconnected from host network or connected to host network" + // ErrConflictNoNetwork conflict between private and other networks + ErrConflictNoNetwork validationError = "container cannot be connected to multiple networks with one of the networks in private (none) mode" + // ErrConflictNetworkAndDNS conflict between --dns and the network mode + ErrConflictNetworkAndDNS validationError = "conflicting options: dns and the network mode" + // ErrConflictNetworkHostname conflict between the hostname and the network mode + ErrConflictNetworkHostname validationError = "conflicting options: hostname and the network mode" + // ErrConflictHostNetworkAndLinks conflict between --net=host and links + ErrConflictHostNetworkAndLinks validationError = "conflicting options: host type networking can't be used with links. This would result in undefined behavior" + // ErrConflictContainerNetworkAndMac conflict between the mac address and the network mode + ErrConflictContainerNetworkAndMac validationError = "conflicting options: mac-address and the network mode" + // ErrConflictNetworkHosts conflict between add-host and the network mode + ErrConflictNetworkHosts validationError = "conflicting options: custom host-to-IP mapping and the network mode" + // ErrConflictNetworkPublishPorts conflict between the publish options and the network mode + ErrConflictNetworkPublishPorts validationError = "conflicting options: port publishing and the container type network mode" + // ErrConflictNetworkExposePorts conflict between the expose option and the network mode + ErrConflictNetworkExposePorts validationError = "conflicting options: port exposing and the container type network mode" + // ErrUnsupportedNetworkAndIP conflict between network mode and requested ip address + ErrUnsupportedNetworkAndIP validationError = "user specified IP address is supported on user defined networks only" + // ErrUnsupportedNetworkNoSubnetAndIP conflict between network with no configured subnet and requested ip address + ErrUnsupportedNetworkNoSubnetAndIP validationError = "user specified IP address is supported only when connecting to networks with user configured subnets" + // ErrUnsupportedNetworkAndAlias conflict between network mode and alias + ErrUnsupportedNetworkAndAlias validationError = "network-scoped alias is supported only for containers in user defined networks" + // ErrConflictUTSHostname conflict between the hostname and the UTS mode + ErrConflictUTSHostname validationError = "conflicting options: hostname and the UTS mode" +) + +type validationError string + +func (e validationError) Error() string { + return string(e) +} + +func (e validationError) InvalidParameter() {} diff --git a/extender/code/vendor/github.com/docker/docker/runconfig/hostconfig.go b/extender/code/vendor/github.com/docker/docker/runconfig/hostconfig.go new file mode 100644 index 000000000..7d99e5acf --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/runconfig/hostconfig.go @@ -0,0 +1,79 @@ +package runconfig // import "github.com/docker/docker/runconfig" + +import ( + "encoding/json" + "io" + "strings" + + "github.com/docker/docker/api/types/container" +) + +// DecodeHostConfig creates a HostConfig based on the specified Reader. +// It assumes the content of the reader will be JSON, and decodes it. 
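// A minimal sketch, independent of the vendored types, of the wrapper pattern
// decodeHostConfig relies on below: the request body is a single JSON document
// whose "HostConfig" key is unwrapped after decoding. The struct fields here
// are simplified stand-ins for the real container.Config and
// container.HostConfig types.
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

type hostConfig struct {
	NetworkMode string
	Privileged  bool
}

type configWrapper struct {
	Image      string
	HostConfig *hostConfig `json:"HostConfig,omitempty"`
}

func main() {
	body := `{"Image":"alpine","HostConfig":{"NetworkMode":"bridge","Privileged":false}}`
	var w configWrapper
	if err := json.NewDecoder(strings.NewReader(body)).Decode(&w); err != nil {
		panic(err)
	}
	// after decoding, the host config is extracted from the wrapper
	fmt.Printf("image=%s netmode=%s\n", w.Image, w.HostConfig.NetworkMode)
}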
+func decodeHostConfig(src io.Reader) (*container.HostConfig, error) { + decoder := json.NewDecoder(src) + + var w ContainerConfigWrapper + if err := decoder.Decode(&w); err != nil { + return nil, err + } + + hc := w.getHostConfig() + return hc, nil +} + +// SetDefaultNetModeIfBlank changes the NetworkMode in a HostConfig structure +// to default if it is not populated. This ensures backwards compatibility after +// the validation of the network mode was moved from the docker CLI to the +// docker daemon. +func SetDefaultNetModeIfBlank(hc *container.HostConfig) { + if hc != nil { + if hc.NetworkMode == container.NetworkMode("") { + hc.NetworkMode = container.NetworkMode("default") + } + } +} + +// validateNetContainerMode ensures that the various combinations of requested +// network settings wrt container mode are valid. +func validateNetContainerMode(c *container.Config, hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + parts := strings.Split(string(hc.NetworkMode), ":") + if parts[0] == "container" { + if len(parts) < 2 || parts[1] == "" { + return validationError("Invalid network mode: invalid container format container:<name|id>") + } + } + + if hc.NetworkMode.IsContainer() && c.Hostname != "" { + return ErrConflictNetworkHostname + } + + if hc.NetworkMode.IsContainer() && len(hc.Links) > 0 { + return ErrConflictContainerNetworkAndLinks + } + + if hc.NetworkMode.IsContainer() && len(hc.DNS) > 0 { + return ErrConflictNetworkAndDNS + } + + if hc.NetworkMode.IsContainer() && len(hc.ExtraHosts) > 0 { + return ErrConflictNetworkHosts + } + + if (hc.NetworkMode.IsContainer() || hc.NetworkMode.IsHost()) && c.MacAddress != "" { + return ErrConflictContainerNetworkAndMac + } + + if hc.NetworkMode.IsContainer() && (len(hc.PortBindings) > 0 || hc.PublishAllPorts) { + return ErrConflictNetworkPublishPorts + } + + if hc.NetworkMode.IsContainer() && len(c.ExposedPorts) > 0 { + return ErrConflictNetworkExposePorts + } + return nil +} diff --git a/extender/code/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go b/extender/code/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go new file mode 100644 index 000000000..e579b06d9 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go @@ -0,0 +1,110 @@ +// +build !windows + +package runconfig // import "github.com/docker/docker/runconfig" + +import ( + "fmt" + "runtime" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/sysinfo" +) + +// DefaultDaemonNetworkMode returns the default network stack the daemon should +// use. +func DefaultDaemonNetworkMode() container.NetworkMode { + return container.NetworkMode("bridge") +} + +// IsPreDefinedNetwork indicates if a network is predefined by the daemon +func IsPreDefinedNetwork(network string) bool { + n := container.NetworkMode(network) + return n.IsBridge() || n.IsHost() || n.IsNone() || n.IsDefault() +} + +// validateNetMode ensures that the various combinations of requested +// network settings are valid.
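// A minimal standalone sketch of the "container:<name|id>" format check
// performed by validateNetContainerMode above; the mode strings in main are
// hypothetical.
package main

import (
	"errors"
	"fmt"
	"strings"
)

func checkContainerMode(mode string) error {
	// a "container:" network mode must name the container it joins
	parts := strings.Split(mode, ":")
	if parts[0] == "container" && (len(parts) < 2 || parts[1] == "") {
		return errors.New("invalid network mode: invalid container format container:<name|id>")
	}
	return nil
}

func main() {
	for _, mode := range []string{"bridge", "container:web", "container:"} {
		fmt.Printf("%-14s -> %v\n", mode, checkContainerMode(mode))
	}
}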
+func validateNetMode(c *container.Config, hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + + err := validateNetContainerMode(c, hc) + if err != nil { + return err + } + + if hc.UTSMode.IsHost() && c.Hostname != "" { + return ErrConflictUTSHostname + } + + if hc.NetworkMode.IsHost() && len(hc.Links) > 0 { + return ErrConflictHostNetworkAndLinks + } + + return nil +} + +// validateIsolation performs platform specific validation of +// isolation in the hostconfig structure. Linux only supports "default" +// which is LXC container isolation +func validateIsolation(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if !hc.Isolation.IsValid() { + return fmt.Errorf("Invalid isolation: %q - %s only supports 'default'", hc.Isolation, runtime.GOOS) + } + return nil +} + +// validateQoS performs platform specific validation of the QoS settings +func validateQoS(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + + if hc.IOMaximumBandwidth != 0 { + return fmt.Errorf("Invalid QoS settings: %s does not support configuration of maximum bandwidth", runtime.GOOS) + } + + if hc.IOMaximumIOps != 0 { + return fmt.Errorf("Invalid QoS settings: %s does not support configuration of maximum IOPs", runtime.GOOS) + } + return nil +} + +// validateResources performs platform specific validation of the resource settings +// cpu-rt-runtime and cpu-rt-period can not be greater than their parent, cpu-rt-runtime requires sys_nice +func validateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + + if hc.Resources.CPURealtimePeriod > 0 && !si.CPURealtimePeriod { + return fmt.Errorf("Your kernel does not support cgroup cpu real-time period") + } + + if hc.Resources.CPURealtimeRuntime > 0 && !si.CPURealtimeRuntime { + return fmt.Errorf("Your kernel does not support cgroup cpu real-time runtime") + } + + if hc.Resources.CPURealtimePeriod != 0 && hc.Resources.CPURealtimeRuntime != 0 && hc.Resources.CPURealtimeRuntime > hc.Resources.CPURealtimePeriod { + return fmt.Errorf("cpu real-time runtime cannot be higher than cpu real-time period") + } + return nil +} + +// validatePrivileged performs platform specific validation of the Privileged setting +func validatePrivileged(hc *container.HostConfig) error { + return nil +} + +// validateReadonlyRootfs performs platform specific validation of the ReadonlyRootfs setting +func validateReadonlyRootfs(hc *container.HostConfig) error { + return nil +} diff --git a/extender/code/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go b/extender/code/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go new file mode 100644 index 000000000..33a4668af --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go @@ -0,0 +1,96 @@ +package runconfig // import "github.com/docker/docker/runconfig" + +import ( + "fmt" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/sysinfo" +) + +// DefaultDaemonNetworkMode returns the default network stack the daemon should +// use. 
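// A minimal sketch of the cpu-rt checks validateResources applies above:
// requested realtime values are rejected unless the kernel reports support,
// and the runtime may not exceed the period. The boolean flags are stand-ins
// for the fields read from pkg/sysinfo.
package main

import (
	"errors"
	"fmt"
)

func checkRealtime(period, runtime int64, kernelPeriod, kernelRuntime bool) error {
	if period > 0 && !kernelPeriod {
		return errors.New("kernel does not support cgroup cpu real-time period")
	}
	if runtime > 0 && !kernelRuntime {
		return errors.New("kernel does not support cgroup cpu real-time runtime")
	}
	if period != 0 && runtime != 0 && runtime > period {
		return errors.New("cpu real-time runtime cannot be higher than cpu real-time period")
	}
	return nil
}

func main() {
	fmt.Println(checkRealtime(1000000, 950000, true, true))  // ok: <nil>
	fmt.Println(checkRealtime(1000000, 2000000, true, true)) // runtime > period
}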
+func DefaultDaemonNetworkMode() container.NetworkMode { + return container.NetworkMode("nat") +} + +// IsPreDefinedNetwork indicates if a network is predefined by the daemon +func IsPreDefinedNetwork(network string) bool { + return !container.NetworkMode(network).IsUserDefined() +} + +// validateNetMode ensures that the various combinations of requested +// network settings are valid. +func validateNetMode(c *container.Config, hc *container.HostConfig) error { + if hc == nil { + return nil + } + + err := validateNetContainerMode(c, hc) + if err != nil { + return err + } + + if hc.NetworkMode.IsContainer() && hc.Isolation.IsHyperV() { + return fmt.Errorf("Using the network stack of another container is not supported while using Hyper-V Containers") + } + + return nil +} + +// validateIsolation performs platform specific validation of the +// isolation in the hostconfig structure. Windows supports 'default' (or +// blank), 'process', or 'hyperv'. +func validateIsolation(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if !hc.Isolation.IsValid() { + return fmt.Errorf("Invalid isolation: %q. Windows supports 'default', 'process', or 'hyperv'", hc.Isolation) + } + return nil +} + +// validateQoS performs platform specific validation of the Qos settings +func validateQoS(hc *container.HostConfig) error { + return nil +} + +// validateResources performs platform specific validation of the resource settings +func validateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if hc.Resources.CPURealtimePeriod != 0 { + return fmt.Errorf("Windows does not support CPU real-time period") + } + if hc.Resources.CPURealtimeRuntime != 0 { + return fmt.Errorf("Windows does not support CPU real-time runtime") + } + return nil +} + +// validatePrivileged performs platform specific validation of the Privileged setting +func validatePrivileged(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if hc.Privileged { + return fmt.Errorf("Windows does not support privileged mode") + } + return nil +} + +// validateReadonlyRootfs performs platform specific validation of the ReadonlyRootfs setting +func validateReadonlyRootfs(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if hc.ReadonlyRootfs { + return fmt.Errorf("Windows does not support root filesystem in read-only mode") + } + return nil +} diff --git a/extender/code/vendor/github.com/docker/docker/runconfig/opts/parse.go b/extender/code/vendor/github.com/docker/docker/runconfig/opts/parse.go new file mode 100644 index 000000000..8f7baeb63 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/runconfig/opts/parse.go @@ -0,0 +1,20 @@ +package opts // import "github.com/docker/docker/runconfig/opts" + +import ( + "strings" +) + +// ConvertKVStringsToMap converts ["key=value"] to {"key":"value"} +func ConvertKVStringsToMap(values []string) map[string]string { + result := make(map[string]string, len(values)) + for _, value := range values { + kv := strings.SplitN(value, "=", 2) + if len(kv) == 1 { + result[kv[0]] = "" + } else { + result[kv[0]] = kv[1] + } + } + + return result +} diff --git 
a/extender/code/vendor/github.com/docker/docker/volume/mounts/lcow_parser.go b/extender/code/vendor/github.com/docker/docker/volume/mounts/lcow_parser.go new file mode 100644 index 000000000..bafb7b07f --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/volume/mounts/lcow_parser.go @@ -0,0 +1,34 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "errors" + "path" + + "github.com/docker/docker/api/types/mount" +) + +var lcowSpecificValidators mountValidator = func(m *mount.Mount) error { + if path.Clean(m.Target) == "/" { + return ErrVolumeTargetIsRoot + } + if m.Type == mount.TypeNamedPipe { + return errors.New("Linux containers on Windows do not support named pipe mounts") + } + return nil +} + +type lcowParser struct { + windowsParser +} + +func (p *lcowParser) ValidateMountConfig(mnt *mount.Mount) error { + return p.validateMountConfigReg(mnt, rxLCOWDestination, lcowSpecificValidators) +} + +func (p *lcowParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) { + return p.parseMountRaw(raw, volumeDriver, rxLCOWDestination, false, lcowSpecificValidators) +} + +func (p *lcowParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) { + return p.parseMountSpec(cfg, rxLCOWDestination, false, lcowSpecificValidators) +} diff --git a/extender/code/vendor/github.com/docker/docker/volume/mounts/linux_parser.go b/extender/code/vendor/github.com/docker/docker/volume/mounts/linux_parser.go new file mode 100644 index 000000000..035a24a8d --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/volume/mounts/linux_parser.go @@ -0,0 +1,420 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "errors" + "fmt" + "path" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/volume" +) + +type linuxParser struct { +} + +func linuxSplitRawSpec(raw string) ([]string, error) { + if strings.Count(raw, ":") > 2 { + return nil, errInvalidSpec(raw) + } + + arr := strings.SplitN(raw, ":", 3) + if arr[0] == "" { + return nil, errInvalidSpec(raw) + } + return arr, nil +} + +func linuxValidateNotRoot(p string) error { + p = path.Clean(strings.Replace(p, `\`, `/`, -1)) + if p == "/" { + return ErrVolumeTargetIsRoot + } + return nil +} +func linuxValidateAbsolute(p string) error { + p = strings.Replace(p, `\`, `/`, -1) + if path.IsAbs(p) { + return nil + } + return fmt.Errorf("invalid mount path: '%s' mount path must be absolute", p) +} +func (p *linuxParser) ValidateMountConfig(mnt *mount.Mount) error { + // there was something looking like a bug in existing codebase: + // - validateMountConfig on linux was called with options skipping bind source existence when calling ParseMountRaw + // - but not when calling ParseMountSpec directly... 
nor when the unit test called it directly + return p.validateMountConfigImpl(mnt, true) +} +func (p *linuxParser) validateMountConfigImpl(mnt *mount.Mount, validateBindSourceExists bool) error { + if len(mnt.Target) == 0 { + return &errMountConfig{mnt, errMissingField("Target")} + } + + if err := linuxValidateNotRoot(mnt.Target); err != nil { + return &errMountConfig{mnt, err} + } + + if err := linuxValidateAbsolute(mnt.Target); err != nil { + return &errMountConfig{mnt, err} + } + + switch mnt.Type { + case mount.TypeBind: + if len(mnt.Source) == 0 { + return &errMountConfig{mnt, errMissingField("Source")} + } + // Don't error out just because the propagation mode is not supported on the platform + if opts := mnt.BindOptions; opts != nil { + if len(opts.Propagation) > 0 && len(linuxPropagationModes) > 0 { + if _, ok := linuxPropagationModes[opts.Propagation]; !ok { + return &errMountConfig{mnt, fmt.Errorf("invalid propagation mode: %s", opts.Propagation)} + } + } + } + if mnt.VolumeOptions != nil { + return &errMountConfig{mnt, errExtraField("VolumeOptions")} + } + + if err := linuxValidateAbsolute(mnt.Source); err != nil { + return &errMountConfig{mnt, err} + } + + if validateBindSourceExists { + exists, _, _ := currentFileInfoProvider.fileInfo(mnt.Source) + if !exists { + return &errMountConfig{mnt, errBindSourceDoesNotExist(mnt.Source)} + } + } + + case mount.TypeVolume: + if mnt.BindOptions != nil { + return &errMountConfig{mnt, errExtraField("BindOptions")} + } + + if len(mnt.Source) == 0 && mnt.ReadOnly { + return &errMountConfig{mnt, fmt.Errorf("must not set ReadOnly mode when using anonymous volumes")} + } + case mount.TypeTmpfs: + if mnt.BindOptions != nil { + return &errMountConfig{mnt, errExtraField("BindOptions")} + } + if len(mnt.Source) != 0 { + return &errMountConfig{mnt, errExtraField("Source")} + } + if _, err := p.ConvertTmpfsOptions(mnt.TmpfsOptions, mnt.ReadOnly); err != nil { + return &errMountConfig{mnt, err} + } + default: + return &errMountConfig{mnt, errors.New("mount type unknown")} + } + return nil +} + +// read-write modes +var rwModes = map[string]bool{ + "rw": true, + "ro": true, +} + +// label modes +var linuxLabelModes = map[string]bool{ + "Z": true, + "z": true, +} + +// consistency modes +var linuxConsistencyModes = map[mount.Consistency]bool{ + mount.ConsistencyFull: true, + mount.ConsistencyCached: true, + mount.ConsistencyDelegated: true, +} +var linuxPropagationModes = map[mount.Propagation]bool{ + mount.PropagationPrivate: true, + mount.PropagationRPrivate: true, + mount.PropagationSlave: true, + mount.PropagationRSlave: true, + mount.PropagationShared: true, + mount.PropagationRShared: true, +} + +const linuxDefaultPropagationMode = mount.PropagationRPrivate + +func linuxGetPropagation(mode string) mount.Propagation { + for _, o := range strings.Split(mode, ",") { + prop := mount.Propagation(o) + if linuxPropagationModes[prop] { + return prop + } + } + return linuxDefaultPropagationMode +} + +func linuxHasPropagation(mode string) bool { + for _, o := range strings.Split(mode, ",") { + if linuxPropagationModes[mount.Propagation(o)] { + return true + } + } + return false +} + +func linuxValidMountMode(mode string) bool { + if mode == "" { + return true + } + + rwModeCount := 0 + labelModeCount := 0 + propagationModeCount := 0 + copyModeCount := 0 + consistencyModeCount := 0 + + for _, o := range strings.Split(mode, ",") { + switch { + case rwModes[o]: + rwModeCount++ + case linuxLabelModes[o]: + labelModeCount++ + case 
linuxPropagationModes[mount.Propagation(o)]: + propagationModeCount++ + case copyModeExists(o): + copyModeCount++ + case linuxConsistencyModes[mount.Consistency(o)]: + consistencyModeCount++ + default: + return false + } + } + + // Only one string for each mode is allowed. + if rwModeCount > 1 || labelModeCount > 1 || propagationModeCount > 1 || copyModeCount > 1 || consistencyModeCount > 1 { + return false + } + return true +} + +func (p *linuxParser) ReadWrite(mode string) bool { + if !linuxValidMountMode(mode) { + return false + } + + for _, o := range strings.Split(mode, ",") { + if o == "ro" { + return false + } + } + return true +} + +func (p *linuxParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) { + arr, err := linuxSplitRawSpec(raw) + if err != nil { + return nil, err + } + + var spec mount.Mount + var mode string + switch len(arr) { + case 1: + // Just a destination path in the container + spec.Target = arr[0] + case 2: + if linuxValidMountMode(arr[1]) { + // Destination + Mode is not a valid volume - volumes + // cannot include a mode. e.g. /foo:rw + return nil, errInvalidSpec(raw) + } + // Host Source Path or Name + Destination + spec.Source = arr[0] + spec.Target = arr[1] + case 3: + // HostSourcePath+DestinationPath+Mode + spec.Source = arr[0] + spec.Target = arr[1] + mode = arr[2] + default: + return nil, errInvalidSpec(raw) + } + + if !linuxValidMountMode(mode) { + return nil, errInvalidMode(mode) + } + + if path.IsAbs(spec.Source) { + spec.Type = mount.TypeBind + } else { + spec.Type = mount.TypeVolume + } + + spec.ReadOnly = !p.ReadWrite(mode) + + // cannot assume that if a volume driver is passed in that we should set it + if volumeDriver != "" && spec.Type == mount.TypeVolume { + spec.VolumeOptions = &mount.VolumeOptions{ + DriverConfig: &mount.Driver{Name: volumeDriver}, + } + } + + if copyData, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet { + if spec.VolumeOptions == nil { + spec.VolumeOptions = &mount.VolumeOptions{} + } + spec.VolumeOptions.NoCopy = !copyData + } + if linuxHasPropagation(mode) { + spec.BindOptions = &mount.BindOptions{ + Propagation: linuxGetPropagation(mode), + } + } + + mp, err := p.parseMountSpec(spec, false) + if mp != nil { + mp.Mode = mode + } + if err != nil { + err = fmt.Errorf("%v: %v", errInvalidSpec(raw), err) + } + return mp, err +} +func (p *linuxParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) { + return p.parseMountSpec(cfg, true) +} +func (p *linuxParser) parseMountSpec(cfg mount.Mount, validateBindSourceExists bool) (*MountPoint, error) { + if err := p.validateMountConfigImpl(&cfg, validateBindSourceExists); err != nil { + return nil, err + } + mp := &MountPoint{ + RW: !cfg.ReadOnly, + Destination: path.Clean(filepath.ToSlash(cfg.Target)), + Type: cfg.Type, + Spec: cfg, + } + + switch cfg.Type { + case mount.TypeVolume: + if cfg.Source == "" { + mp.Name = stringid.GenerateNonCryptoID() + } else { + mp.Name = cfg.Source + } + mp.CopyData = p.DefaultCopyMode() + + if cfg.VolumeOptions != nil { + if cfg.VolumeOptions.DriverConfig != nil { + mp.Driver = cfg.VolumeOptions.DriverConfig.Name + } + if cfg.VolumeOptions.NoCopy { + mp.CopyData = false + } + } + case mount.TypeBind: + mp.Source = path.Clean(filepath.ToSlash(cfg.Source)) + if cfg.BindOptions != nil && len(cfg.BindOptions.Propagation) > 0 { + mp.Propagation = cfg.BindOptions.Propagation + } else { + // If user did not specify a propagation mode, get + // default propagation mode. 
+ mp.Propagation = linuxDefaultPropagationMode + } + case mount.TypeTmpfs: + // NOP + } + return mp, nil +} + +func (p *linuxParser) ParseVolumesFrom(spec string) (string, string, error) { + if len(spec) == 0 { + return "", "", fmt.Errorf("volumes-from specification cannot be an empty string") + } + + specParts := strings.SplitN(spec, ":", 2) + id := specParts[0] + mode := "rw" + + if len(specParts) == 2 { + mode = specParts[1] + if !linuxValidMountMode(mode) { + return "", "", errInvalidMode(mode) + } + // For now don't allow propagation properties while importing + // volumes from data container. These volumes will inherit + // the same propagation property as of the original volume + // in data container. This probably can be relaxed in future. + if linuxHasPropagation(mode) { + return "", "", errInvalidMode(mode) + } + // Do not allow copy modes on volumes-from + if _, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet { + return "", "", errInvalidMode(mode) + } + } + return id, mode, nil +} + +func (p *linuxParser) DefaultPropagationMode() mount.Propagation { + return linuxDefaultPropagationMode +} + +func (p *linuxParser) ConvertTmpfsOptions(opt *mount.TmpfsOptions, readOnly bool) (string, error) { + var rawOpts []string + if readOnly { + rawOpts = append(rawOpts, "ro") + } + + if opt != nil && opt.Mode != 0 { + rawOpts = append(rawOpts, fmt.Sprintf("mode=%o", opt.Mode)) + } + + if opt != nil && opt.SizeBytes != 0 { + // calculate suffix here, making this linux specific, but that is + // okay, since API is that way anyways. + + // we do this by finding the suffix that divides evenly into the + // value, returning the value itself, with no suffix, if it fails. + // + // For the most part, we don't enforce any semantic to this values. + // The operating system will usually align this and enforce minimum + // and maximums. + var ( + size = opt.SizeBytes + suffix string + ) + for _, r := range []struct { + suffix string + divisor int64 + }{ + {"g", 1 << 30}, + {"m", 1 << 20}, + {"k", 1 << 10}, + } { + if size%r.divisor == 0 { + size = size / r.divisor + suffix = r.suffix + break + } + } + + rawOpts = append(rawOpts, fmt.Sprintf("size=%d%s", size, suffix)) + } + return strings.Join(rawOpts, ","), nil +} + +func (p *linuxParser) DefaultCopyMode() bool { + return true +} +func (p *linuxParser) ValidateVolumeName(name string) error { + return nil +} + +func (p *linuxParser) IsBackwardCompatible(m *MountPoint) bool { + return len(m.Source) > 0 || m.Driver == volume.DefaultDriverName +} + +func (p *linuxParser) ValidateTmpfsMountDestination(dest string) error { + if err := linuxValidateNotRoot(dest); err != nil { + return err + } + return linuxValidateAbsolute(dest) +} diff --git a/extender/code/vendor/github.com/docker/docker/volume/mounts/mounts.go b/extender/code/vendor/github.com/docker/docker/volume/mounts/mounts.go new file mode 100644 index 000000000..63a140681 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/volume/mounts/mounts.go @@ -0,0 +1,181 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/volume" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" +) + +// MountPoint is the intersection point between a volume and a container. 
It +// specifies which volume is to be used and where inside a container it should +// be mounted. +// +// Note that this type is embedded in `container.Container` object and persisted to disk. +// Changes to this struct need to be synced with on-disk state. +type MountPoint struct { + // Source is the source path of the mount. + // E.g. `mount --bind /foo /bar`, `/foo` is the `Source`. + Source string + // Destination is the path relative to the container root (`/`) to the mount point + // It is where the `Source` is mounted to + Destination string + // RW is set to true when the mountpoint should be mounted as read-write + RW bool + // Name is the name reference to the underlying data defined by `Source` + // e.g., the volume name + Name string + // Driver is the volume driver used to create the volume (if it is a volume) + Driver string + // Type of mount to use, see `Type` definitions in github.com/docker/docker/api/types/mount + Type mounttypes.Type `json:",omitempty"` + // Volume is the volume providing data to this mountpoint. + // This is nil unless `Type` is set to `TypeVolume` + Volume volume.Volume `json:"-"` + + // Mode is the comma separated list of options supplied by the user when creating + // the bind/volume mount. + // Note Mode is not used on Windows + Mode string `json:"Relabel,omitempty"` // Originally field was `Relabel` + + // Propagation describes how the mounts are propagated from the host into the + // mount point, and vice-versa. + // See https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt + // Note Propagation is not used on Windows + Propagation mounttypes.Propagation `json:",omitempty"` // Mount propagation string + + // Specifies if data should be copied from the container before the first mount + // Use a pointer here so we can tell if the user set this value explicitly + // This allows us to error out when the user explicitly enabled copy but we can't copy due to the volume being populated + CopyData bool `json:"-"` + // ID is the opaque ID used to pass to the volume driver. + // This should be set by calls to `Mount` and unset by calls to `Unmount` + ID string `json:",omitempty"` + + // Spec is a copy of the API request that created this mount. + Spec mounttypes.Mount + + // Some bind mounts should not be automatically created. + // (Some are auto-created for backwards-compatibility) + // This is checked on the API but setting this here prevents race conditions + // where a bind dir that existed during validation is removed before reaching the setup code. + SkipMountpointCreation bool + + // Track usage of this mountpoint + // Specifically needed for containers which are running and calls to `docker cp` + // because both these actions require mounting the volumes. + active int +} + +// Cleanup frees resources used by the mountpoint +func (m *MountPoint) Cleanup() error { + if m.Volume == nil || m.ID == "" { + return nil + } + + if err := m.Volume.Unmount(m.ID); err != nil { + return errors.Wrapf(err, "error unmounting volume %s", m.Volume.Name()) + } + + m.active-- + if m.active == 0 { + m.ID = "" + } + return nil +} + +// Setup sets up a mount point by either mounting the volume if it is +// configured, or creating the source directory if supplied. +// The optional checkFun parameter allows doing additional checking +// before creating the source directory on the host.
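// A minimal sketch of the reference counting used by Cleanup above and Setup
// below: every mount of a volume-backed MountPoint bumps `active`, and the
// driver ID is only cleared once the last user has gone away. The type here is
// a simplified stand-in for the vendored MountPoint.
package main

import "fmt"

type refMount struct {
	id     string
	active int
}

func (m *refMount) setup(id string) { m.id = id; m.active++ }

func (m *refMount) cleanup() {
	if m.active == 0 {
		return
	}
	m.active--
	if m.active == 0 {
		m.id = "" // last unmount releases the driver handle
	}
}

func main() {
	m := &refMount{}
	m.setup("abc") // container start
	m.setup("abc") // e.g. a concurrent `docker cp` against the running container
	m.cleanup()
	fmt.Printf("after first cleanup: active=%d id=%q\n", m.active, m.id)
	m.cleanup()
	fmt.Printf("after second cleanup: active=%d id=%q\n", m.active, m.id)
}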
+func (m *MountPoint) Setup(mountLabel string, rootIDs idtools.Identity, checkFun func(m *MountPoint) error) (path string, err error) { + if m.SkipMountpointCreation { + return m.Source, nil + } + + defer func() { + if err != nil || !label.RelabelNeeded(m.Mode) { + return + } + + var sourcePath string + sourcePath, err = filepath.EvalSymlinks(m.Source) + if err != nil { + path = "" + err = errors.Wrapf(err, "error evaluating symlinks from mount source %q", m.Source) + return + } + err = label.Relabel(sourcePath, mountLabel, label.IsShared(m.Mode)) + if err == syscall.ENOTSUP { + err = nil + } + if err != nil { + path = "" + err = errors.Wrapf(err, "error setting label on mount source '%s'", sourcePath) + } + }() + + if m.Volume != nil { + id := m.ID + if id == "" { + id = stringid.GenerateNonCryptoID() + } + path, err := m.Volume.Mount(id) + if err != nil { + return "", errors.Wrapf(err, "error while mounting volume '%s'", m.Source) + } + + m.ID = id + m.active++ + return path, nil + } + + if len(m.Source) == 0 { + return "", fmt.Errorf("Unable to setup mount point, neither source nor volume defined") + } + + if m.Type == mounttypes.TypeBind { + // Before creating the source directory on the host, invoke checkFun if it's not nil. One of + // the use case is to forbid creating the daemon socket as a directory if the daemon is in + // the process of shutting down. + if checkFun != nil { + if err := checkFun(m); err != nil { + return "", err + } + } + + // idtools.MkdirAllNewAs() produces an error if m.Source exists and is a file (not a directory) + // also, makes sure that if the directory is created, the correct remapped rootUID/rootGID will own it + if err := idtools.MkdirAllAndChownNew(m.Source, 0755, rootIDs); err != nil { + if perr, ok := err.(*os.PathError); ok { + if perr.Err != syscall.ENOTDIR { + return "", errors.Wrapf(err, "error while creating mount source path '%s'", m.Source) + } + } + } + } + return m.Source, nil +} + +// Path returns the path of a volume in a mount point. +func (m *MountPoint) Path() string { + if m.Volume != nil { + return m.Volume.Path() + } + return m.Source +} + +func errInvalidMode(mode string) error { + return errors.Errorf("invalid mode: %v", mode) +} + +func errInvalidSpec(spec string) error { + return errors.Errorf("invalid volume specification: '%s'", spec) +} diff --git a/extender/code/vendor/github.com/docker/docker/volume/mounts/parser.go b/extender/code/vendor/github.com/docker/docker/volume/mounts/parser.go new file mode 100644 index 000000000..73681750e --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/volume/mounts/parser.go @@ -0,0 +1,47 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "errors" + "runtime" + + "github.com/docker/docker/api/types/mount" +) + +const ( + // OSLinux is the same as runtime.GOOS on linux + OSLinux = "linux" + // OSWindows is the same as runtime.GOOS on windows + OSWindows = "windows" +) + +// ErrVolumeTargetIsRoot is returned when the target destination is root. +// It's used by both LCOW and Linux parsers. 
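// A minimal sketch of the raw bind-spec shapes accepted by the platform
// parsers in this package, assuming the linux rule of at most three
// ":"-separated parts (source:target:mode); the real parsers also classify
// volume vs bind mounts and validate modes, and Windows uses a regex instead,
// since drive letters like `c:\foo` would trip this colon count.
package main

import (
	"fmt"
	"strings"
)

func linuxSplit(raw string) ([]string, error) {
	if strings.Count(raw, ":") > 2 {
		return nil, fmt.Errorf("invalid volume specification: '%s'", raw)
	}
	arr := strings.SplitN(raw, ":", 3)
	if arr[0] == "" {
		return nil, fmt.Errorf("invalid volume specification: '%s'", raw)
	}
	return arr, nil
}

func main() {
	for _, raw := range []string{"/data", "/src:/dst", "/src:/dst:ro", "a:b:c:d"} {
		parts, err := linuxSplit(raw)
		fmt.Printf("%-14s -> %v %v\n", raw, parts, err)
	}
}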
+var ErrVolumeTargetIsRoot = errors.New("invalid specification: destination can't be '/'") + +// Parser represents a platform specific parser for mount expressions +type Parser interface { + ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) + ParseMountSpec(cfg mount.Mount) (*MountPoint, error) + ParseVolumesFrom(spec string) (string, string, error) + DefaultPropagationMode() mount.Propagation + ConvertTmpfsOptions(opt *mount.TmpfsOptions, readOnly bool) (string, error) + DefaultCopyMode() bool + ValidateVolumeName(name string) error + ReadWrite(mode string) bool + IsBackwardCompatible(m *MountPoint) bool + HasResource(m *MountPoint, absPath string) bool + ValidateTmpfsMountDestination(dest string) error + ValidateMountConfig(mt *mount.Mount) error +} + +// NewParser creates a parser for a given container OS, depending on the current host OS (linux on a windows host will resolve to an lcowParser) +func NewParser(containerOS string) Parser { + switch containerOS { + case OSWindows: + return &windowsParser{} + } + if runtime.GOOS == OSWindows { + return &lcowParser{} + } + return &linuxParser{} +} diff --git a/extender/code/vendor/github.com/docker/docker/volume/mounts/validate.go b/extender/code/vendor/github.com/docker/docker/volume/mounts/validate.go new file mode 100644 index 000000000..9fc910902 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/volume/mounts/validate.go @@ -0,0 +1,28 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "fmt" + + "github.com/docker/docker/api/types/mount" + "github.com/pkg/errors" +) + +type errMountConfig struct { + mount *mount.Mount + err error +} + +func (e *errMountConfig) Error() string { + return fmt.Sprintf("invalid mount config for type %q: %v", e.mount.Type, e.err.Error()) +} + +func errBindSourceDoesNotExist(path string) error { + return errors.Errorf("bind source path does not exist: %s", path) +} + +func errExtraField(name string) error { + return errors.Errorf("field %s must not be specified", name) +} +func errMissingField(name string) error { + return errors.Errorf("field %s must not be empty", name) +} diff --git a/extender/code/vendor/github.com/docker/docker/volume/mounts/volume_copy.go b/extender/code/vendor/github.com/docker/docker/volume/mounts/volume_copy.go new file mode 100644 index 000000000..04056fa50 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/volume/mounts/volume_copy.go @@ -0,0 +1,23 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +import "strings" + +// {<copy mode>=isEnabled} +var copyModes = map[string]bool{ + "nocopy": false, +} + +func copyModeExists(mode string) bool { + _, exists := copyModes[mode] + return exists +} + +// getCopyMode gets the copy mode from the mode string for mounts +func getCopyMode(mode string, def bool) (bool, bool) { + for _, o := range strings.Split(mode, ",") { + if isEnabled, exists := copyModes[o]; exists { + return isEnabled, true + } + } + return def, false +} diff --git a/extender/code/vendor/github.com/docker/docker/volume/mounts/volume_unix.go b/extender/code/vendor/github.com/docker/docker/volume/mounts/volume_unix.go new file mode 100644 index 000000000..c6d51e071 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/volume/mounts/volume_unix.go @@ -0,0 +1,18 @@ +// +build linux freebsd darwin + +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "fmt" + "path/filepath" + "strings" +) + +func (p *linuxParser) HasResource(m *MountPoint, absolutePath 
string) bool { + relPath, err := filepath.Rel(m.Destination, absolutePath) + return err == nil && relPath != ".." && !strings.HasPrefix(relPath, fmt.Sprintf("..%c", filepath.Separator)) +} + +func (p *windowsParser) HasResource(m *MountPoint, absolutePath string) bool { + return false +} diff --git a/extender/code/vendor/github.com/docker/docker/volume/mounts/volume_windows.go b/extender/code/vendor/github.com/docker/docker/volume/mounts/volume_windows.go new file mode 100644 index 000000000..773e7db88 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/volume/mounts/volume_windows.go @@ -0,0 +1,8 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +func (p *windowsParser) HasResource(m *MountPoint, absolutePath string) bool { + return false +} +func (p *linuxParser) HasResource(m *MountPoint, absolutePath string) bool { + return false +} diff --git a/extender/code/vendor/github.com/docker/docker/volume/mounts/windows_parser.go b/extender/code/vendor/github.com/docker/docker/volume/mounts/windows_parser.go new file mode 100644 index 000000000..ac6104404 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/volume/mounts/windows_parser.go @@ -0,0 +1,456 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "errors" + "fmt" + "os" + "regexp" + "runtime" + "strings" + + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/pkg/stringid" +) + +type windowsParser struct { +} + +const ( + // Spec should be in the format [source:]destination[:mode] + // + // Examples: c:\foo bar:d:rw + // c:\foo:d:\bar + // myname:d: + // d:\ + // + // Explanation of this regex! Thanks @thaJeztah on IRC and gist for help. See + // https://gist.github.com/thaJeztah/6185659e4978789fb2b2. A good place to + // test is https://regex-golang.appspot.com/assets/html/index.html + // + // Useful link for referencing named capturing groups: + // http://stackoverflow.com/questions/20750843/using-named-matches-from-go-regex + // + // There are three match groups: source, destination and mode. + // + + // rxHostDir is the first option of a source + rxHostDir = `(?:\\\\\?\\)?[a-z]:[\\/](?:[^\\/:*?"<>|\r\n]+[\\/]?)*` + // rxName is the second option of a source + rxName = `[^\\/:*?"<>|\r\n]+` + + // RXReservedNames are reserved names not possible on Windows + rxReservedNames = `(con)|(prn)|(nul)|(aux)|(com[1-9])|(lpt[1-9])` + + // rxPipe is a named path pipe (starts with `\\.\pipe\`, possibly with / instead of \) + rxPipe = `[/\\]{2}.[/\\]pipe[/\\][^:*?"<>|\r\n]+` + // rxSource is the combined possibilities for a source + rxSource = `((?P<source>((` + rxHostDir + `)|(` + rxName + `)|(` + rxPipe + `))):)?` + + // Source. Can be either a host directory, a name, or omitted: + // HostDir: + // - Essentially using the folder solution from + // https://www.safaribooksonline.com/library/view/regular-expressions-cookbook/9781449327453/ch08s18.html + // but adding case insensitivity. 
+ // - Must be an absolute path such as c:\path + // - Can include spaces such as `c:\program files` + // - And then followed by a colon which is not in the capture group + // - And can be optional + // Name: + // - Must not contain invalid NTFS filename characters (https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx) + // - And then followed by a colon which is not in the capture group + // - And can be optional + + // rxDestination is the regex expression for the mount destination + rxDestination = `(?P<destination>((?:\\\\\?\\)?([a-z]):((?:[\\/][^\\/:*?"<>\r\n]+)*[\\/]?))|(` + rxPipe + `))` + + rxLCOWDestination = `(?P<destination>/(?:[^\\/:*?"<>\r\n]+[/]?)*)` + // Destination (aka container path): + // - Variation on hostdir but can be a drive followed by colon as well + // - If a path, must be absolute. Can include spaces + // - Drive cannot be c: (explicitly checked in code, not RegEx) + + // rxMode is the regex expression for the mode of the mount + // Mode (optional): + // - Hopefully self explanatory in comparison to above regex's. + // - Colon is not in the capture group + rxMode = `(:(?P<mode>(?i)ro|rw))?` +) + +type mountValidator func(mnt *mount.Mount) error + +func windowsSplitRawSpec(raw, destRegex string) ([]string, error) { + specExp := regexp.MustCompile(`^` + rxSource + destRegex + rxMode + `$`) + match := specExp.FindStringSubmatch(strings.ToLower(raw)) + + // Must have something back + if len(match) == 0 { + return nil, errInvalidSpec(raw) + } + + var split []string + matchgroups := make(map[string]string) + // Pull out the sub expressions from the named capture groups + for i, name := range specExp.SubexpNames() { + matchgroups[name] = strings.ToLower(match[i]) + } + if source, exists := matchgroups["source"]; exists { + if source != "" { + split = append(split, source) + } + } + if destination, exists := matchgroups["destination"]; exists { + if destination != "" { + split = append(split, destination) + } + } + if mode, exists := matchgroups["mode"]; exists { + if mode != "" { + split = append(split, mode) + } + } + // Fix #26329. If the destination appears to be a file, and the source is null, + // it may be because we've fallen through the possible naming regex and hit a + // situation where the user intention was to map a file into a container through + // a local volume, but this is not supported by the platform. + if matchgroups["source"] == "" && matchgroups["destination"] != "" { + volExp := regexp.MustCompile(`^` + rxName + `$`) + reservedNameExp := regexp.MustCompile(`^` + rxReservedNames + `$`) + + if volExp.MatchString(matchgroups["destination"]) { + if reservedNameExp.MatchString(matchgroups["destination"]) { + return nil, fmt.Errorf("volume name %q cannot be a reserved word for Windows filenames", matchgroups["destination"]) + } + } else { + + exists, isDir, _ := currentFileInfoProvider.fileInfo(matchgroups["destination"]) + if exists && !isDir { + return nil, fmt.Errorf("file '%s' cannot be mapped. 
Only directories can be mapped on this platform", matchgroups["destination"]) + + } + } + } + return split, nil +} + +func windowsValidMountMode(mode string) bool { + if mode == "" { + return true + } + return rwModes[strings.ToLower(mode)] +} +func windowsValidateNotRoot(p string) error { + p = strings.ToLower(strings.Replace(p, `/`, `\`, -1)) + if p == "c:" || p == `c:\` { + return fmt.Errorf("destination path cannot be `c:` or `c:\\`: %v", p) + } + return nil +} + +var windowsSpecificValidators mountValidator = func(mnt *mount.Mount) error { + return windowsValidateNotRoot(mnt.Target) +} + +func windowsValidateRegex(p, r string) error { + if regexp.MustCompile(`^` + r + `$`).MatchString(strings.ToLower(p)) { + return nil + } + return fmt.Errorf("invalid mount path: '%s'", p) +} +func windowsValidateAbsolute(p string) error { + if err := windowsValidateRegex(p, rxDestination); err != nil { + return fmt.Errorf("invalid mount path: '%s' mount path must be absolute", p) + } + return nil +} + +func windowsDetectMountType(p string) mount.Type { + if strings.HasPrefix(p, `\\.\pipe\`) { + return mount.TypeNamedPipe + } else if regexp.MustCompile(`^` + rxHostDir + `$`).MatchString(p) { + return mount.TypeBind + } else { + return mount.TypeVolume + } +} + +func (p *windowsParser) ReadWrite(mode string) bool { + return strings.ToLower(mode) != "ro" +} + +// IsVolumeNameValid checks a volume name in a platform specific manner. +func (p *windowsParser) ValidateVolumeName(name string) error { + nameExp := regexp.MustCompile(`^` + rxName + `$`) + if !nameExp.MatchString(name) { + return errors.New("invalid volume name") + } + nameExp = regexp.MustCompile(`^` + rxReservedNames + `$`) + if nameExp.MatchString(name) { + return fmt.Errorf("volume name %q cannot be a reserved word for Windows filenames", name) + } + return nil +} +func (p *windowsParser) ValidateMountConfig(mnt *mount.Mount) error { + return p.validateMountConfigReg(mnt, rxDestination, windowsSpecificValidators) +} + +type fileInfoProvider interface { + fileInfo(path string) (exist, isDir bool, err error) +} + +type defaultFileInfoProvider struct { +} + +func (defaultFileInfoProvider) fileInfo(path string) (exist, isDir bool, err error) { + fi, err := os.Stat(path) + if err != nil { + if !os.IsNotExist(err) { + return false, false, err + } + return false, false, nil + } + return true, fi.IsDir(), nil +} + +var currentFileInfoProvider fileInfoProvider = defaultFileInfoProvider{} + +func (p *windowsParser) validateMountConfigReg(mnt *mount.Mount, destRegex string, additionalValidators ...mountValidator) error { + + for _, v := range additionalValidators { + if err := v(mnt); err != nil { + return &errMountConfig{mnt, err} + } + } + if len(mnt.Target) == 0 { + return &errMountConfig{mnt, errMissingField("Target")} + } + + if err := windowsValidateRegex(mnt.Target, destRegex); err != nil { + return &errMountConfig{mnt, err} + } + + switch mnt.Type { + case mount.TypeBind: + if len(mnt.Source) == 0 { + return &errMountConfig{mnt, errMissingField("Source")} + } + // Don't error out just because the propagation mode is not supported on the platform + if opts := mnt.BindOptions; opts != nil { + if len(opts.Propagation) > 0 { + return &errMountConfig{mnt, fmt.Errorf("invalid propagation mode: %s", opts.Propagation)} + } + } + if mnt.VolumeOptions != nil { + return &errMountConfig{mnt, errExtraField("VolumeOptions")} + } + + if err := windowsValidateAbsolute(mnt.Source); err != nil { + return &errMountConfig{mnt, err} + } + + exists, isdir, err := 
currentFileInfoProvider.fileInfo(mnt.Source) + if err != nil { + return &errMountConfig{mnt, err} + } + if !exists { + return &errMountConfig{mnt, errBindSourceDoesNotExist(mnt.Source)} + } + if !isdir { + return &errMountConfig{mnt, fmt.Errorf("source path must be a directory")} + } + + case mount.TypeVolume: + if mnt.BindOptions != nil { + return &errMountConfig{mnt, errExtraField("BindOptions")} + } + + if len(mnt.Source) == 0 && mnt.ReadOnly { + return &errMountConfig{mnt, fmt.Errorf("must not set ReadOnly mode when using anonymous volumes")} + } + + if len(mnt.Source) != 0 { + if err := p.ValidateVolumeName(mnt.Source); err != nil { + return &errMountConfig{mnt, err} + } + } + case mount.TypeNamedPipe: + if len(mnt.Source) == 0 { + return &errMountConfig{mnt, errMissingField("Source")} + } + + if mnt.BindOptions != nil { + return &errMountConfig{mnt, errExtraField("BindOptions")} + } + + if mnt.ReadOnly { + return &errMountConfig{mnt, errExtraField("ReadOnly")} + } + + if windowsDetectMountType(mnt.Source) != mount.TypeNamedPipe { + return &errMountConfig{mnt, fmt.Errorf("'%s' is not a valid pipe path", mnt.Source)} + } + + if windowsDetectMountType(mnt.Target) != mount.TypeNamedPipe { + return &errMountConfig{mnt, fmt.Errorf("'%s' is not a valid pipe path", mnt.Target)} + } + default: + return &errMountConfig{mnt, errors.New("mount type unknown")} + } + return nil +} +func (p *windowsParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) { + return p.parseMountRaw(raw, volumeDriver, rxDestination, true, windowsSpecificValidators) +} + +func (p *windowsParser) parseMountRaw(raw, volumeDriver, destRegex string, convertTargetToBackslash bool, additionalValidators ...mountValidator) (*MountPoint, error) { + arr, err := windowsSplitRawSpec(raw, destRegex) + if err != nil { + return nil, err + } + + var spec mount.Mount + var mode string + switch len(arr) { + case 1: + // Just a destination path in the container + spec.Target = arr[0] + case 2: + if windowsValidMountMode(arr[1]) { + // Destination + Mode is not a valid volume - volumes + // cannot include a mode. e.g. /foo:rw + return nil, errInvalidSpec(raw) + } + // Host Source Path or Name + Destination + spec.Source = strings.Replace(arr[0], `/`, `\`, -1) + spec.Target = arr[1] + case 3: + // HostSourcePath+DestinationPath+Mode + spec.Source = strings.Replace(arr[0], `/`, `\`, -1) + spec.Target = arr[1] + mode = arr[2] + default: + return nil, errInvalidSpec(raw) + } + if convertTargetToBackslash { + spec.Target = strings.Replace(spec.Target, `/`, `\`, -1) + } + + if !windowsValidMountMode(mode) { + return nil, errInvalidMode(mode) + } + + spec.Type = windowsDetectMountType(spec.Source) + spec.ReadOnly = !p.ReadWrite(mode) + + // cannot assume that if a volume driver is passed in that we should set it + if volumeDriver != "" && spec.Type == mount.TypeVolume { + spec.VolumeOptions = &mount.VolumeOptions{ + DriverConfig: &mount.Driver{Name: volumeDriver}, + } + } + + if copyData, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet { + if spec.VolumeOptions == nil { + spec.VolumeOptions = &mount.VolumeOptions{} + } + spec.VolumeOptions.NoCopy = !copyData + } + + mp, err := p.parseMountSpec(spec, destRegex, convertTargetToBackslash, additionalValidators...) 
+ if mp != nil { + mp.Mode = mode + } + if err != nil { + err = fmt.Errorf("%v: %v", errInvalidSpec(raw), err) + } + return mp, err +} + +func (p *windowsParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) { + return p.parseMountSpec(cfg, rxDestination, true, windowsSpecificValidators) +} +func (p *windowsParser) parseMountSpec(cfg mount.Mount, destRegex string, convertTargetToBackslash bool, additionalValidators ...mountValidator) (*MountPoint, error) { + if err := p.validateMountConfigReg(&cfg, destRegex, additionalValidators...); err != nil { + return nil, err + } + mp := &MountPoint{ + RW: !cfg.ReadOnly, + Destination: cfg.Target, + Type: cfg.Type, + Spec: cfg, + } + if convertTargetToBackslash { + mp.Destination = strings.Replace(cfg.Target, `/`, `\`, -1) + } + + switch cfg.Type { + case mount.TypeVolume: + if cfg.Source == "" { + mp.Name = stringid.GenerateNonCryptoID() + } else { + mp.Name = cfg.Source + } + mp.CopyData = p.DefaultCopyMode() + + if cfg.VolumeOptions != nil { + if cfg.VolumeOptions.DriverConfig != nil { + mp.Driver = cfg.VolumeOptions.DriverConfig.Name + } + if cfg.VolumeOptions.NoCopy { + mp.CopyData = false + } + } + case mount.TypeBind: + mp.Source = strings.Replace(cfg.Source, `/`, `\`, -1) + case mount.TypeNamedPipe: + mp.Source = strings.Replace(cfg.Source, `/`, `\`, -1) + } + // cleanup trailing `\` except for paths like `c:\` + if len(mp.Source) > 3 && mp.Source[len(mp.Source)-1] == '\\' { + mp.Source = mp.Source[:len(mp.Source)-1] + } + if len(mp.Destination) > 3 && mp.Destination[len(mp.Destination)-1] == '\\' { + mp.Destination = mp.Destination[:len(mp.Destination)-1] + } + return mp, nil +} + +func (p *windowsParser) ParseVolumesFrom(spec string) (string, string, error) { + if len(spec) == 0 { + return "", "", fmt.Errorf("volumes-from specification cannot be an empty string") + } + + specParts := strings.SplitN(spec, ":", 2) + id := specParts[0] + mode := "rw" + + if len(specParts) == 2 { + mode = specParts[1] + if !windowsValidMountMode(mode) { + return "", "", errInvalidMode(mode) + } + + // Do not allow copy modes on volumes-from + if _, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet { + return "", "", errInvalidMode(mode) + } + } + return id, mode, nil +} + +func (p *windowsParser) DefaultPropagationMode() mount.Propagation { + return mount.Propagation("") +} + +func (p *windowsParser) ConvertTmpfsOptions(opt *mount.TmpfsOptions, readOnly bool) (string, error) { + return "", fmt.Errorf("%s does not support tmpfs", runtime.GOOS) +} +func (p *windowsParser) DefaultCopyMode() bool { + return false +} +func (p *windowsParser) IsBackwardCompatible(m *MountPoint) bool { + return false +} + +func (p *windowsParser) ValidateTmpfsMountDestination(dest string) error { + return errors.New("Platform does not support tmpfs") +} diff --git a/extender/code/vendor/github.com/docker/docker/volume/volume.go b/extender/code/vendor/github.com/docker/docker/volume/volume.go new file mode 100644 index 000000000..61c824397 --- /dev/null +++ b/extender/code/vendor/github.com/docker/docker/volume/volume.go @@ -0,0 +1,69 @@ +package volume // import "github.com/docker/docker/volume" + +import ( + "time" +) + +// DefaultDriverName is the driver name used for the driver +// implemented in the local package. +const DefaultDriverName = "local" + +// Scopes define if a volume has is cluster-wide (global) or local only. 
+// Scopes are returned by the volume driver when it is queried for capabilities and then set on a volume +const ( + LocalScope = "local" + GlobalScope = "global" +) + +// Driver is for creating and removing volumes. +type Driver interface { + // Name returns the name of the volume driver. + Name() string + // Create makes a new volume with the given name. + Create(name string, opts map[string]string) (Volume, error) + // Remove deletes the volume. + Remove(vol Volume) (err error) + // List lists all the volumes the driver has + List() ([]Volume, error) + // Get retrieves the volume with the requested name + Get(name string) (Volume, error) + // Scope returns the scope of the driver (e.g. `global` or `local`). + // Scope determines how the driver is handled at a cluster level + Scope() string +} + +// Capability defines a set of capabilities that a driver is able to handle. +type Capability struct { + // Scope is the scope of the driver, `global` or `local` + // A `global` scope indicates that the driver manages volumes across the cluster + // A `local` scope indicates that the driver only manages volumes resources local to the host + // Scope is declared by the driver + Scope string +} + +// Volume is a place to store data. It is backed by a specific driver, and can be mounted. +type Volume interface { + // Name returns the name of the volume + Name() string + // DriverName returns the name of the driver which owns this volume. + DriverName() string + // Path returns the absolute path to the volume. + Path() string + // Mount mounts the volume and returns the absolute path to + // where it can be consumed. + Mount(id string) (string, error) + // Unmount unmounts the volume when it is no longer in use. + Unmount(id string) error + // CreatedAt returns Volume Creation time + CreatedAt() (time.Time, error) + // Status returns low-level status information about a volume + Status() map[string]interface{} +} + +// DetailedVolume wraps a Volume with user-defined labels, options, and cluster scope (e.g., `local` or `global`) +type DetailedVolume interface { + Labels() map[string]string + Options() map[string]string + Scope() string + Volume +} diff --git a/extender/code/vendor/github.com/docker/go-connections/LICENSE b/extender/code/vendor/github.com/docker/go-connections/LICENSE new file mode 100644 index 000000000..b55b37bc3 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-connections/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/extender/code/vendor/github.com/docker/go-connections/nat/nat.go b/extender/code/vendor/github.com/docker/go-connections/nat/nat.go new file mode 100644 index 000000000..bb7e4e336 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-connections/nat/nat.go @@ -0,0 +1,242 @@ +// Package nat is a convenience package for manipulation of strings describing network ports. 
+package nat + +import ( + "fmt" + "net" + "strconv" + "strings" +) + +const ( + // portSpecTemplate is the expected format for port specifications + portSpecTemplate = "ip:hostPort:containerPort" +) + +// PortBinding represents a binding between a Host IP address and a Host Port +type PortBinding struct { + // HostIP is the host IP Address + HostIP string `json:"HostIp"` + // HostPort is the host port number + HostPort string +} + +// PortMap is a collection of PortBinding indexed by Port +type PortMap map[Port][]PortBinding + +// PortSet is a collection of structs indexed by Port +type PortSet map[Port]struct{} + +// Port is a string containing port number and protocol in the format "80/tcp" +type Port string + +// NewPort creates a new instance of a Port given a protocol and port number or port range +func NewPort(proto, port string) (Port, error) { + // Check for parsing issues on "port" now so we can avoid having + // to check it later on. + + portStartInt, portEndInt, err := ParsePortRangeToInt(port) + if err != nil { + return "", err + } + + if portStartInt == portEndInt { + return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil + } + return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil +} + +// ParsePort parses the port number string and returns an int +func ParsePort(rawPort string) (int, error) { + if len(rawPort) == 0 { + return 0, nil + } + port, err := strconv.ParseUint(rawPort, 10, 16) + if err != nil { + return 0, err + } + return int(port), nil +} + +// ParsePortRangeToInt parses the port range string and returns start/end ints +func ParsePortRangeToInt(rawPort string) (int, int, error) { + if len(rawPort) == 0 { + return 0, 0, nil + } + start, end, err := ParsePortRange(rawPort) + if err != nil { + return 0, 0, err + } + return int(start), int(end), nil +} + +// Proto returns the protocol of a Port +func (p Port) Proto() string { + proto, _ := SplitProtoPort(string(p)) + return proto +} + +// Port returns the port number of a Port +func (p Port) Port() string { + _, port := SplitProtoPort(string(p)) + return port +} + +// Int returns the port number of a Port as an int +func (p Port) Int() int { + portStr := p.Port() + // We don't need to check for an error because we're going to + // assume that any error would have been found, and reported, in NewPort() + port, _ := ParsePort(portStr) + return port +} + +// Range returns the start/end port numbers of a Port range as ints +func (p Port) Range() (int, int, error) { + return ParsePortRangeToInt(p.Port()) +} + +// SplitProtoPort splits a port in the format of proto/port +func SplitProtoPort(rawPort string) (string, string) { + parts := strings.Split(rawPort, "/") + l := len(parts) + if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { + return "", "" + } + if l == 1 { + return "tcp", rawPort + } + if len(parts[1]) == 0 { + return "tcp", parts[0] + } + return parts[1], parts[0] +} + +func validateProto(proto string) bool { + for _, availableProto := range []string{"tcp", "udp", "sctp"} { + if availableProto == proto { + return true + } + } + return false +} + +// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses +// these in to the internal types +func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { + var ( + exposedPorts = make(map[Port]struct{}, len(ports)) + bindings = make(map[Port][]PortBinding) + ) + for _, rawPort := range ports { + portMappings, err := ParsePortSpec(rawPort) + if err != nil { + return nil, nil, err + 
} + + for _, portMapping := range portMappings { + port := portMapping.Port + if _, exists := exposedPorts[port]; !exists { + exposedPorts[port] = struct{}{} + } + bslice, exists := bindings[port] + if !exists { + bslice = []PortBinding{} + } + bindings[port] = append(bslice, portMapping.Binding) + } + } + return exposedPorts, bindings, nil +} + +// PortMapping is a data object mapping a Port to a PortBinding +type PortMapping struct { + Port Port + Binding PortBinding +} + +func splitParts(rawport string) (string, string, string) { + parts := strings.Split(rawport, ":") + n := len(parts) + containerport := parts[n-1] + + switch n { + case 1: + return "", "", containerport + case 2: + return "", parts[0], containerport + case 3: + return parts[0], parts[1], containerport + default: + return strings.Join(parts[:n-2], ":"), parts[n-2], containerport + } +} + +// ParsePortSpec parses a port specification string into a slice of PortMappings +func ParsePortSpec(rawPort string) ([]PortMapping, error) { + var proto string + rawIP, hostPort, containerPort := splitParts(rawPort) + proto, containerPort = SplitProtoPort(containerPort) + + // Strip [] from IPV6 addresses + ip, _, err := net.SplitHostPort(rawIP + ":") + if err != nil { + return nil, fmt.Errorf("Invalid ip address %v: %s", rawIP, err) + } + if ip != "" && net.ParseIP(ip) == nil { + return nil, fmt.Errorf("Invalid ip address: %s", ip) + } + if containerPort == "" { + return nil, fmt.Errorf("No port specified: %s", rawPort) + } + + startPort, endPort, err := ParsePortRange(containerPort) + if err != nil { + return nil, fmt.Errorf("Invalid containerPort: %s", containerPort) + } + + var startHostPort, endHostPort uint64 = 0, 0 + if len(hostPort) > 0 { + startHostPort, endHostPort, err = ParsePortRange(hostPort) + if err != nil { + return nil, fmt.Errorf("Invalid hostPort: %s", hostPort) + } + } + + if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { + // Allow host port range iff containerPort is not a range. + // In this case, use the host port range as the dynamic + // host port range to allocate into. + if endPort != startPort { + return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) + } + } + + if !validateProto(strings.ToLower(proto)) { + return nil, fmt.Errorf("Invalid proto: %s", proto) + } + + ports := []PortMapping{} + for i := uint64(0); i <= (endPort - startPort); i++ { + containerPort = strconv.FormatUint(startPort+i, 10) + if len(hostPort) > 0 { + hostPort = strconv.FormatUint(startHostPort+i, 10) + } + // Set hostPort to a range only if there is a single container port + // and a dynamic host port. 
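To make the `ip:hostPort:containerPort` template concrete, here is a hedged usage sketch of the exported helpers in this file (the remainder of `ParsePortSpec` continues below); it assumes the vendored package is importable at its canonical path:

```go
package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	// The template is "ip:hostPort:containerPort" with an optional
	// "/proto" suffix; the protocol defaults to tcp.
	mappings, err := nat.ParsePortSpec("127.0.0.1:8080:80/tcp")
	if err != nil {
		panic(err)
	}
	for _, m := range mappings {
		fmt.Printf("%s -> %s:%s\n", m.Port, m.Binding.HostIP, m.Binding.HostPort)
	}
	// Output: 80/tcp -> 127.0.0.1:8080

	// A container port range expands to one PortMapping per port.
	ranged, _ := nat.ParsePortSpec("8000-8002")
	fmt.Println(len(ranged)) // 3
}
```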
+ if startPort == endPort && startHostPort != endHostPort { + hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) + } + port, err := NewPort(strings.ToLower(proto), containerPort) + if err != nil { + return nil, err + } + + binding := PortBinding{ + HostIP: ip, + HostPort: hostPort, + } + ports = append(ports, PortMapping{Port: port, Binding: binding}) + } + return ports, nil +} diff --git a/extender/code/vendor/github.com/docker/go-connections/nat/parse.go b/extender/code/vendor/github.com/docker/go-connections/nat/parse.go new file mode 100644 index 000000000..892adf8c6 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-connections/nat/parse.go @@ -0,0 +1,57 @@ +package nat + +import ( + "fmt" + "strconv" + "strings" +) + +// PartParser parses and validates the specified string (data) using the specified template +// e.g. ip:public:private -> 192.168.0.1:80:8000 +// DEPRECATED: do not use, this function may be removed in a future version +func PartParser(template, data string) (map[string]string, error) { + // ip:public:private + var ( + templateParts = strings.Split(template, ":") + parts = strings.Split(data, ":") + out = make(map[string]string, len(templateParts)) + ) + if len(parts) != len(templateParts) { + return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template) + } + + for i, t := range templateParts { + value := "" + if len(parts) > i { + value = parts[i] + } + out[t] = value + } + return out, nil +} + +// ParsePortRange parses and validates the specified string as a port-range (8000-9000) +func ParsePortRange(ports string) (uint64, uint64, error) { + if ports == "" { + return 0, 0, fmt.Errorf("Empty string specified for ports.") + } + if !strings.Contains(ports, "-") { + start, err := strconv.ParseUint(ports, 10, 16) + end := start + return start, end, err + } + + parts := strings.Split(ports, "-") + start, err := strconv.ParseUint(parts[0], 10, 16) + if err != nil { + return 0, 0, err + } + end, err := strconv.ParseUint(parts[1], 10, 16) + if err != nil { + return 0, 0, err + } + if end < start { + return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) + } + return start, end, nil +} diff --git a/extender/code/vendor/github.com/docker/go-connections/nat/sort.go b/extender/code/vendor/github.com/docker/go-connections/nat/sort.go new file mode 100644 index 000000000..ce950171e --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-connections/nat/sort.go @@ -0,0 +1,96 @@ +package nat + +import ( + "sort" + "strings" +) + +type portSorter struct { + ports []Port + by func(i, j Port) bool +} + +func (s *portSorter) Len() int { + return len(s.ports) +} + +func (s *portSorter) Swap(i, j int) { + s.ports[i], s.ports[j] = s.ports[j], s.ports[i] +} + +func (s *portSorter) Less(i, j int) bool { + ip := s.ports[i] + jp := s.ports[j] + + return s.by(ip, jp) +} + +// Sort sorts a list of ports using the provided predicate +// This function should compare `i` and `j`, returning true if `i` is +// considered to be less than `j` +func Sort(ports []Port, predicate func(i, j Port) bool) { + s := &portSorter{ports, predicate} + sort.Sort(s) +} + +type portMapEntry struct { + port Port + binding PortBinding +} + +type portMapSorter []portMapEntry + +func (s portMapSorter) Len() int { return len(s) } +func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// sort the port so that the order is: +// 1. port with larger specified bindings +// 2. larger port +// 3. 
port with tcp protocol +func (s portMapSorter) Less(i, j int) bool { + pi, pj := s[i].port, s[j].port + hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort) + return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp") +} + +// SortPortMap sorts the list of ports and their respective mappings. Ports +// with an explicit HostPort will be placed first. +func SortPortMap(ports []Port, bindings PortMap) { + s := portMapSorter{} + for _, p := range ports { + if binding, ok := bindings[p]; ok { + for _, b := range binding { + s = append(s, portMapEntry{port: p, binding: b}) + } + bindings[p] = []PortBinding{} + } else { + s = append(s, portMapEntry{port: p}) + } + } + + sort.Sort(s) + var ( + i int + pm = make(map[Port]struct{}) + ) + // reorder ports + for _, entry := range s { + if _, ok := pm[entry.port]; !ok { + ports[i] = entry.port + pm[entry.port] = struct{}{} + i++ + } + // reorder bindings for this port + if _, ok := bindings[entry.port]; ok { + bindings[entry.port] = append(bindings[entry.port], entry.binding) + } + } +} + +func toInt(s string) uint64 { + i, _, err := ParsePortRange(s) + if err != nil { + i = 0 + } + return i +} diff --git a/extender/code/vendor/github.com/docker/go-connections/sockets/README.md b/extender/code/vendor/github.com/docker/go-connections/sockets/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/extender/code/vendor/github.com/docker/go-connections/sockets/inmem_socket.go b/extender/code/vendor/github.com/docker/go-connections/sockets/inmem_socket.go new file mode 100644 index 000000000..99846ffdd --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-connections/sockets/inmem_socket.go @@ -0,0 +1,81 @@ +package sockets + +import ( + "errors" + "net" + "sync" +) + +var errClosed = errors.New("use of closed network connection") + +// InmemSocket implements net.Listener using in-memory only connections. +type InmemSocket struct { + chConn chan net.Conn + chClose chan struct{} + addr string + mu sync.Mutex +} + +// dummyAddr is used to satisfy net.Addr for the in-mem socket +// it is just stored as a string and returns the string for all calls +type dummyAddr string + +// NewInmemSocket creates an in-memory only net.Listener +// The addr argument can be any string, but is used to satisfy the `Addr()` part +// of the net.Listener interface +func NewInmemSocket(addr string, bufSize int) *InmemSocket { + return &InmemSocket{ + chConn: make(chan net.Conn, bufSize), + chClose: make(chan struct{}), + addr: addr, + } +} + +// Addr returns the socket's addr string to satisfy net.Listener +func (s *InmemSocket) Addr() net.Addr { + return dummyAddr(s.addr) +} + +// Accept implements the Accept method in the Listener interface; it waits for the next call and returns a generic Conn. +func (s *InmemSocket) Accept() (net.Conn, error) { + select { + case conn := <-s.chConn: + return conn, nil + case <-s.chClose: + return nil, errClosed + } +} + +// Close closes the listener. It will be unavailable for use once closed.
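The in-memory listener above is mainly useful in tests. A hedged sketch of the `Dial`/`Accept` rendezvous (note that `net.Pipe` connections are synchronous, so writes block until the other side reads):

```go
package main

import (
	"fmt"

	"github.com/docker/go-connections/sockets"
)

func main() {
	l := sockets.NewInmemSocket("test-addr", 1)
	defer l.Close()

	go func() {
		// Dial hands the server half of a net.Pipe to Accept.
		conn, err := l.Dial("inmem", "test-addr")
		if err != nil {
			return
		}
		conn.Write([]byte("ping")) // blocks until the reader arrives
		conn.Close()
	}()

	conn, err := l.Accept()
	if err != nil {
		panic(err)
	}
	buf := make([]byte, 4)
	n, _ := conn.Read(buf)
	fmt.Println(string(buf[:n])) // ping
}
```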
+func (s *InmemSocket) Close() error { + s.mu.Lock() + defer s.mu.Unlock() + select { + case <-s.chClose: + default: + close(s.chClose) + } + return nil +} + +// Dial is used to establish a connection with the in-mem server +func (s *InmemSocket) Dial(network, addr string) (net.Conn, error) { + srvConn, clientConn := net.Pipe() + select { + case s.chConn <- srvConn: + case <-s.chClose: + return nil, errClosed + } + + return clientConn, nil +} + +// Network returns the addr string, satisfies net.Addr +func (a dummyAddr) Network() string { + return string(a) +} + +// String returns the string form +func (a dummyAddr) String() string { + return string(a) +} diff --git a/extender/code/vendor/github.com/docker/go-connections/sockets/proxy.go b/extender/code/vendor/github.com/docker/go-connections/sockets/proxy.go new file mode 100644 index 000000000..98e9a1dc6 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-connections/sockets/proxy.go @@ -0,0 +1,51 @@ +package sockets + +import ( + "net" + "net/url" + "os" + "strings" + + "golang.org/x/net/proxy" +) + +// GetProxyEnv allows access to the uppercase and the lowercase forms of +// proxy-related variables. See the Go specification for details on these +// variables. https://golang.org/pkg/net/http/ +func GetProxyEnv(key string) string { + proxyValue := os.Getenv(strings.ToUpper(key)) + if proxyValue == "" { + return os.Getenv(strings.ToLower(key)) + } + return proxyValue +} + +// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a +// proxy.Dialer which will route the connections through the proxy using the +// given dialer. +func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) { + allProxy := GetProxyEnv("all_proxy") + if len(allProxy) == 0 { + return direct, nil + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return direct, err + } + + proxyFromURL, err := proxy.FromURL(proxyURL, direct) + if err != nil { + return direct, err + } + + noProxy := GetProxyEnv("no_proxy") + if len(noProxy) == 0 { + return proxyFromURL, nil + } + + perHost := proxy.NewPerHost(proxyFromURL, direct) + perHost.AddFromString(noProxy) + + return perHost, nil +} diff --git a/extender/code/vendor/github.com/docker/go-connections/sockets/sockets.go b/extender/code/vendor/github.com/docker/go-connections/sockets/sockets.go new file mode 100644 index 000000000..a1d7beb4d --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-connections/sockets/sockets.go @@ -0,0 +1,38 @@ +// Package sockets provides helper functions to create and configure Unix or TCP sockets. +package sockets + +import ( + "errors" + "net" + "net/http" + "time" +) + +// Why 32? See https://github.com/docker/docker/pull/8035. +const defaultTimeout = 32 * time.Second + +// ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system. +var ErrProtocolNotAvailable = errors.New("protocol not available") + +// ConfigureTransport configures the specified Transport according to the +// specified proto and addr. +// If the proto is unix (using a unix socket to communicate) or npipe the +// compression is disabled. 
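As a usage note for `ConfigureTransport` (whose body follows): pointing an `http.Transport` at a Docker-style unix socket looks roughly like this sketch; the socket path and the `http://unix` host convention are illustrative assumptions:

```go
package main

import (
	"net/http"

	"github.com/docker/go-connections/sockets"
)

func main() {
	tr := &http.Transport{}
	// For "unix" (and "npipe") the transport dials the local socket and
	// disables compression, as described above.
	if err := sockets.ConfigureTransport(tr, "unix", "/var/run/docker.sock"); err != nil {
		panic(err)
	}
	client := &http.Client{Transport: tr}
	_ = client // e.g. client.Get("http://unix/v1.24/info")
}
```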
+func ConfigureTransport(tr *http.Transport, proto, addr string) error { + switch proto { + case "unix": + return configureUnixTransport(tr, proto, addr) + case "npipe": + return configureNpipeTransport(tr, proto, addr) + default: + tr.Proxy = http.ProxyFromEnvironment + dialer, err := DialerFromEnvironment(&net.Dialer{ + Timeout: defaultTimeout, + }) + if err != nil { + return err + } + tr.Dial = dialer.Dial + } + return nil +} diff --git a/extender/code/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/extender/code/vendor/github.com/docker/go-connections/sockets/sockets_unix.go new file mode 100644 index 000000000..386cf0dbb --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-connections/sockets/sockets_unix.go @@ -0,0 +1,35 @@ +// +build !windows + +package sockets + +import ( + "fmt" + "net" + "net/http" + "syscall" + "time" +) + +const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) + +func configureUnixTransport(tr *http.Transport, proto, addr string) error { + if len(addr) > maxUnixSocketPathSize { + return fmt.Errorf("Unix socket path %q is too long", addr) + } + // No need for compression in local communications. + tr.DisableCompression = true + tr.Dial = func(_, _ string) (net.Conn, error) { + return net.DialTimeout(proto, addr, defaultTimeout) + } + return nil +} + +func configureNpipeTransport(tr *http.Transport, proto, addr string) error { + return ErrProtocolNotAvailable +} + +// DialPipe connects to a Windows named pipe. +// This is not supported on other OSes. +func DialPipe(_ string, _ time.Duration) (net.Conn, error) { + return nil, syscall.EAFNOSUPPORT +} diff --git a/extender/code/vendor/github.com/docker/go-connections/sockets/sockets_windows.go b/extender/code/vendor/github.com/docker/go-connections/sockets/sockets_windows.go new file mode 100644 index 000000000..5c21644e1 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-connections/sockets/sockets_windows.go @@ -0,0 +1,27 @@ +package sockets + +import ( + "net" + "net/http" + "time" + + "github.com/Microsoft/go-winio" +) + +func configureUnixTransport(tr *http.Transport, proto, addr string) error { + return ErrProtocolNotAvailable +} + +func configureNpipeTransport(tr *http.Transport, proto, addr string) error { + // No need for compression in local communications. + tr.DisableCompression = true + tr.Dial = func(_, _ string) (net.Conn, error) { + return DialPipe(addr, defaultTimeout) + } + return nil +} + +// DialPipe connects to a Windows named pipe. +func DialPipe(addr string, timeout time.Duration) (net.Conn, error) { + return winio.DialPipe(addr, &timeout) +} diff --git a/extender/code/vendor/github.com/docker/go-connections/sockets/tcp_socket.go b/extender/code/vendor/github.com/docker/go-connections/sockets/tcp_socket.go new file mode 100644 index 000000000..53cbb6c79 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-connections/sockets/tcp_socket.go @@ -0,0 +1,22 @@ +// Package sockets provides helper functions to create and configure Unix or TCP sockets. +package sockets + +import ( + "crypto/tls" + "net" +) + +// NewTCPSocket creates a TCP socket listener with the specified address and +// the specified tls configuration. If TLSConfig is set, will encapsulate the +// TCP listener inside a TLS one. 
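`NewTCPSocket` (defined next) pairs naturally with the `tlsconfig` package vendored below. A hedged sketch; note the config carries no certificates here, so real handshakes would still need `Certificates` populated:

```go
package main

import (
	"github.com/docker/go-connections/sockets"
	"github.com/docker/go-connections/tlsconfig"
)

func main() {
	// Hardened server defaults: TLS >= 1.2, known-weak ciphers removed.
	cfg := tlsconfig.ServerDefault()

	// Non-nil config, so the TCP listener is wrapped in a TLS listener
	// and NextProtos is forced to http/1.1.
	l, err := sockets.NewTCPSocket("127.0.0.1:2376", cfg)
	if err != nil {
		panic(err)
	}
	defer l.Close()
}
```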
+func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) { + l, err := net.Listen("tcp", addr) + if err != nil { + return nil, err + } + if tlsConfig != nil { + tlsConfig.NextProtos = []string{"http/1.1"} + l = tls.NewListener(l, tlsConfig) + } + return l, nil +} diff --git a/extender/code/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/extender/code/vendor/github.com/docker/go-connections/sockets/unix_socket.go new file mode 100644 index 000000000..a8b5dbb6f --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-connections/sockets/unix_socket.go @@ -0,0 +1,32 @@ +// +build !windows + +package sockets + +import ( + "net" + "os" + "syscall" +) + +// NewUnixSocket creates a unix socket with the specified path and group. +func NewUnixSocket(path string, gid int) (net.Listener, error) { + if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) { + return nil, err + } + mask := syscall.Umask(0777) + defer syscall.Umask(mask) + + l, err := net.Listen("unix", path) + if err != nil { + return nil, err + } + if err := os.Chown(path, 0, gid); err != nil { + l.Close() + return nil, err + } + if err := os.Chmod(path, 0660); err != nil { + l.Close() + return nil, err + } + return l, nil +} diff --git a/extender/code/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go b/extender/code/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go new file mode 100644 index 000000000..1ca0965e0 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go @@ -0,0 +1,18 @@ +// +build go1.7 + +package tlsconfig + +import ( + "crypto/x509" + "runtime" +) + +// SystemCertPool returns a copy of the system cert pool, +// returns an error if it fails to load, or an empty pool on windows. +func SystemCertPool() (*x509.CertPool, error) { + certpool, err := x509.SystemCertPool() + if err != nil && runtime.GOOS == "windows" { + return x509.NewCertPool(), nil + } + return certpool, err +} diff --git a/extender/code/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/extender/code/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go new file mode 100644 index 000000000..1ff81c333 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go @@ -0,0 +1,13 @@ +// +build !go1.7 + +package tlsconfig + +import ( + "crypto/x509" +) + +// SystemCertPool returns a new empty cert pool, +// accessing system cert pool is supported in go 1.7 +func SystemCertPool() (*x509.CertPool, error) { + return x509.NewCertPool(), nil +} diff --git a/extender/code/vendor/github.com/docker/go-connections/tlsconfig/config.go b/extender/code/vendor/github.com/docker/go-connections/tlsconfig/config.go new file mode 100644 index 000000000..0ef3fdcb4 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-connections/tlsconfig/config.go @@ -0,0 +1,254 @@ +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. +// +// As a reminder from https://golang.org/pkg/crypto/tls/#Config: +// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified. +// A Config may be reused; the tls package will also not modify it.
+package tlsconfig + +import ( + "crypto/tls" + "crypto/x509" + "encoding/pem" + "fmt" + "io/ioutil" + "os" + + "github.com/pkg/errors" +) + +// Options represents the information needed to create client and server TLS configurations. +type Options struct { + CAFile string + + // If either CertFile or KeyFile is empty, Client() will not load them + // preventing the client from authenticating to the server. + // However, Server() requires them and will error out if they are empty. + CertFile string + KeyFile string + + // client-only option + InsecureSkipVerify bool + // server-only option + ClientAuth tls.ClientAuthType + // If ExclusiveRootPools is set, then if a CA file is provided, the root pool used for TLS + // creds will include exclusively the roots in that CA file. If no CA file is provided, + // the system pool will be used. + ExclusiveRootPools bool + MinVersion uint16 + // If Passphrase is set, it will be used to decrypt a TLS private key + // if the key is encrypted + Passphrase string +} + +// Extra (server-side) accepted CBC cipher suites - will phase out in the future +var acceptedCBCCiphers = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, +} + +// DefaultServerAcceptedCiphers should be used by code which already has a crypto/tls +// options struct but wants to use a commonly accepted set of TLS cipher suites, with +// known weak algorithms removed. +var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) + +// allTLSVersions lists all the TLS versions and is used by the code that validates +// a uint16 value as a TLS version. +var allTLSVersions = map[uint16]struct{}{ + tls.VersionSSL30: {}, + tls.VersionTLS10: {}, + tls.VersionTLS11: {}, + tls.VersionTLS12: {}, +} + +// ServerDefault returns a secure-enough TLS configuration for the server TLS configuration. +func ServerDefault(ops ...func(*tls.Config)) *tls.Config { + tlsconfig := &tls.Config{ + // Avoid fallback by default to SSL protocols < TLS1.2 + MinVersion: tls.VersionTLS12, + PreferServerCipherSuites: true, + CipherSuites: DefaultServerAcceptedCiphers, + } + + for _, op := range ops { + op(tlsconfig) + } + + return tlsconfig +} + +// ClientDefault returns a secure-enough TLS configuration for the client TLS configuration. +func ClientDefault(ops ...func(*tls.Config)) *tls.Config { + tlsconfig := &tls.Config{ + // Prefer TLS1.2 as the client minimum + MinVersion: tls.VersionTLS12, + CipherSuites: clientCipherSuites, + } + + for _, op := range ops { + op(tlsconfig) + } + + return tlsconfig +} + +// certPool returns an X.509 certificate pool from `caFile`, the certificate file.
+func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) { + // If we should verify the server, we need to load a trusted ca + var ( + certPool *x509.CertPool + err error + ) + if exclusivePool { + certPool = x509.NewCertPool() + } else { + certPool, err = SystemCertPool() + if err != nil { + return nil, fmt.Errorf("failed to read system certificates: %v", err) + } + } + pem, err := ioutil.ReadFile(caFile) + if err != nil { + return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err) + } + if !certPool.AppendCertsFromPEM(pem) { + return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) + } + return certPool, nil +} + +// isValidMinVersion checks that the input value is a valid tls minimum version +func isValidMinVersion(version uint16) bool { + _, ok := allTLSVersions[version] + return ok +} + +// adjustMinVersion sets the MinVersion on `config`, the input configuration. +// It assumes the current MinVersion on the `config` is the lowest allowed. +func adjustMinVersion(options Options, config *tls.Config) error { + if options.MinVersion > 0 { + if !isValidMinVersion(options.MinVersion) { + return fmt.Errorf("Invalid minimum TLS version: %x", options.MinVersion) + } + if options.MinVersion < config.MinVersion { + return fmt.Errorf("Requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion) + } + config.MinVersion = options.MinVersion + } + + return nil +} + +// IsErrEncryptedKey returns true if the 'err' is an error of incorrect +// password when trying to decrypt a TLS private key +func IsErrEncryptedKey(err error) bool { + return errors.Cause(err) == x509.IncorrectPasswordError +} + +// getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format. +// If the private key is encrypted, 'passphrase' is used to decrypt the +// private key. +func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) { + // this section makes some small changes to code from notary/tuf/utils/x509.go + pemBlock, _ := pem.Decode(keyBytes) + if pemBlock == nil { + return nil, fmt.Errorf("no valid private key found") + } + + var err error + if x509.IsEncryptedPEMBlock(pemBlock) { + keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase)) + if err != nil { + return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it") + } + keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes}) + } + + return keyBytes, nil +} + +// getCert returns a Certificate from the CertFile and KeyFile in 'options', +// if the key is encrypted, the Passphrase in 'options' will be used to +// decrypt it. +func getCert(options Options) ([]tls.Certificate, error) { + if options.CertFile == "" && options.KeyFile == "" { + return nil, nil + } + + errMessage := "Could not load X509 key pair" + + cert, err := ioutil.ReadFile(options.CertFile) + if err != nil { + return nil, errors.Wrap(err, errMessage) + } + + prKeyBytes, err := ioutil.ReadFile(options.KeyFile) + if err != nil { + return nil, errors.Wrap(err, errMessage) + } + + prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase) + if err != nil { + return nil, errors.Wrap(err, errMessage) + } + + tlsCert, err := tls.X509KeyPair(cert, prKeyBytes) + if err != nil { + return nil, errors.Wrap(err, errMessage) + } + + return []tls.Certificate{tlsCert}, nil +} + +// Client returns a TLS configuration meant to be used by a client.
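A hedged sketch of the client-side entry point whose definition follows (file paths are placeholders):

```go
package main

import (
	"net/http"

	"github.com/docker/go-connections/tlsconfig"
)

func main() {
	cfg, err := tlsconfig.Client(tlsconfig.Options{
		CAFile:   "/certs/ca.pem",   // verify the server against this CA
		CertFile: "/certs/cert.pem", // optional client cert...
		KeyFile:  "/certs/key.pem",  // ...and key, for mutual TLS
	})
	if err != nil {
		panic(err)
	}
	client := &http.Client{
		Transport: &http.Transport{TLSClientConfig: cfg},
	}
	_ = client
}
```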
+func Client(options Options) (*tls.Config, error) { + tlsConfig := ClientDefault() + tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify + if !options.InsecureSkipVerify && options.CAFile != "" { + CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) + if err != nil { + return nil, err + } + tlsConfig.RootCAs = CAs + } + + tlsCerts, err := getCert(options) + if err != nil { + return nil, err + } + tlsConfig.Certificates = tlsCerts + + if err := adjustMinVersion(options, tlsConfig); err != nil { + return nil, err + } + + return tlsConfig, nil +} + +// Server returns a TLS configuration meant to be used by a server. +func Server(options Options) (*tls.Config, error) { + tlsConfig := ServerDefault() + tlsConfig.ClientAuth = options.ClientAuth + tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) + if err != nil { + if os.IsNotExist(err) { + return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) + } + return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) + } + tlsConfig.Certificates = []tls.Certificate{tlsCert} + if options.ClientAuth >= tls.VerifyClientCertIfGiven && options.CAFile != "" { + CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) + if err != nil { + return nil, err + } + tlsConfig.ClientCAs = CAs + } + + if err := adjustMinVersion(options, tlsConfig); err != nil { + return nil, err + } + + return tlsConfig, nil +} diff --git a/extender/code/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/extender/code/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go new file mode 100644 index 000000000..6b4c6a7c0 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go @@ -0,0 +1,17 @@ +// +build go1.5 + +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. +// +package tlsconfig + +import ( + "crypto/tls" +) + +// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) +var clientCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +} diff --git a/extender/code/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/extender/code/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go new file mode 100644 index 000000000..ee22df47c --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go @@ -0,0 +1,15 @@ +// +build !go1.5 + +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. 
+// +package tlsconfig + +import ( + "crypto/tls" +) + +// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) +var clientCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +} diff --git a/extender/code/vendor/github.com/docker/go-events/.gitignore b/extender/code/vendor/github.com/docker/go-events/.gitignore new file mode 100644 index 000000000..daf913b1b --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-events/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/extender/code/vendor/github.com/docker/go-events/CONTRIBUTING.md b/extender/code/vendor/github.com/docker/go-events/CONTRIBUTING.md new file mode 100644 index 000000000..d813af779 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-events/CONTRIBUTING.md @@ -0,0 +1,70 @@ +# Contributing to Docker open source projects + +Want to hack on go-events? Awesome! Here are instructions to get you started. + +go-events is part of the [Docker](https://www.docker.com) project, and +follows the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read Docker's +[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), +[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), +[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and +[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). + +For an in-depth description of our contribution process, visit the +contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/) + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. 
+ +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/extender/code/vendor/github.com/docker/go-events/LICENSE b/extender/code/vendor/github.com/docker/go-events/LICENSE new file mode 100644 index 000000000..6d630cf59 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-events/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/extender/code/vendor/github.com/docker/go-events/MAINTAINERS b/extender/code/vendor/github.com/docker/go-events/MAINTAINERS new file mode 100644 index 000000000..e414d82e9 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-events/MAINTAINERS @@ -0,0 +1,46 @@ +# go-events maintainers file +# +# This file describes who runs the docker/go-events project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "aaronlehmann", + "aluzzardi", + "lk4d4", + "stevvooe", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.aaronlehmann] + Name = "Aaron Lehmann" + Email = "aaron.lehmann@docker.com" + GitHub = "aaronlehmann" + + [people.aluzzardi] + Name = "Andrea Luzzardi" + Email = "al@docker.com" + GitHub = "aluzzardi" + + [people.lk4d4] + Name = "Alexander Morozov" + Email = "lk4d4@docker.com" + GitHub = "lk4d4" + + [people.stevvooe] + Name = "Stephen Day" + Email = "stephen.day@docker.com" + GitHub = "stevvooe" diff --git a/extender/code/vendor/github.com/docker/go-events/README.md b/extender/code/vendor/github.com/docker/go-events/README.md new file mode 100644 index 000000000..0acafc279 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-events/README.md @@ -0,0 +1,117 @@ +# Docker Events Package + +[![GoDoc](https://godoc.org/github.com/docker/go-events?status.svg)](https://godoc.org/github.com/docker/go-events) +[![Circle CI](https://circleci.com/gh/docker/go-events.svg?style=shield)](https://circleci.com/gh/docker/go-events) + +The Docker `events` package implements a composable event distribution package +for Go. + +Originally created to implement the [notifications in Docker Registry +2](https://github.com/docker/distribution/blob/master/docs/notifications.md), +we've found the pattern to be useful in other applications. This package is +most of the same code with slightly updated interfaces. 
Much of the internals
+have been made available.
+
+## Usage
+
+The `events` package centers around a `Sink` type. Events are written with
+calls to `Sink.Write(event Event)`. Sinks can be wired up in various
+configurations to achieve interesting behavior.
+
+The canonical example is that employed by the
+[docker/distribution/notifications](https://godoc.org/github.com/docker/distribution/notifications)
+package. Let's say we have a type `httpSink` where we'd like to queue
+notifications. As a rule, it should send a single http request and return an
+error if it fails:
+
+```go
+func (h *httpSink) Write(event Event) error {
+	p, err := json.Marshal(event)
+	if err != nil {
+		return err
+	}
+	body := bytes.NewReader(p)
+	resp, err := h.client.Post(h.url, "application/json", body)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != 200 {
+		return errors.New("unexpected status")
+	}
+
+	return nil
+}
+
+// implement (*httpSink).Close()
+```
+
+With just that, we can start using components from this package. One can call
+`(*httpSink).Write` to send events as the body of a post request to a
+configured URL.
+
+### Retries
+
+HTTP can be unreliable. The first feature we'd like is to have some retry:
+
+```go
+hs := newHTTPSink(/*...*/)
+retry := NewRetryingSink(hs, NewBreaker(5, time.Second))
+```
+
+We now have a sink that will retry events against the `httpSink` until they
+succeed. The retry will back off for one second after 5 consecutive failures
+using the breaker strategy.
+
+### Queues
+
+This isn't quite enough. We want a sink that doesn't block while we are
+waiting for events to be sent. Let's add a `Queue`:
+
+```go
+queue := NewQueue(retry)
+```
+
+Now, we have an unbounded queue that will work through all events sent with
+`(*Queue).Write`. Events can be added asynchronously to the queue without
+blocking the current execution path. This is ideal for use in an http request.
+
+### Broadcast
+
+It usually turns out that you want to send to more than one listener. We can
+use `Broadcaster` to support this:
+
+```go
+var broadcast = NewBroadcaster() // make it available somewhere in your application.
+broadcast.Add(queue) // add your queue!
+broadcast.Add(queue2) // and another!
+```
+
+With the above, we can now call `broadcast.Write` in our http handlers and have
+all the events distributed to each queue. Because the events are queued, no
+listener blocks another.
+
+### Extending
+
+For the most part, the above is sufficient for a lot of applications. However,
+extending the above functionality can be done by implementing your own `Sink`.
+The behavior and semantics of the sink can be completely dependent on the
+application requirements. The interface is provided below for reference:
+
+```go
+type Sink interface {
+	Write(Event) error
+	Close() error
+}
+```
+
+Application behavior can be controlled by how `Write` behaves. The examples
+above are designed to queue the message and return as quickly as possible.
+Other implementations may block until the event is committed to durable
+storage.
+
+## Copyright and license
+
+Copyright © 2016 Docker, Inc. go-events is licensed under the Apache License,
+Version 2.0. See [LICENSE](LICENSE) for the full license text.
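To see how the pieces above compose end to end, here is a minimal sketch that wires a sink through retry, queueing, and broadcast. The `logSink` type is hypothetical (a stand-in for the `httpSink` from the README); the constructors are the ones the vendored `go-events` package exports in this diff:

```go
package main

import (
	"log"
	"time"

	events "github.com/docker/go-events"
)

// logSink is a hypothetical Sink that writes each event to the standard
// logger; it stands in for an httpSink or any other destination.
type logSink struct{}

func (s *logSink) Write(event events.Event) error {
	log.Printf("event: %v", event)
	return nil
}

func (s *logSink) Close() error { return nil }

func main() {
	// Retry against the sink, backing off for a second after 5 consecutive
	// failures, then queue writes so callers never block.
	retry := events.NewRetryingSink(&logSink{}, events.NewBreaker(5, time.Second))
	queue := events.NewQueue(retry)

	// Fan out through a broadcaster; more queues can be added later with Add.
	broadcast := events.NewBroadcaster(queue)

	_ = broadcast.Write("something happened") // any value satisfies events.Event
	_ = broadcast.Close()                     // flushes the queue and closes the sinks
}
```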
diff --git a/extender/code/vendor/github.com/docker/go-events/broadcast.go b/extender/code/vendor/github.com/docker/go-events/broadcast.go
new file mode 100644
index 000000000..5120078df
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/go-events/broadcast.go
@@ -0,0 +1,178 @@
+package events
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/sirupsen/logrus"
+)
+
+// Broadcaster sends events to multiple, reliable Sinks. The goal of this
+// component is to dispatch events to configured endpoints. Reliability can be
+// provided by wrapping incoming sinks.
+type Broadcaster struct {
+	sinks   []Sink
+	events  chan Event
+	adds    chan configureRequest
+	removes chan configureRequest
+
+	shutdown chan struct{}
+	closed   chan struct{}
+	once     sync.Once
+}
+
+// NewBroadcaster returns a broadcaster for the provided sinks. The
+// broadcaster behavior will be affected by the properties of the sink.
+// Generally, the sink should accept all messages and deal with reliability on
+// its own; EventQueue and RetryingSink are the intended wrappers here.
+func NewBroadcaster(sinks ...Sink) *Broadcaster {
+	b := Broadcaster{
+		sinks:    sinks,
+		events:   make(chan Event),
+		adds:     make(chan configureRequest),
+		removes:  make(chan configureRequest),
+		shutdown: make(chan struct{}),
+		closed:   make(chan struct{}),
+	}
+
+	// Start the broadcaster
+	go b.run()
+
+	return &b
+}
+
+// Write accepts an event to be dispatched to all sinks. This method will never
+// fail and should never block (hopefully!). The caller cedes the memory to the
+// broadcaster and should not modify it after calling write.
+func (b *Broadcaster) Write(event Event) error {
+	select {
+	case b.events <- event:
+	case <-b.closed:
+		return ErrSinkClosed
+	}
+	return nil
+}
+
+// Add the sink to the broadcaster.
+//
+// The provided sink must be comparable with equality. Typically, this just
+// works with a regular pointer type.
+func (b *Broadcaster) Add(sink Sink) error {
+	return b.configure(b.adds, sink)
+}
+
+// Remove the provided sink.
+func (b *Broadcaster) Remove(sink Sink) error {
+	return b.configure(b.removes, sink)
+}
+
+type configureRequest struct {
+	sink     Sink
+	response chan error
+}
+
+func (b *Broadcaster) configure(ch chan configureRequest, sink Sink) error {
+	response := make(chan error, 1)
+
+	for {
+		select {
+		case ch <- configureRequest{
+			sink:     sink,
+			response: response}:
+			ch = nil
+		case err := <-response:
+			return err
+		case <-b.closed:
+			return ErrSinkClosed
+		}
+	}
+}
+
+// Close the broadcaster, ensuring that all messages are flushed to the
+// underlying sink before returning.
+func (b *Broadcaster) Close() error {
+	b.once.Do(func() {
+		close(b.shutdown)
+	})
+
+	<-b.closed
+	return nil
+}
+
+// run is the main broadcast loop, started when the broadcaster is created.
+// Under normal conditions, it waits for events on the event channel. After
+// Close is called, this goroutine will exit.
+func (b *Broadcaster) run() {
+	defer close(b.closed)
+	remove := func(target Sink) {
+		for i, sink := range b.sinks {
+			if sink == target {
+				b.sinks = append(b.sinks[:i], b.sinks[i+1:]...)
+				break
+			}
+		}
+	}
+
+	for {
+		select {
+		case event := <-b.events:
+			for _, sink := range b.sinks {
+				if err := sink.Write(event); err != nil {
+					if err == ErrSinkClosed {
+						// remove closed sinks
+						remove(sink)
+						continue
+					}
+					logrus.WithField("event", event).WithField("events.sink", sink).WithError(err).
+ Errorf("broadcaster: dropping event") + } + } + case request := <-b.adds: + // while we have to iterate for add/remove, common iteration for + // send is faster against slice. + + var found bool + for _, sink := range b.sinks { + if request.sink == sink { + found = true + break + } + } + + if !found { + b.sinks = append(b.sinks, request.sink) + } + // b.sinks[request.sink] = struct{}{} + request.response <- nil + case request := <-b.removes: + remove(request.sink) + request.response <- nil + case <-b.shutdown: + // close all the underlying sinks + for _, sink := range b.sinks { + if err := sink.Close(); err != nil && err != ErrSinkClosed { + logrus.WithField("events.sink", sink).WithError(err). + Errorf("broadcaster: closing sink failed") + } + } + return + } + } +} + +func (b *Broadcaster) String() string { + // Serialize copy of this broadcaster without the sync.Once, to avoid + // a data race. + + b2 := map[string]interface{}{ + "sinks": b.sinks, + "events": b.events, + "adds": b.adds, + "removes": b.removes, + + "shutdown": b.shutdown, + "closed": b.closed, + } + + return fmt.Sprint(b2) +} diff --git a/extender/code/vendor/github.com/docker/go-events/channel.go b/extender/code/vendor/github.com/docker/go-events/channel.go new file mode 100644 index 000000000..802cf51ff --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-events/channel.go @@ -0,0 +1,61 @@ +package events + +import ( + "fmt" + "sync" +) + +// Channel provides a sink that can be listened on. The writer and channel +// listener must operate in separate goroutines. +// +// Consumers should listen on Channel.C until Closed is closed. +type Channel struct { + C chan Event + + closed chan struct{} + once sync.Once +} + +// NewChannel returns a channel. If buffer is zero, the channel is +// unbuffered. +func NewChannel(buffer int) *Channel { + return &Channel{ + C: make(chan Event, buffer), + closed: make(chan struct{}), + } +} + +// Done returns a channel that will always proceed once the sink is closed. +func (ch *Channel) Done() chan struct{} { + return ch.closed +} + +// Write the event to the channel. Must be called in a separate goroutine from +// the listener. +func (ch *Channel) Write(event Event) error { + select { + case ch.C <- event: + return nil + case <-ch.closed: + return ErrSinkClosed + } +} + +// Close the channel sink. +func (ch *Channel) Close() error { + ch.once.Do(func() { + close(ch.closed) + }) + + return nil +} + +func (ch *Channel) String() string { + // Serialize a copy of the Channel that doesn't contain the sync.Once, + // to avoid a data race. + ch2 := map[string]interface{}{ + "C": ch.C, + "closed": ch.closed, + } + return fmt.Sprint(ch2) +} diff --git a/extender/code/vendor/github.com/docker/go-events/errors.go b/extender/code/vendor/github.com/docker/go-events/errors.go new file mode 100644 index 000000000..56db7c251 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-events/errors.go @@ -0,0 +1,10 @@ +package events + +import "fmt" + +var ( + // ErrSinkClosed is returned if a write is issued to a sink that has been + // closed. If encountered, the error should be considered terminal and + // retries will not be successful. 
+	ErrSinkClosed = fmt.Errorf("events: sink closed")
+)
diff --git a/extender/code/vendor/github.com/docker/go-events/event.go b/extender/code/vendor/github.com/docker/go-events/event.go
new file mode 100644
index 000000000..f0f1d9ea5
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/go-events/event.go
@@ -0,0 +1,15 @@
+package events
+
+// Event marks items that can be sent as events.
+type Event interface{}
+
+// Sink accepts and sends events.
+type Sink interface {
+	// Write an event to the Sink. If no error is returned, the caller will
+	// assume that all events have been committed to the sink. If an error is
+	// received, the caller may retry sending the event.
+	Write(event Event) error
+
+	// Close the sink, possibly waiting for pending events to flush.
+	Close() error
+}
diff --git a/extender/code/vendor/github.com/docker/go-events/filter.go b/extender/code/vendor/github.com/docker/go-events/filter.go
new file mode 100644
index 000000000..e6c0eb69d
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/go-events/filter.go
@@ -0,0 +1,52 @@
+package events
+
+// Matcher matches events.
+type Matcher interface {
+	Match(event Event) bool
+}
+
+// MatcherFunc implements matcher with just a function.
+type MatcherFunc func(event Event) bool
+
+// Match calls the wrapped function.
+func (fn MatcherFunc) Match(event Event) bool {
+	return fn(event)
+}
+
+// Filter provides an event sink that sends only events that are accepted by a
+// Matcher. No methods on filter are goroutine safe.
+type Filter struct {
+	dst     Sink
+	matcher Matcher
+	closed  bool
+}
+
+// NewFilter returns a new filter that will send events to dst that return
+// true for Matcher.
+func NewFilter(dst Sink, matcher Matcher) Sink {
+	return &Filter{dst: dst, matcher: matcher}
+}
+
+// Write an event to the filter.
+func (f *Filter) Write(event Event) error {
+	if f.closed {
+		return ErrSinkClosed
+	}
+
+	if f.matcher.Match(event) {
+		return f.dst.Write(event)
+	}
+
+	return nil
+}
+
+// Close the filter and allow no more events to pass through.
+func (f *Filter) Close() error {
+	// TODO(stevvooe): Not all sinks should have Close.
+	if f.closed {
+		return nil
+	}
+
+	f.closed = true
+	return f.dst.Close()
+}
diff --git a/extender/code/vendor/github.com/docker/go-events/queue.go b/extender/code/vendor/github.com/docker/go-events/queue.go
new file mode 100644
index 000000000..4bb770afc
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/go-events/queue.go
@@ -0,0 +1,111 @@
+package events
+
+import (
+	"container/list"
+	"sync"
+
+	"github.com/sirupsen/logrus"
+)
+
+// Queue accepts all messages into a queue for asynchronous consumption
+// by a sink. It is unbounded and thread safe but the sink must be reliable or
+// events will be dropped.
+type Queue struct {
+	dst    Sink
+	events *list.List
+	cond   *sync.Cond
+	mu     sync.Mutex
+	closed bool
+}
+
+// NewQueue returns a queue to the provided Sink dst.
+func NewQueue(dst Sink) *Queue {
+	eq := Queue{
+		dst:    dst,
+		events: list.New(),
+	}
+
+	eq.cond = sync.NewCond(&eq.mu)
+	go eq.run()
+	return &eq
+}
+
+// Write accepts the events into the queue, only failing if the queue has
+// been closed.
+func (eq *Queue) Write(event Event) error {
+	eq.mu.Lock()
+	defer eq.mu.Unlock()
+
+	if eq.closed {
+		return ErrSinkClosed
+	}
+
+	eq.events.PushBack(event)
+	eq.cond.Signal() // signal waiters
+
+	return nil
+}
+
+// Close shuts down the event queue, flushing all pending events to the
+// destination sink before returning.
+func (eq *Queue) Close() error {
+	eq.mu.Lock()
+	defer eq.mu.Unlock()
+
+	if eq.closed {
+		return nil
+	}
+
+	// set closed flag
+	eq.closed = true
+	eq.cond.Signal() // signal flushes queue
+	eq.cond.Wait()   // wait for signal from last flush
+	return eq.dst.Close()
+}
+
+// run is the main goroutine to flush events to the target sink.
+func (eq *Queue) run() {
+	for {
+		event := eq.next()
+
+		if event == nil {
+			return // nil event means the queue is closed.
+		}
+
+		if err := eq.dst.Write(event); err != nil {
+			// TODO(aaronl): Dropping events could be bad depending
+			// on the application. We should have a way of
+			// communicating this condition. However, logging
+			// at a log level above debug may not be appropriate.
+			// Eventually, go-events should not use logrus at all,
+			// and should bubble up conditions like this through
+			// error values.
+			logrus.WithFields(logrus.Fields{
+				"event": event,
+				"sink":  eq.dst,
+			}).WithError(err).Debug("eventqueue: dropped event")
+		}
+	}
+}
+
+// next encompasses the critical section of the run loop. When the queue is
+// empty, it will block on the condition. If new data arrives, it will wake
+// and return an event. When closed, nil will be returned.
+func (eq *Queue) next() Event {
+	eq.mu.Lock()
+	defer eq.mu.Unlock()
+
+	for eq.events.Len() < 1 {
+		if eq.closed {
+			eq.cond.Broadcast()
+			return nil
+		}
+
+		eq.cond.Wait()
+	}
+
+	front := eq.events.Front()
+	block := front.Value.(Event)
+	eq.events.Remove(front)
+
+	return block
+}
diff --git a/extender/code/vendor/github.com/docker/go-events/retry.go b/extender/code/vendor/github.com/docker/go-events/retry.go
new file mode 100644
index 000000000..2df55d216
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/go-events/retry.go
@@ -0,0 +1,260 @@
+package events
+
+import (
+	"fmt"
+	"math/rand"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/sirupsen/logrus"
+)
+
+// RetryingSink retries the write until success or an ErrSinkClosed is
+// returned. The underlying sink must have a nonzero probability of succeeding
+// or the sink will block. Retry is configured with a RetryStrategy.
+// Concurrent calls to a retrying sink are serialized through the sink,
+// meaning that if one is in-flight, another will not proceed.
+type RetryingSink struct {
+	sink     Sink
+	strategy RetryStrategy
+	closed   chan struct{}
+	once     sync.Once
+}
+
+// NewRetryingSink returns a sink that will retry writes to a sink, backing
+// off on failure. Parameters threshold and backoff adjust the behavior of the
+// circuit breaker.
+func NewRetryingSink(sink Sink, strategy RetryStrategy) *RetryingSink {
+	rs := &RetryingSink{
+		sink:     sink,
+		strategy: strategy,
+		closed:   make(chan struct{}),
+	}
+
+	return rs
+}
+
+// Write attempts to flush the events to the downstream sink until it succeeds
+// or the sink is closed.
+func (rs *RetryingSink) Write(event Event) error {
+	logger := logrus.WithField("event", event)
+
+retry:
+	select {
+	case <-rs.closed:
+		return ErrSinkClosed
+	default:
+	}
+
+	if backoff := rs.strategy.Proceed(event); backoff > 0 {
+		select {
+		case <-time.After(backoff):
+			// TODO(stevvooe): This branch holds up the next try. Before, we
+			// would simply break to the "retry" label and then possibly wait
+			// again. However, this requires all retry strategies to have a
+			// large probability of probing the sink for success, rather than
+			// just backing off and sending the request.
+		case <-rs.closed:
+			return ErrSinkClosed
+		}
+	}
+
+	if err := rs.sink.Write(event); err != nil {
+		if err == ErrSinkClosed {
+			// terminal!
+			return err
+		}
+
+		logger := logger.WithError(err) // shadow!!
+
+		if rs.strategy.Failure(event, err) {
+			logger.Errorf("retryingsink: dropped event")
+			return nil
+		}
+
+		logger.Errorf("retryingsink: error writing event, retrying")
+		goto retry
+	}
+
+	rs.strategy.Success(event)
+	return nil
+}
+
+// Close closes the sink and the underlying sink.
+func (rs *RetryingSink) Close() error {
+	rs.once.Do(func() {
+		close(rs.closed)
+	})
+
+	return nil
+}
+
+func (rs *RetryingSink) String() string {
+	// Serialize a copy of the RetryingSink without the sync.Once, to avoid
+	// a data race.
+	rs2 := map[string]interface{}{
+		"sink":     rs.sink,
+		"strategy": rs.strategy,
+		"closed":   rs.closed,
+	}
+	return fmt.Sprint(rs2)
+}
+
+// RetryStrategy defines a strategy for retrying event sink writes.
+//
+// All methods should be goroutine safe.
+type RetryStrategy interface {
+	// Proceed is called before every event send. If proceed returns a
+	// positive, non-zero duration, the retryer will back off by the provided
+	// duration.
+	//
+	// An event is provided, but may be ignored.
+	Proceed(event Event) time.Duration
+
+	// Failure reports a failure to the strategy. If this method returns true,
+	// the event should be dropped.
+	Failure(event Event, err error) bool
+
+	// Success should be called when an event is sent successfully.
+	Success(event Event)
+}
+
+// Breaker implements a circuit breaker retry strategy.
+//
+// The current implementation never drops events.
+type Breaker struct {
+	threshold int
+	recent    int
+	last      time.Time
+	backoff   time.Duration // time after which we retry after failure.
+	mu        sync.Mutex
+}
+
+var _ RetryStrategy = &Breaker{}
+
+// NewBreaker returns a breaker that will back off after the threshold has
+// been tripped. A Breaker is thread safe and may be shared by many
+// goroutines.
+func NewBreaker(threshold int, backoff time.Duration) *Breaker {
+	return &Breaker{
+		threshold: threshold,
+		backoff:   backoff,
+	}
+}
+
+// Proceed checks the failures against the threshold.
+func (b *Breaker) Proceed(event Event) time.Duration {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	if b.recent < b.threshold {
+		return 0
+	}
+
+	return b.last.Add(b.backoff).Sub(time.Now())
+}
+
+// Success resets the breaker.
+func (b *Breaker) Success(event Event) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	b.recent = 0
+	b.last = time.Time{}
+}
+
+// Failure records the failure and latest failure time.
+func (b *Breaker) Failure(event Event, err error) bool {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	b.recent++
+	b.last = time.Now().UTC()
+	return false // never drop events.
+}
+
+var (
+	// DefaultExponentialBackoffConfig provides a default configuration for
+	// exponential backoff.
+	DefaultExponentialBackoffConfig = ExponentialBackoffConfig{
+		Base:   time.Second,
+		Factor: time.Second,
+		Max:    20 * time.Second,
+	}
+)
+
+// ExponentialBackoffConfig configures backoff parameters.
+//
+// Note that these parameters operate on the upper bound for choosing a random
+// value. For example, at Base=1s, a random value in [0,1s) will be chosen for
+// the backoff value.
+type ExponentialBackoffConfig struct {
+	// Base is the minimum bound for backing off after failure.
+	Base time.Duration
+
+	// Factor sets the amount of time by which the backoff grows with each
+	// failure.
+	Factor time.Duration
+
+	// Max is the absolute maximum bound for a single backoff.
+	Max time.Duration
+}
+
+// ExponentialBackoff implements random backoff with exponentially increasing
+// bounds as the number of consecutive failures increases.
+type ExponentialBackoff struct {
+	config   ExponentialBackoffConfig
+	failures uint64 // consecutive failure counter.
+}
+
+// NewExponentialBackoff returns an exponential backoff strategy with the
+// desired config. Zero-valued fields in config fall back to the defaults.
+func NewExponentialBackoff(config ExponentialBackoffConfig) *ExponentialBackoff {
+	return &ExponentialBackoff{
+		config: config,
+	}
+}
+
+// Proceed returns the next randomly bound exponential backoff time.
+func (b *ExponentialBackoff) Proceed(event Event) time.Duration {
+	return b.backoff(atomic.LoadUint64(&b.failures))
+}
+
+// Success resets the failures counter.
+func (b *ExponentialBackoff) Success(event Event) {
+	atomic.StoreUint64(&b.failures, 0)
+}
+
+// Failure increments the failure counter.
+func (b *ExponentialBackoff) Failure(event Event, err error) bool {
+	atomic.AddUint64(&b.failures, 1)
+	return false
+}
+
+// backoff calculates the amount of time to wait based on the number of
+// consecutive failures.
+func (b *ExponentialBackoff) backoff(failures uint64) time.Duration {
+	if failures <= 0 {
+		// proceed normally when there are no failures.
+		return 0
+	}
+
+	factor := b.config.Factor
+	if factor <= 0 {
+		factor = DefaultExponentialBackoffConfig.Factor
+	}
+
+	backoff := b.config.Base + factor*time.Duration(1<<(failures-1))
+
+	max := b.config.Max
+	if max <= 0 {
+		max = DefaultExponentialBackoffConfig.Max
+	}
+
+	if backoff > max || backoff < 0 {
+		backoff = max
+	}
+
+	// Choose a uniformly distributed value from [0, backoff).
+	return time.Duration(rand.Int63n(int64(backoff)))
+}
diff --git a/extender/code/vendor/github.com/docker/go-metrics/CONTRIBUTING.md b/extender/code/vendor/github.com/docker/go-metrics/CONTRIBUTING.md
new file mode 100644
index 000000000..b8a512c36
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/go-metrics/CONTRIBUTING.md
@@ -0,0 +1,55 @@
+# Contributing
+
+## Sign your work
+
+The sign-off is a simple line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are pretty simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+ +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/extender/code/vendor/github.com/docker/go-metrics/LICENSE.code b/extender/code/vendor/github.com/docker/go-metrics/LICENSE.code new file mode 100644 index 000000000..8f3fee627 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-metrics/LICENSE.code @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/extender/code/vendor/github.com/docker/go-metrics/LICENSE.docs b/extender/code/vendor/github.com/docker/go-metrics/LICENSE.docs new file mode 100644 index 000000000..e26cd4fc8 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-metrics/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. 
+ + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. 
Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. 
The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. 
You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. 
The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." 
Except for the limited purpose of indicating
+that material is shared under a Creative Commons public license or as
+otherwise permitted by the Creative Commons policies published at
+creativecommons.org/policies, Creative Commons does not authorize the
+use of the trademark "Creative Commons" or any other trademark or logo
+of Creative Commons without its prior written consent including,
+without limitation, in connection with any unauthorized modifications
+to any of its public licenses or any other arrangements,
+understandings, or agreements concerning use of licensed material. For
+the avoidance of doubt, this paragraph does not form part of the public
+licenses.
+
+Creative Commons may be contacted at creativecommons.org.
diff --git a/extender/code/vendor/github.com/docker/go-metrics/NOTICE b/extender/code/vendor/github.com/docker/go-metrics/NOTICE
new file mode 100644
index 000000000..8915f0277
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/go-metrics/NOTICE
@@ -0,0 +1,16 @@
+Docker
+Copyright 2012-2015 Docker, Inc.
+
+This product includes software developed at Docker, Inc. (https://www.docker.com).
+
+The following is courtesy of our legal counsel:
+
+
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see https://www.bis.doc.gov
+
+See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/extender/code/vendor/github.com/docker/go-metrics/README.md b/extender/code/vendor/github.com/docker/go-metrics/README.md
new file mode 100644
index 000000000..a9e947cb5
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/go-metrics/README.md
@@ -0,0 +1,91 @@
+# go-metrics [![GoDoc](https://godoc.org/github.com/docker/go-metrics?status.svg)](https://godoc.org/github.com/docker/go-metrics) ![Badge Badge](http://doyouevenbadge.com/github.com/docker/go-metrics)
+
+This package is a small wrapper around the prometheus go client to help enforce convention and best practices for metrics collection in Docker projects.
+
+## Best Practices
+
+This package is meant to be used for collecting metrics in Docker projects.
+It is not meant to be used as a replacement for the prometheus client but to help enforce consistent naming across metrics collected.
+If you have not already read the prometheus best practices around naming and labels you can read the page [here](https://prometheus.io/docs/practices/naming/).
+
+The following are a few Docker specific rules that will help you name and work with metrics in your project.
+
+1. Namespace and Subsystem
+
+This package provides you with a namespace type that allows you to specify the same namespace and subsystem for your metrics.
+
+```go
+ns := metrics.NewNamespace("engine", "daemon", metrics.Labels{
+	"version": dockerversion.Version,
+	"commit":  dockerversion.GitCommit,
+})
+```
+
+In the example above we are creating metrics for the Docker engine's daemon package.
+`engine` would be the namespace in this example where `daemon` is the subsystem or package where we are collecting the metrics.
+
+A namespace also allows you to attach constant labels to the metrics such as the git commit and version that it is collecting.
+
+2. Declaring your Metrics
+
+Try to keep all your metric declarations in one file.
+This makes it easy for others to see what constant labels are defined on the namespace and what labels are defined on the metrics when they are created. + +3. Use labels instead of multiple metrics + +Labels allow you to define one metric, such as the time it takes to perform a certain action on an object. +If we wanted to collect timings on various container actions such as create, start, and delete, then we can define one metric called `container_actions` and use labels to specify the type of action. + + +```go +containerActions = ns.NewLabeledTimer("container_actions", "The number of seconds it takes to process each container action", "action") +``` + +The last parameter is the label name or key. +When adding a data point to the metric, you will use the `WithValues` function to specify the `action` that you are collecting for. + +```go +containerActions.WithValues("create").UpdateSince(start) +``` + +4. Always use a unit + +The metric name should describe what you are measuring, but you also need to provide the unit that it is being measured with. +For a timer, the standard unit is seconds and a counter's standard unit is a total. +For gauges, you must provide the unit. +This package provides a standard set of units for use within the Docker projects. + +```go +Nanoseconds Unit = "nanoseconds" +Seconds Unit = "seconds" +Bytes Unit = "bytes" +Total Unit = "total" +``` + +If you need to use a unit but it is not defined in the package, please open a PR to add it, but first try to see if one of the already created units will work for your metric, i.e. seconds or nanoseconds vs adding milliseconds. + +## Docs + +Package documentation can be found [here](https://godoc.org/github.com/docker/go-metrics). + +## HTTP Metrics + +To instrument an HTTP handler, you can wrap the code like this: + +```go +namespace := metrics.NewNamespace("docker_distribution", "http", nil) +httpMetrics := namespace.NewDefaultHttpMetrics("your_http_handler_name") +metrics.Register(namespace) +instrumentedHandler = metrics.InstrumentHandler(httpMetrics, unInstrumentedHandler) +``` +Note: the `handler` label is attached to each metric from the handler name passed to `NewDefaultHttpMetrics`. + +## Additional Metrics + +Additional metrics are also defined here that are not available in the prometheus client. +If you need a custom metric and it is generic enough to be used by multiple projects, define it here. + + +## Copyright and license + +Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-4.0, at http://creativecommons.org/licenses/by/4.0/. diff --git a/extender/code/vendor/github.com/docker/go-metrics/counter.go b/extender/code/vendor/github.com/docker/go-metrics/counter.go new file mode 100644 index 000000000..fe36316a4 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-metrics/counter.go @@ -0,0 +1,52 @@ +package metrics + +import "github.com/prometheus/client_golang/prometheus" + +// Counter is a metric that can only increment its current count +type Counter interface { + // Inc adds Sum(vs) to the counter. Sum(vs) must be positive. + // + // If len(vs) == 0, increments the counter by 1. + Inc(vs ...float64) +} + +// LabeledCounter is a counter that must have labels populated before use.
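+// +// A hypothetical usage sketch (the metric name and label values here are +// illustrative, not part of the package): +// +// actions := ns.NewLabeledCounter("actions", "The number of actions processed", "action") +// actions.WithValues("create").Inc()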
+type LabeledCounter interface { + WithValues(vs ...string) Counter +} + +type labeledCounter struct { + pc *prometheus.CounterVec +} + +func (lc *labeledCounter) WithValues(vs ...string) Counter { + return &counter{pc: lc.pc.WithLabelValues(vs...)} +} + +func (lc *labeledCounter) Describe(ch chan<- *prometheus.Desc) { + lc.pc.Describe(ch) +} + +func (lc *labeledCounter) Collect(ch chan<- prometheus.Metric) { + lc.pc.Collect(ch) +} + +type counter struct { + pc prometheus.Counter +} + +func (c *counter) Inc(vs ...float64) { + if len(vs) == 0 { + c.pc.Inc() + return + } + + c.pc.Add(sumFloat64(vs...)) +} + +func (c *counter) Describe(ch chan<- *prometheus.Desc) { + c.pc.Describe(ch) +} + +func (c *counter) Collect(ch chan<- prometheus.Metric) { + c.pc.Collect(ch) +} diff --git a/extender/code/vendor/github.com/docker/go-metrics/docs.go b/extender/code/vendor/github.com/docker/go-metrics/docs.go new file mode 100644 index 000000000..8fbdfc697 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-metrics/docs.go @@ -0,0 +1,3 @@ +// This package is a small wrapper around the prometheus go client to help enforce convention and best practices for metrics collection in Docker projects. + +package metrics diff --git a/extender/code/vendor/github.com/docker/go-metrics/gauge.go b/extender/code/vendor/github.com/docker/go-metrics/gauge.go new file mode 100644 index 000000000..74296e877 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-metrics/gauge.go @@ -0,0 +1,72 @@ +package metrics + +import "github.com/prometheus/client_golang/prometheus" + +// Gauge is a metric that allows incrementing and decrementing a value +type Gauge interface { + Inc(...float64) + Dec(...float64) + + // Add adds the provided value to the gauge's current value + Add(float64) + + // Set replaces the gauge's current value with the provided value + Set(float64) +} + +// LabeledGauge describes a gauge that must have values populated before use.
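+// +// A hypothetical usage sketch (the metric and label names are illustrative): +// +// queued := ns.NewLabeledGauge("queued", "The number of items queued per worker", metrics.Total, "worker") +// queued.WithValues("builder").Inc() +// queued.WithValues("builder").Dec()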
+type LabeledGauge interface { + WithValues(labels ...string) Gauge +} + +type labeledGauge struct { + pg *prometheus.GaugeVec +} + +func (lg *labeledGauge) WithValues(labels ...string) Gauge { + return &gauge{pg: lg.pg.WithLabelValues(labels...)} +} + +func (lg *labeledGauge) Describe(c chan<- *prometheus.Desc) { + lg.pg.Describe(c) +} + +func (lg *labeledGauge) Collect(c chan<- prometheus.Metric) { + lg.pg.Collect(c) +} + +type gauge struct { + pg prometheus.Gauge +} + +func (g *gauge) Inc(vs ...float64) { + if len(vs) == 0 { + g.pg.Inc() + return + } + + g.Add(sumFloat64(vs...)) +} + +func (g *gauge) Dec(vs ...float64) { + if len(vs) == 0 { + g.pg.Dec() + return + } + + g.Add(-sumFloat64(vs...)) +} + +func (g *gauge) Add(v float64) { + g.pg.Add(v) +} + +func (g *gauge) Set(v float64) { + g.pg.Set(v) +} + +func (g *gauge) Describe(c chan<- *prometheus.Desc) { + g.pg.Describe(c) +} + +func (g *gauge) Collect(c chan<- prometheus.Metric) { + g.pg.Collect(c) +} diff --git a/extender/code/vendor/github.com/docker/go-metrics/handler.go b/extender/code/vendor/github.com/docker/go-metrics/handler.go new file mode 100644 index 000000000..05601e9ec --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-metrics/handler.go @@ -0,0 +1,74 @@ +package metrics + +import ( + "net/http" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +// HTTPHandlerOpts describes a set of configurable options of http metrics +type HTTPHandlerOpts struct { + DurationBuckets []float64 + RequestSizeBuckets []float64 + ResponseSizeBuckets []float64 +} + +const ( + InstrumentHandlerResponseSize = iota + InstrumentHandlerRequestSize + InstrumentHandlerDuration + InstrumentHandlerCounter + InstrumentHandlerInFlight +) + +type HTTPMetric struct { + prometheus.Collector + handlerType int +} + +var ( + defaultDurationBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 25, 60} + defaultRequestSizeBuckets = prometheus.ExponentialBuckets(1024, 2, 22) // 1K to 4G + defaultResponseSizeBuckets = defaultRequestSizeBuckets +) + +// Handler returns the global http.Handler that provides the prometheus +// metrics format on GET requests. This handler is no longer instrumented.
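+// +// A typical way to expose it, as an illustrative sketch: +// +// http.Handle("/metrics", metrics.Handler())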
+func Handler() http.Handler { + return promhttp.Handler() +} + +func InstrumentHandler(metrics []*HTTPMetric, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFunc(metrics, handler.ServeHTTP) +} + +func InstrumentHandlerFunc(metrics []*HTTPMetric, handlerFunc http.HandlerFunc) http.HandlerFunc { + var handler http.Handler + handler = http.HandlerFunc(handlerFunc) + for _, metric := range metrics { + switch metric.handlerType { + case InstrumentHandlerResponseSize: + if collector, ok := metric.Collector.(prometheus.ObserverVec); ok { + handler = promhttp.InstrumentHandlerResponseSize(collector, handler) + } + case InstrumentHandlerRequestSize: + if collector, ok := metric.Collector.(prometheus.ObserverVec); ok { + handler = promhttp.InstrumentHandlerRequestSize(collector, handler) + } + case InstrumentHandlerDuration: + if collector, ok := metric.Collector.(prometheus.ObserverVec); ok { + handler = promhttp.InstrumentHandlerDuration(collector, handler) + } + case InstrumentHandlerCounter: + if collector, ok := metric.Collector.(*prometheus.CounterVec); ok { + handler = promhttp.InstrumentHandlerCounter(collector, handler) + } + case InstrumentHandlerInFlight: + if collector, ok := metric.Collector.(prometheus.Gauge); ok { + handler = promhttp.InstrumentHandlerInFlight(collector, handler) + } + } + } + return handler.ServeHTTP +} diff --git a/extender/code/vendor/github.com/docker/go-metrics/helpers.go b/extender/code/vendor/github.com/docker/go-metrics/helpers.go new file mode 100644 index 000000000..68b7f51b3 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-metrics/helpers.go @@ -0,0 +1,10 @@ +package metrics + +func sumFloat64(vs ...float64) float64 { + var sum float64 + for _, v := range vs { + sum += v + } + + return sum +} diff --git a/extender/code/vendor/github.com/docker/go-metrics/namespace.go b/extender/code/vendor/github.com/docker/go-metrics/namespace.go new file mode 100644 index 000000000..798315451 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-metrics/namespace.go @@ -0,0 +1,315 @@ +package metrics + +import ( + "fmt" + "sync" + + "github.com/prometheus/client_golang/prometheus" +) + +type Labels map[string]string + +// NewNamespace returns a namespace that is responsible for managing a collection of +// metrics for a particular namespace and subsystem +// +// labels allows const labels to be added to all metrics created in this namespace +// and are commonly used for data like application version and git commit +func NewNamespace(name, subsystem string, labels Labels) *Namespace { + if labels == nil { + labels = make(map[string]string) + } + return &Namespace{ + name: name, + subsystem: subsystem, + labels: labels, + } +} + +// Namespace describes a set of metrics that share a namespace and subsystem. +type Namespace struct { + name string + subsystem string + labels Labels + mu sync.Mutex + metrics []prometheus.Collector +} + +// WithConstLabels returns a namespace with the provided set of labels merged +// with the existing constant labels on the namespace. +// +// Only metrics created with the returned namespace will get the new constant +// labels. The returned namespace must be registered separately.
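+// +// An illustrative sketch (the label name is hypothetical): +// +// sub := ns.WithConstLabels(metrics.Labels{"shard": "0"}) +// metrics.Register(sub)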
+func (n *Namespace) WithConstLabels(labels Labels) *Namespace { + n.mu.Lock() + ns := &Namespace{ + name: n.name, + subsystem: n.subsystem, + labels: mergeLabels(n.labels, labels), + } + n.mu.Unlock() + return ns +} + +func (n *Namespace) NewCounter(name, help string) Counter { + c := &counter{pc: prometheus.NewCounter(n.newCounterOpts(name, help))} + n.Add(c) + return c +} + +func (n *Namespace) NewLabeledCounter(name, help string, labels ...string) LabeledCounter { + c := &labeledCounter{pc: prometheus.NewCounterVec(n.newCounterOpts(name, help), labels)} + n.Add(c) + return c +} + +func (n *Namespace) newCounterOpts(name, help string) prometheus.CounterOpts { + return prometheus.CounterOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: makeName(name, Total), + Help: help, + ConstLabels: prometheus.Labels(n.labels), + } +} + +func (n *Namespace) NewTimer(name, help string) Timer { + t := &timer{ + m: prometheus.NewHistogram(n.newTimerOpts(name, help)), + } + n.Add(t) + return t +} + +func (n *Namespace) NewLabeledTimer(name, help string, labels ...string) LabeledTimer { + t := &labeledTimer{ + m: prometheus.NewHistogramVec(n.newTimerOpts(name, help), labels), + } + n.Add(t) + return t +} + +func (n *Namespace) newTimerOpts(name, help string) prometheus.HistogramOpts { + return prometheus.HistogramOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: makeName(name, Seconds), + Help: help, + ConstLabels: prometheus.Labels(n.labels), + } +} + +func (n *Namespace) NewGauge(name, help string, unit Unit) Gauge { + g := &gauge{ + pg: prometheus.NewGauge(n.newGaugeOpts(name, help, unit)), + } + n.Add(g) + return g +} + +func (n *Namespace) NewLabeledGauge(name, help string, unit Unit, labels ...string) LabeledGauge { + g := &labeledGauge{ + pg: prometheus.NewGaugeVec(n.newGaugeOpts(name, help, unit), labels), + } + n.Add(g) + return g +} + +func (n *Namespace) newGaugeOpts(name, help string, unit Unit) prometheus.GaugeOpts { + return prometheus.GaugeOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: makeName(name, unit), + Help: help, + ConstLabels: prometheus.Labels(n.labels), + } +} + +func (n *Namespace) Describe(ch chan<- *prometheus.Desc) { + n.mu.Lock() + defer n.mu.Unlock() + + for _, metric := range n.metrics { + metric.Describe(ch) + } +} + +func (n *Namespace) Collect(ch chan<- prometheus.Metric) { + n.mu.Lock() + defer n.mu.Unlock() + + for _, metric := range n.metrics { + metric.Collect(ch) + } +} + +func (n *Namespace) Add(collector prometheus.Collector) { + n.mu.Lock() + n.metrics = append(n.metrics, collector) + n.mu.Unlock() +} + +func (n *Namespace) NewDesc(name, help string, unit Unit, labels ...string) *prometheus.Desc { + name = makeName(name, unit) + namespace := n.name + if n.subsystem != "" { + namespace = fmt.Sprintf("%s_%s", namespace, n.subsystem) + } + name = fmt.Sprintf("%s_%s", namespace, name) + return prometheus.NewDesc(name, help, labels, prometheus.Labels(n.labels)) +} + +// mergeLabels merges two or more labels objects into a single map, favoring +// the later labels. 
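+// +// For example, mergeLabels(Labels{"a": "1"}, Labels{"a": "2", "b": "3"}) +// yields Labels{"a": "2", "b": "3"}.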
+func mergeLabels(lbs ...Labels) Labels { + merged := make(Labels) + + for _, target := range lbs { + for k, v := range target { + merged[k] = v + } + } + + return merged +} + +func makeName(name string, unit Unit) string { + if unit == "" { + return name + } + + return fmt.Sprintf("%s_%s", name, unit) +} + +func (n *Namespace) NewDefaultHttpMetrics(handlerName string) []*HTTPMetric { + return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{ + DurationBuckets: defaultDurationBuckets, + RequestSizeBuckets: defaultResponseSizeBuckets, + ResponseSizeBuckets: defaultResponseSizeBuckets, + }) +} + +func (n *Namespace) NewHttpMetrics(handlerName string, durationBuckets, requestSizeBuckets, responseSizeBuckets []float64) []*HTTPMetric { + return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{ + DurationBuckets: durationBuckets, + RequestSizeBuckets: requestSizeBuckets, + ResponseSizeBuckets: responseSizeBuckets, + }) +} + +func (n *Namespace) NewHttpMetricsWithOpts(handlerName string, opts HTTPHandlerOpts) []*HTTPMetric { + var httpMetrics []*HTTPMetric + inFlightMetric := n.NewInFlightGaugeMetric(handlerName) + requestTotalMetric := n.NewRequestTotalMetric(handlerName) + requestDurationMetric := n.NewRequestDurationMetric(handlerName, opts.DurationBuckets) + requestSizeMetric := n.NewRequestSizeMetric(handlerName, opts.RequestSizeBuckets) + responseSizeMetric := n.NewResponseSizeMetric(handlerName, opts.ResponseSizeBuckets) + httpMetrics = append(httpMetrics, inFlightMetric, requestDurationMetric, requestTotalMetric, requestSizeMetric, responseSizeMetric) + return httpMetrics +} + +func (n *Namespace) NewInFlightGaugeMetric(handlerName string) *HTTPMetric { + labels := prometheus.Labels(n.labels) + labels["handler"] = handlerName + metric := prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: "in_flight_requests", + Help: "The in-flight HTTP requests", + ConstLabels: prometheus.Labels(labels), + }) + httpMetric := &HTTPMetric{ + Collector: metric, + handlerType: InstrumentHandlerInFlight, + } + n.Add(httpMetric) + return httpMetric +} + +func (n *Namespace) NewRequestTotalMetric(handlerName string) *HTTPMetric { + labels := prometheus.Labels(n.labels) + labels["handler"] = handlerName + metric := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: "requests_total", + Help: "Total number of HTTP requests made.", + ConstLabels: prometheus.Labels(labels), + }, + []string{"code", "method"}, + ) + httpMetric := &HTTPMetric{ + Collector: metric, + handlerType: InstrumentHandlerCounter, + } + n.Add(httpMetric) + return httpMetric +} +func (n *Namespace) NewRequestDurationMetric(handlerName string, buckets []float64) *HTTPMetric { + if len(buckets) == 0 { + panic("DurationBuckets must be provided") + } + labels := prometheus.Labels(n.labels) + labels["handler"] = handlerName + opts := prometheus.HistogramOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: "request_duration_seconds", + Help: "The HTTP request latencies in seconds.", + Buckets: buckets, + ConstLabels: prometheus.Labels(labels), + } + metric := prometheus.NewHistogramVec(opts, []string{"method"}) + httpMetric := &HTTPMetric{ + Collector: metric, + handlerType: InstrumentHandlerDuration, + } + n.Add(httpMetric) + return httpMetric +} + +func (n *Namespace) NewRequestSizeMetric(handlerName string, buckets []float64) *HTTPMetric { + if len(buckets) == 0 { + panic("RequestSizeBuckets must be provided") + } + labels := 
prometheus.Labels(n.labels) + labels["handler"] = handlerName + opts := prometheus.HistogramOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: "request_size_bytes", + Help: "The HTTP request sizes in bytes.", + Buckets: buckets, + ConstLabels: prometheus.Labels(labels), + } + metric := prometheus.NewHistogramVec(opts, []string{}) + httpMetric := &HTTPMetric{ + Collector: metric, + handlerType: InstrumentHandlerRequestSize, + } + n.Add(httpMetric) + return httpMetric +} + +func (n *Namespace) NewResponseSizeMetric(handlerName string, buckets []float64) *HTTPMetric { + if len(buckets) == 0 { + panic("ResponseSizeBuckets must be provided") + } + labels := prometheus.Labels(n.labels) + labels["handler"] = handlerName + opts := prometheus.HistogramOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: "response_size_bytes", + Help: "The HTTP response sizes in bytes.", + Buckets: buckets, + ConstLabels: prometheus.Labels(labels), + } + metrics := prometheus.NewHistogramVec(opts, []string{}) + httpMetric := &HTTPMetric{ + Collector: metrics, + handlerType: InstrumentHandlerResponseSize, + } + n.Add(httpMetric) + return httpMetric +} diff --git a/extender/code/vendor/github.com/docker/go-metrics/register.go b/extender/code/vendor/github.com/docker/go-metrics/register.go new file mode 100644 index 000000000..708358df0 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-metrics/register.go @@ -0,0 +1,15 @@ +package metrics + +import "github.com/prometheus/client_golang/prometheus" + +// Register adds all the metrics in the provided namespace to the global +// metrics registry +func Register(n *Namespace) { + prometheus.MustRegister(n) +} + +// Deregister removes all the metrics in the provided namespace from the +// global metrics registry +func Deregister(n *Namespace) { + prometheus.Unregister(n) +} diff --git a/extender/code/vendor/github.com/docker/go-metrics/timer.go b/extender/code/vendor/github.com/docker/go-metrics/timer.go new file mode 100644 index 000000000..824c98739 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-metrics/timer.go @@ -0,0 +1,85 @@ +package metrics + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +// StartTimer begins a timer observation at the callsite. When the target +// operation is completed, the caller should call the returned done func(). +func StartTimer(timer Timer) (done func()) { + start := time.Now() + return func() { + timer.Update(time.Since(start)) + } +} + +// Timer is a metric that allows collecting the duration of an action in seconds +type Timer interface { + // Update records an observation, duration, and converts to the target + // units. + Update(duration time.Duration) + + // UpdateSince will add the duration from the provided starting time to the + // timer's summary with the precision that was used in creation of the timer + UpdateSince(time.Time) +} + +// LabeledTimer is a timer that must have label values populated before use.
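+// +// A hypothetical usage sketch, mirroring the container_actions example in the +// README: +// +// containerActions := ns.NewLabeledTimer("container_actions", "The number of seconds it takes to process each container action", "action") +// containerActions.WithValues("create").UpdateSince(start)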
+type LabeledTimer interface { + WithValues(labels ...string) *labeledTimerObserver +} + +type labeledTimer struct { + m *prometheus.HistogramVec +} + +type labeledTimerObserver struct { + m prometheus.Observer +} + +func (lbo *labeledTimerObserver) Update(duration time.Duration) { + lbo.m.Observe(duration.Seconds()) +} + +func (lbo *labeledTimerObserver) UpdateSince(since time.Time) { + lbo.m.Observe(time.Since(since).Seconds()) +} + +func (lt *labeledTimer) WithValues(labels ...string) *labeledTimerObserver { + return &labeledTimerObserver{m: lt.m.WithLabelValues(labels...)} +} + +func (lt *labeledTimer) Describe(c chan<- *prometheus.Desc) { + lt.m.Describe(c) +} + +func (lt *labeledTimer) Collect(c chan<- prometheus.Metric) { + lt.m.Collect(c) +} + +type timer struct { + m prometheus.Observer +} + +func (t *timer) Update(duration time.Duration) { + t.m.Observe(duration.Seconds()) +} + +func (t *timer) UpdateSince(since time.Time) { + t.m.Observe(time.Since(since).Seconds()) +} + +func (t *timer) Describe(c chan<- *prometheus.Desc) { + c <- t.m.(prometheus.Metric).Desc() +} + +func (t *timer) Collect(c chan<- prometheus.Metric) { + // Are there any observers that don't implement Collector? It is really + // unclear what the point of the upstream change was, but we'll let this + // panic if we get an observer that doesn't implement collector. In this + // case, we should almost always see metricVec objects, so this should + // never panic. + t.m.(prometheus.Collector).Collect(c) +} diff --git a/extender/code/vendor/github.com/docker/go-metrics/unit.go b/extender/code/vendor/github.com/docker/go-metrics/unit.go new file mode 100644 index 000000000..c96622f90 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-metrics/unit.go @@ -0,0 +1,12 @@ +package metrics + +// Unit represents the type or precision of a metric that is appended to +// the metrics fully qualified name +type Unit string + +const ( + Nanoseconds Unit = "nanoseconds" + Seconds Unit = "seconds" + Bytes Unit = "bytes" + Total Unit = "total" +) diff --git a/extender/code/vendor/github.com/docker/go-units/CONTRIBUTING.md b/extender/code/vendor/github.com/docker/go-units/CONTRIBUTING.md new file mode 100644 index 000000000..9ea86d784 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-units/CONTRIBUTING.md @@ -0,0 +1,67 @@ +# Contributing to go-units + +Want to hack on go-units? Awesome! Here are instructions to get you started. + +go-units is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read Docker's +[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), +[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), +[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and +[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 
+660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/extender/code/vendor/github.com/docker/go-units/LICENSE b/extender/code/vendor/github.com/docker/go-units/LICENSE new file mode 100644 index 000000000..b55b37bc3 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-units/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/extender/code/vendor/github.com/docker/go-units/MAINTAINERS b/extender/code/vendor/github.com/docker/go-units/MAINTAINERS new file mode 100644 index 000000000..4aac7c741 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-units/MAINTAINERS @@ -0,0 +1,46 @@ +# go-units maintainers file +# +# This file describes who runs the docker/go-units project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "akihirosuda", + "dnephin", + "thajeztah", + "vdemeester", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. 
+ + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.akihirosuda] + Name = "Akihiro Suda" + Email = "akihiro.suda.cz@hco.ntt.co.jp" + GitHub = "AkihiroSuda" + + [people.dnephin] + Name = "Daniel Nephin" + Email = "dnephin@gmail.com" + GitHub = "dnephin" + + [people.thajeztah] + Name = "Sebastiaan van Stijn" + Email = "github@gone.nl" + GitHub = "thaJeztah" + + [people.vdemeester] + Name = "Vincent Demeester" + Email = "vincent@sbr.pm" + GitHub = "vdemeester" \ No newline at end of file diff --git a/extender/code/vendor/github.com/docker/go-units/README.md b/extender/code/vendor/github.com/docker/go-units/README.md new file mode 100644 index 000000000..4f70a4e13 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-units/README.md @@ -0,0 +1,16 @@ +[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) + +# Introduction + +go-units is a library to transform human-friendly measurements into machine-friendly values. + +## Usage + +See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. + +## Copyright and license + +Copyright © 2015 Docker, Inc. + +go-units is licensed under the Apache License, Version 2.0. +See [LICENSE](LICENSE) for the full text of the license. diff --git a/extender/code/vendor/github.com/docker/go-units/circle.yml b/extender/code/vendor/github.com/docker/go-units/circle.yml new file mode 100644 index 000000000..af9d60552 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-units/circle.yml @@ -0,0 +1,11 @@ +dependencies: + post: + # install golint + - go get golang.org/x/lint/golint + +test: + pre: + # run analysis before tests + - go vet ./... + - test -z "$(golint ./... | tee /dev/stderr)" + - test -z "$(gofmt -s -l . | tee /dev/stderr)" diff --git a/extender/code/vendor/github.com/docker/go-units/duration.go b/extender/code/vendor/github.com/docker/go-units/duration.go new file mode 100644 index 000000000..48dd8744d --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-units/duration.go @@ -0,0 +1,35 @@ +// Package units provides helper functions to parse and print size and time units +// in human-readable format. +package units + +import ( + "fmt" + "time" +) + +// HumanDuration returns a human-readable approximation of a duration +(eg. "About a minute", "4 hours ago", etc.).
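+// +// For example: HumanDuration(47*time.Second) returns "47 seconds", and +// HumanDuration(90*time.Minute) returns "2 hours".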
+func HumanDuration(d time.Duration) string { + if seconds := int(d.Seconds()); seconds < 1 { + return "Less than a second" + } else if seconds == 1 { + return "1 second" + } else if seconds < 60 { + return fmt.Sprintf("%d seconds", seconds) + } else if minutes := int(d.Minutes()); minutes == 1 { + return "About a minute" + } else if minutes < 60 { + return fmt.Sprintf("%d minutes", minutes) + } else if hours := int(d.Hours() + 0.5); hours == 1 { + return "About an hour" + } else if hours < 48 { + return fmt.Sprintf("%d hours", hours) + } else if hours < 24*7*2 { + return fmt.Sprintf("%d days", hours/24) + } else if hours < 24*30*2 { + return fmt.Sprintf("%d weeks", hours/24/7) + } else if hours < 24*365*2 { + return fmt.Sprintf("%d months", hours/24/30) + } + return fmt.Sprintf("%d years", int(d.Hours())/24/365) +} diff --git a/extender/code/vendor/github.com/docker/go-units/size.go b/extender/code/vendor/github.com/docker/go-units/size.go new file mode 100644 index 000000000..85f6ab071 --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-units/size.go @@ -0,0 +1,108 @@ +package units + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +// See: http://en.wikipedia.org/wiki/Binary_prefix +const ( + // Decimal + + KB = 1000 + MB = 1000 * KB + GB = 1000 * MB + TB = 1000 * GB + PB = 1000 * TB + + // Binary + + KiB = 1024 + MiB = 1024 * KiB + GiB = 1024 * MiB + TiB = 1024 * GiB + PiB = 1024 * TiB +) + +type unitMap map[string]int64 + +var ( + decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} + binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} + sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[iI]?[bB]?$`) +) + +var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} +var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} + +func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) { + i := 0 + unitsLimit := len(_map) - 1 + for size >= base && i < unitsLimit { + size = size / base + i++ + } + return size, _map[i] +} + +// CustomSize returns a human-readable approximation of a size +// using custom format. +func CustomSize(format string, size float64, base float64, _map []string) string { + size, unit := getSizeAndUnit(size, base, _map) + return fmt.Sprintf(format, size, unit) +} + +// HumanSizeWithPrecision allows the size to be in any precision, +// instead of 4 digit precision used in units.HumanSize. +func HumanSizeWithPrecision(size float64, precision int) string { + size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs) + return fmt.Sprintf("%.*g%s", precision, size, unit) +} + +// HumanSize returns a human-readable approximation of a size +// capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). +func HumanSize(size float64) string { + return HumanSizeWithPrecision(size, 4) +} + +// BytesSize returns a human-readable size in bytes, kibibytes, +// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). +func BytesSize(size float64) string { + return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs) +} + +// FromHumanSize returns an integer from a human-readable specification of a +// size using SI standard (eg. "44kB", "17MB"). +func FromHumanSize(size string) (int64, error) { + return parseSize(size, decimalMap) +} + +// RAMInBytes parses a human-readable string representing an amount of RAM +// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and +// returns the number of bytes, or -1 if the string is unparseable. 
+// Units are case-insensitive, and the 'b' suffix is optional. +func RAMInBytes(size string) (int64, error) { + return parseSize(size, binaryMap) +} + +// Parses the human-readable size string into the amount it represents. +func parseSize(sizeStr string, uMap unitMap) (int64, error) { + matches := sizeRegex.FindStringSubmatch(sizeStr) + if len(matches) != 4 { + return -1, fmt.Errorf("invalid size: '%s'", sizeStr) + } + + size, err := strconv.ParseFloat(matches[1], 64) + if err != nil { + return -1, err + } + + unitPrefix := strings.ToLower(matches[3]) + if mul, ok := uMap[unitPrefix]; ok { + size *= float64(mul) + } + + return int64(size), nil +} diff --git a/extender/code/vendor/github.com/docker/go-units/ulimit.go b/extender/code/vendor/github.com/docker/go-units/ulimit.go new file mode 100644 index 000000000..fca0400cc --- /dev/null +++ b/extender/code/vendor/github.com/docker/go-units/ulimit.go @@ -0,0 +1,123 @@ +package units + +import ( + "fmt" + "strconv" + "strings" +) + +// Ulimit is a human friendly version of Rlimit. +type Ulimit struct { + Name string + Hard int64 + Soft int64 +} + +// Rlimit specifies the resource limits, such as max open files. +type Rlimit struct { + Type int `json:"type,omitempty"` + Hard uint64 `json:"hard,omitempty"` + Soft uint64 `json:"soft,omitempty"` +} + +const ( + // magic numbers for making the syscall + // some of these are defined in the syscall package, but not all. + // Also since Windows client doesn't get access to the syscall package, need to + // define these here + rlimitAs = 9 + rlimitCore = 4 + rlimitCPU = 0 + rlimitData = 2 + rlimitFsize = 1 + rlimitLocks = 10 + rlimitMemlock = 8 + rlimitMsgqueue = 12 + rlimitNice = 13 + rlimitNofile = 7 + rlimitNproc = 6 + rlimitRss = 5 + rlimitRtprio = 14 + rlimitRttime = 15 + rlimitSigpending = 11 + rlimitStack = 3 +) + +var ulimitNameMapping = map[string]int{ + //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. + "core": rlimitCore, + "cpu": rlimitCPU, + "data": rlimitData, + "fsize": rlimitFsize, + "locks": rlimitLocks, + "memlock": rlimitMemlock, + "msgqueue": rlimitMsgqueue, + "nice": rlimitNice, + "nofile": rlimitNofile, + "nproc": rlimitNproc, + "rss": rlimitRss, + "rtprio": rlimitRtprio, + "rttime": rlimitRttime, + "sigpending": rlimitSigpending, + "stack": rlimitStack, +} + +// ParseUlimit parses and returns a Ulimit from the specified string. 
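+// +// For example, ParseUlimit("nofile=1024:2048") returns +// &Ulimit{Name: "nofile", Soft: 1024, Hard: 2048}.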
+func ParseUlimit(val string) (*Ulimit, error) { + parts := strings.SplitN(val, "=", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid ulimit argument: %s", val) + } + + if _, exists := ulimitNameMapping[parts[0]]; !exists { + return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) + } + + var ( + soft int64 + hard = &soft // default to soft in case no hard was set + temp int64 + err error + ) + switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { + case 2: + temp, err = strconv.ParseInt(limitVals[1], 10, 64) + if err != nil { + return nil, err + } + hard = &temp + fallthrough + case 1: + soft, err = strconv.ParseInt(limitVals[0], 10, 64) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) + } + + if *hard != -1 { + if soft == -1 { + return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: soft: -1 (unlimited), hard: %d", *hard) + } + if soft > *hard { + return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) + } + } + + return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil +} + +// GetRlimit returns the RLimit corresponding to Ulimit. +func (u *Ulimit) GetRlimit() (*Rlimit, error) { + t, exists := ulimitNameMapping[u.Name] + if !exists { + return nil, fmt.Errorf("invalid ulimit name %s", u.Name) + } + + return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil +} + +func (u *Ulimit) String() string { + return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) +} diff --git a/extender/code/vendor/github.com/docker/swarmkit/LICENSE b/extender/code/vendor/github.com/docker/swarmkit/LICENSE new file mode 100644 index 000000000..e2db6ed11 --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2018 Docker Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/extender/code/vendor/github.com/docker/swarmkit/agent/exec/controller.go b/extender/code/vendor/github.com/docker/swarmkit/agent/exec/controller.go new file mode 100644 index 000000000..c9e9343fd --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/agent/exec/controller.go @@ -0,0 +1,362 @@ +package exec + +import ( + "fmt" + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/equality" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/protobuf/ptypes" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/net/context" +) + +// Controller controls execution of a task. +type Controller interface { + // Update the task definition seen by the controller. Will return + // ErrTaskUpdateFailed if the provided task definition changes fields that + // cannot be changed. + // + // Will be ignored if the task has exited. + Update(ctx context.Context, t *api.Task) error + + // Prepare the task for execution. 
This should ensure that all resources + // are created such that a call to Start can execute immediately. + Prepare(ctx context.Context) error + + // Start the target and return when it has started successfully. + Start(ctx context.Context) error + + // Wait blocks until the target has exited. + Wait(ctx context.Context) error + + // Shutdown requests to exit the target gracefully. + Shutdown(ctx context.Context) error + + // Terminate the target. + Terminate(ctx context.Context) error + + // Remove all resources allocated by the controller. + Remove(ctx context.Context) error + + // Close closes any ephemeral resources associated with the controller instance. + Close() error +} + +// ControllerLogs defines a component that makes logs accessible. +// +// Can usually be accessed on a controller instance via type assertion. +type ControllerLogs interface { + // Logs will write to the publisher until the context is cancelled or an + // error occurs. + Logs(ctx context.Context, publisher LogPublisher, options api.LogSubscriptionOptions) error +} + +// LogPublisher defines the protocol for receiving a log message. +type LogPublisher interface { + Publish(ctx context.Context, message api.LogMessage) error +} + +// LogPublisherFunc implements LogPublisher with just a function. +type LogPublisherFunc func(ctx context.Context, message api.LogMessage) error + +// Publish calls the wrapped function. +func (fn LogPublisherFunc) Publish(ctx context.Context, message api.LogMessage) error { + return fn(ctx, message) +} + +// LogPublisherProvider defines the protocol for obtaining a log publisher. +type LogPublisherProvider interface { + Publisher(ctx context.Context, subscriptionID string) (LogPublisher, func(), error) +} + +// ContainerStatuser reports status of a container. +// +// This can be implemented by controllers or error types. +type ContainerStatuser interface { + // ContainerStatus returns the status of the target container, if + // available. When the container is not available, the status will be nil. + ContainerStatus(ctx context.Context) (*api.ContainerStatus, error) +} + +// PortStatuser reports status of ports which are allocated by the executor. +type PortStatuser interface { + // PortStatus returns the status on a list of PortConfigs + // which are managed at the host level by the controller. + PortStatus(ctx context.Context) (*api.PortStatus, error) +} + +// Resolve attempts to get a controller from the executor and reports the +// correct status depending on the task's current state according to the result. +// +// Unlike Do, if an error is returned, the status should still be reported. The +// error merely reports the failure to get the controller. +func Resolve(ctx context.Context, task *api.Task, executor Executor) (Controller, *api.TaskStatus, error) { + status := task.Status.Copy() + + defer func() { + logStateChange(ctx, task.DesiredState, task.Status.State, status.State) + }() + + ctlr, err := executor.Controller(task) + + // depending on the task's state, a failed controller resolution has varying + // impact. The following expresses that impact. + if err != nil { + status.Message = "resolving controller failed" + status.Err = err.Error() + // before the task has been started, we consider it a rejection. + // if the task is running, consider the task to have failed; + // otherwise keep the existing state. + if task.Status.State < api.TaskStateStarting { + status.State = api.TaskStateRejected + } else if task.Status.State <= api.TaskStateRunning { + status.State = api.TaskStateFailed + } + } else if task.Status.State < api.TaskStateAccepted { + // we always want to proceed to accepted when we resolve the controller + status.Message = "accepted" + status.State = api.TaskStateAccepted + status.Err = "" + } + + return ctlr, status, err +} + +// Do progresses the task state by performing a single operation on the +// controller. The returned TaskStatus should be marked as the new state of +// the task. +// +// The returned status should be reported and placed back on the task +// before the next call. The operation can be cancelled by cancelling the +// provided context. +// +// Errors from the task controller will be reported on the returned status. Any +// errors coming from this function should not be reported as related to the +// individual task. +// +// If ErrTaskNoop is returned, it means a second call to Do will result in no +// change. If ErrTaskDead is returned, calls to Do will no longer result in any +// action. +func Do(ctx context.Context, task *api.Task, ctlr Controller) (*api.TaskStatus, error) { + status := task.Status.Copy() + + // stay in the current state. + noop := func(errs ...error) (*api.TaskStatus, error) { + return status, ErrTaskNoop + } + + retry := func() (*api.TaskStatus, error) { + // while we retry on all errors, this allows us to explicitly declare + // retry cases. + return status, ErrTaskRetry + } + + // transition moves the task to the next state. + transition := func(state api.TaskState, msg string) (*api.TaskStatus, error) { + current := status.State + status.State = state + status.Message = msg + status.Err = "" + + if current > state { + panic("invalid state transition") + } + return status, nil + } + + // containerStatus and exitCode keep track of whether we've set them in this + // particular method. Eventually, we assemble these as part of a defer. + var ( + containerStatus *api.ContainerStatus + portStatus *api.PortStatus + exitCode int + ) + + // fatal is returned when execution of the task has failed fatally. In this + // case, we proceed to a terminal error state and set the appropriate fields. + // + // Common checks for the nature of an error should be included here. If the + // error is determined not to be fatal for the task, a retry is requested + // instead of a terminal state. + fatal := func(err error) (*api.TaskStatus, error) { + if err == nil { + panic("err must not be nil when fatal") + } + + if cs, ok := err.(ContainerStatuser); ok { + var err error + containerStatus, err = cs.ContainerStatus(ctx) + if err != nil && !contextDoneError(err) { + log.G(ctx).WithError(err).Error("error resolving container status on fatal") + } + } + + // make sure we've set the *correct* exit code + if ec, ok := err.(ExitCoder); ok { + exitCode = ec.ExitCode() + } + + if cause := errors.Cause(err); cause == context.DeadlineExceeded || cause == context.Canceled { + return retry() + } + + status.Err = err.Error() // still reported on temporary + if IsTemporary(err) { + return retry() + } + + // only at this point do we consider the error fatal to the task. + log.G(ctx).WithError(err).Error("fatal task error") + + // NOTE(stevvooe): The following switch dictates the terminal failure + // state based on the state in which the failure was encountered.
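+ // Restating the note above: a failure observed before the task reached + // TaskStateStarting is recorded as TaskStateRejected, while a failure at or + // after TaskStateStarting is recorded as TaskStateFailed.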
+ switch { + case status.State < api.TaskStateStarting: + status.State = api.TaskStateRejected + case status.State >= api.TaskStateStarting: + status.State = api.TaskStateFailed + } + + return status, nil + } + + // below, we have several callbacks that are run after the state transition + // is completed. + defer func() { + logStateChange(ctx, task.DesiredState, task.Status.State, status.State) + + if !equality.TaskStatusesEqualStable(status, &task.Status) { + status.Timestamp = ptypes.MustTimestampProto(time.Now()) + } + }() + + // extract the container status from the container, if supported. + defer func() { + // only do this if in an active state + if status.State < api.TaskStateStarting { + return + } + + if containerStatus == nil { + // collect this, if we haven't + cctlr, ok := ctlr.(ContainerStatuser) + if !ok { + return + } + + var err error + containerStatus, err = cctlr.ContainerStatus(ctx) + if err != nil && !contextDoneError(err) { + log.G(ctx).WithError(err).Error("container status unavailable") + } + + // at this point, things have gone fairly wrong. Remain positive + // and let's get something out the door. + if containerStatus == nil { + containerStatus = new(api.ContainerStatus) + containerStatusTask := task.Status.GetContainer() + if containerStatusTask != nil { + *containerStatus = *containerStatusTask // copy it over. + } + } + } + + // at this point, we *must* have a containerStatus. + if exitCode != 0 { + containerStatus.ExitCode = int32(exitCode) + } + + status.RuntimeStatus = &api.TaskStatus_Container{ + Container: containerStatus, + } + + if portStatus == nil { + pctlr, ok := ctlr.(PortStatuser) + if !ok { + return + } + + var err error + portStatus, err = pctlr.PortStatus(ctx) + if err != nil && !contextDoneError(err) { + log.G(ctx).WithError(err).Error("container port status unavailable") + } + } + + status.PortStatus = portStatus + }() + + // this branch bounds the largest state achievable in the agent as SHUTDOWN, which + // is exactly the correct behavior for the agent. + if task.DesiredState >= api.TaskStateShutdown { + if status.State >= api.TaskStateCompleted { + return noop() + } + + if err := ctlr.Shutdown(ctx); err != nil { + return fatal(err) + } + + return transition(api.TaskStateShutdown, "shutdown") + } + + if status.State > task.DesiredState { + return noop() // way beyond desired state, pause + } + + // the following states may proceed past desired state. + switch status.State { + case api.TaskStatePreparing: + if err := ctlr.Prepare(ctx); err != nil && err != ErrTaskPrepared { + return fatal(err) + } + + return transition(api.TaskStateReady, "prepared") + case api.TaskStateStarting: + if err := ctlr.Start(ctx); err != nil && err != ErrTaskStarted { + return fatal(err) + } + + return transition(api.TaskStateRunning, "started") + case api.TaskStateRunning: + if err := ctlr.Wait(ctx); err != nil { + return fatal(err) + } + + return transition(api.TaskStateCompleted, "finished") + } + + // The following represent "pause" states. We can only proceed when the + // desired state is beyond our current state. 
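+ // Worked example of the progression implied by the two switches above, + // assuming a desired state of RUNNING and no errors (Do performs one + // transition per call, with the returned status reported between calls): + // NEW -> ACCEPTED -> PREPARING -> READY -> STARTING -> RUNNING -> COMPLETED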
+ if status.State >= task.DesiredState { + return noop() + } + + switch status.State { + case api.TaskStateNew, api.TaskStatePending, api.TaskStateAssigned: + return transition(api.TaskStateAccepted, "accepted") + case api.TaskStateAccepted: + return transition(api.TaskStatePreparing, "preparing") + case api.TaskStateReady: + return transition(api.TaskStateStarting, "starting") + default: // terminal states + return noop() + } +} + +func logStateChange(ctx context.Context, desired, previous, next api.TaskState) { + if previous != next { + fields := logrus.Fields{ + "state.transition": fmt.Sprintf("%v->%v", previous, next), + "state.desired": desired, + } + log.G(ctx).WithFields(fields).Debug("state changed") + } +} + +func contextDoneError(err error) bool { + cause := errors.Cause(err) + return cause == context.Canceled || cause == context.DeadlineExceeded +} diff --git a/extender/code/vendor/github.com/docker/swarmkit/agent/exec/controller_stub.go b/extender/code/vendor/github.com/docker/swarmkit/agent/exec/controller_stub.go new file mode 100644 index 000000000..076955ff8 --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/agent/exec/controller_stub.go @@ -0,0 +1,75 @@ +package exec + +import ( + "github.com/docker/swarmkit/api" + "golang.org/x/net/context" + "runtime" + "strings" +) + +// StubController implements the Controller interface, +// but allows you to specify behaviors for each of its methods. +type StubController struct { + Controller + UpdateFn func(ctx context.Context, t *api.Task) error + PrepareFn func(ctx context.Context) error + StartFn func(ctx context.Context) error + WaitFn func(ctx context.Context) error + ShutdownFn func(ctx context.Context) error + TerminateFn func(ctx context.Context) error + RemoveFn func(ctx context.Context) error + CloseFn func() error + calls map[string]int + cstatus *api.ContainerStatus +} + +// NewStubController returns an initialized StubController. +func NewStubController() *StubController { + return &StubController{ + calls: make(map[string]int), + } +} + +// called records the calling method: if method A calls called(), +// the calls["A"] count will be incremented. +func (sc *StubController) called() { + pc, _, _, ok := runtime.Caller(1) + if !ok { + panic("Failed to find caller of function") + } + // longName looks like 'github.com/docker/swarmkit/agent/exec.(*StubController).Prepare:1' + longName := runtime.FuncForPC(pc).Name() + parts := strings.Split(longName, ".") + tail := strings.Split(parts[len(parts)-1], ":") + sc.calls[tail[0]]++ +} + +// Update is part of the Controller interface +func (sc *StubController) Update(ctx context.Context, t *api.Task) error { + sc.called() + return sc.UpdateFn(ctx, t) +} + +// Prepare is part of the Controller interface +func (sc *StubController) Prepare(ctx context.Context) error { sc.called(); return sc.PrepareFn(ctx) } + +// Start is part of the Controller interface +func (sc *StubController) Start(ctx context.Context) error { sc.called(); return sc.StartFn(ctx) } + +// Wait is part of the Controller interface +func (sc *StubController) Wait(ctx context.Context) error { sc.called(); return sc.WaitFn(ctx) } + +// Shutdown is part of the Controller interface +func (sc *StubController) Shutdown(ctx context.Context) error { sc.called(); return sc.ShutdownFn(ctx) } + +// Terminate is part of the Controller interface +func (sc *StubController) Terminate(ctx context.Context) error { + sc.called() + return sc.TerminateFn(ctx) +} + +// Remove is part of the Controller interface +func (sc *StubController) Remove(ctx context.Context) error { sc.called(); return sc.RemoveFn(ctx) } + +// Close is part of the Controller interface +func (sc *StubController) Close() error { sc.called(); return sc.CloseFn() } diff --git a/extender/code/vendor/github.com/docker/swarmkit/agent/exec/errors.go b/extender/code/vendor/github.com/docker/swarmkit/agent/exec/errors.go new file mode 100644 index 000000000..af57e6b32 --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/agent/exec/errors.go @@ -0,0 +1,82 @@ +package exec + +import "github.com/pkg/errors" + +var ( + // ErrRuntimeUnsupported is returned when a task requires a runtime + // unsupported by the executor. + ErrRuntimeUnsupported = errors.New("exec: unsupported runtime") + + // ErrTaskPrepared is returned if the task is already prepared. + ErrTaskPrepared = errors.New("exec: task already prepared") + + // ErrTaskStarted can be returned from any operation that cannot be + // performed because the task has already been started. This does not imply + // that the task is running but rather that it is no longer valid to call + // Start. + ErrTaskStarted = errors.New("exec: task already started") + + // ErrTaskUpdateRejected is returned if a task update is rejected by a controller. + ErrTaskUpdateRejected = errors.New("exec: task update rejected") + + // ErrControllerClosed is returned when a task controller has been closed. + ErrControllerClosed = errors.New("exec: controller closed") + + // ErrTaskRetry is returned by Do when an operation failed but should be + // retried. The status should still be reported in this case. + ErrTaskRetry = errors.New("exec: task retry") + + // ErrTaskNoop is returned when a subsequent call to Do will not result in + // advancing the task. Callers should avoid calling Do until the task has been updated. + ErrTaskNoop = errors.New("exec: task noop") +) + +// ExitCoder is implemented by errors that have an exit code. +type ExitCoder interface { + // ExitCode returns the exit code. + ExitCode() int +} + +// Temporary indicates whether or not the error condition is temporary. +// +// If this is encountered in the controller, the failing operation will be +// retried when this returns true. Otherwise, the operation is considered +// fatal. +type Temporary interface { + Temporary() bool +} + +// MakeTemporary makes the error temporary. +func MakeTemporary(err error) error { + if IsTemporary(err) { + return err + } + + return temporary{err} +} + +type temporary struct { + error +} + +func (t temporary) Cause() error { return t.error } +func (t temporary) Temporary() bool { return true } + +// IsTemporary returns true if the error or a recursive cause returns true for +// temporary. +func IsTemporary(err error) bool { + for err != nil { + if tmp, ok := err.(Temporary); ok && tmp.Temporary() { + return true + } + + cause := errors.Cause(err) + if cause == err { + break + } + + err = cause + } + + return false +} diff --git a/extender/code/vendor/github.com/docker/swarmkit/agent/exec/executor.go b/extender/code/vendor/github.com/docker/swarmkit/agent/exec/executor.go new file mode 100644 index 000000000..8c3fd0350 --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/agent/exec/executor.go @@ -0,0 +1,81 @@ +package exec + +import ( + "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +// Executor provides controllers for tasks. +type Executor interface { + // Describe returns the underlying node description.
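+ // (For reference: per the NodeDescription message in the vendored API + // descriptors later in this patch, the description carries the node's + // hostname, platform, resources, engine description, TLS info and FIPS flag.)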
+ Describe(ctx context.Context) (*api.NodeDescription, error) + + // Configure uses the node object state to propagate node + // state to the underlying executor. + Configure(ctx context.Context, node *api.Node) error + + // Controller provides a controller for the given task. + Controller(t *api.Task) (Controller, error) + + // SetNetworkBootstrapKeys passes the symmetric keys from the + // manager to the executor. + SetNetworkBootstrapKeys([]*api.EncryptionKey) error +} + +// SecretsProvider is implemented by objects that can store secrets, typically +// an executor. +type SecretsProvider interface { + Secrets() SecretsManager +} + +// ConfigsProvider is implemented by objects that can store configs, +// typically an executor. +type ConfigsProvider interface { + Configs() ConfigsManager +} + +// DependencyManager is a meta-object that can keep track of typed objects +// such as secrets and configs. +type DependencyManager interface { + SecretsProvider + ConfigsProvider +} + +// DependencyGetter is a meta-object that can provide access to typed objects +// such as secrets and configs. +type DependencyGetter interface { + Secrets() SecretGetter + Configs() ConfigGetter +} + +// SecretGetter contains secret data necessary for the Controller. +type SecretGetter interface { + // Get returns the secret with a specific secret ID, if available. + // When the secret is not available, the return will be nil. + Get(secretID string) (*api.Secret, error) +} + +// SecretsManager is the interface for secret storage and updates. +type SecretsManager interface { + SecretGetter + + Add(secrets ...api.Secret) // add one or more secrets + Remove(secrets []string) // remove the secrets by ID + Reset() // remove all secrets +} + +// ConfigGetter contains config data necessary for the Controller. +type ConfigGetter interface { + // Get returns the config with a specific config ID, if available. + // When the config is not available, the return will be nil. + Get(configID string) (*api.Config, error) +} + +// ConfigsManager is the interface for config storage and updates. +type ConfigsManager interface { + ConfigGetter + + Add(configs ...api.Config) // add one or more configs + Remove(configs []string) // remove the configs by ID + Reset() // remove all configs +} diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/README.md b/extender/code/vendor/github.com/docker/swarmkit/api/README.md new file mode 100644 index 000000000..a7ec3fc54 --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/api/README.md @@ -0,0 +1,24 @@ +### Notice + +Do not change .pb.go files directly. You need to change the corresponding .proto files and run the following command to regenerate the .pb.go files. +``` +$ make generate +``` + +Click [here](https://github.com/google/protobuf) for more information about protobuf. + +The `api.pb.txt` file contains merged descriptors of all defined services and messages. +Definitions present here are considered frozen after the release. + +At release time, the current `api.pb.txt` file will be moved into place to +freeze the API changes for the minor version. For example, when 1.0.0 is +released, `api.pb.txt` should be moved to `1.0.txt`. Notice that we leave off +the patch number, since the API will be completely locked down for a given +patch series. + +We may find that by default, protobuf descriptors are too noisy to lock down +API changes. In that case, we may filter out certain fields in the descriptors, +possibly regenerating for old versions.
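+ +For example, when cutting a hypothetical 1.0.0 release, the freeze step described above would amount to something like: + +``` +$ mv api.pb.txt 1.0.txt +```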
+ +This process is similar to the [process used to ensure backwards compatibility +in Go](https://github.com/golang/go/tree/master/api). diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/api.pb.txt b/extender/code/vendor/github.com/docker/swarmkit/api/api.pb.txt new file mode 100644 index 000000000..eb5efbba2 --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/api/api.pb.txt @@ -0,0 +1,10067 @@ +file { + name: "google/protobuf/timestamp.proto" + package: "google.protobuf" + message_type { + name: "Timestamp" + field { + name: "seconds" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_INT64 + json_name: "seconds" + } + field { + name: "nanos" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "nanos" + } + } + options { + java_package: "com.google.protobuf" + java_outer_classname: "TimestampProto" + java_multiple_files: true + go_package: "github.com/golang/protobuf/ptypes/timestamp" + cc_enable_arenas: true + objc_class_prefix: "GPB" + csharp_namespace: "Google.Protobuf.WellKnownTypes" + } + syntax: "proto3" +} +file { + name: "google/protobuf/duration.proto" + package: "google.protobuf" + message_type { + name: "Duration" + field { + name: "seconds" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_INT64 + json_name: "seconds" + } + field { + name: "nanos" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "nanos" + } + } + options { + java_package: "com.google.protobuf" + java_outer_classname: "DurationProto" + java_multiple_files: true + go_package: "github.com/golang/protobuf/ptypes/duration" + cc_enable_arenas: true + objc_class_prefix: "GPB" + csharp_namespace: "Google.Protobuf.WellKnownTypes" + } + syntax: "proto3" +} +file { + name: "google/protobuf/descriptor.proto" + package: "google.protobuf" + message_type { + name: "FileDescriptorSet" + field { + name: "file" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.FileDescriptorProto" + json_name: "file" + } + } + message_type { + name: "FileDescriptorProto" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "package" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "package" + } + field { + name: "dependency" + number: 3 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "dependency" + } + field { + name: "public_dependency" + number: 10 + label: LABEL_REPEATED + type: TYPE_INT32 + json_name: "publicDependency" + } + field { + name: "weak_dependency" + number: 11 + label: LABEL_REPEATED + type: TYPE_INT32 + json_name: "weakDependency" + } + field { + name: "message_type" + number: 4 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.DescriptorProto" + json_name: "messageType" + } + field { + name: "enum_type" + number: 5 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.EnumDescriptorProto" + json_name: "enumType" + } + field { + name: "service" + number: 6 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.ServiceDescriptorProto" + json_name: "service" + } + field { + name: "extension" + number: 7 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.FieldDescriptorProto" + json_name: "extension" + } + field { + name: "options" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.FileOptions" + json_name: "options" + } + field { + name: "source_code_info" + number: 9 + label: LABEL_OPTIONAL + type: 
TYPE_MESSAGE + type_name: ".google.protobuf.SourceCodeInfo" + json_name: "sourceCodeInfo" + } + field { + name: "syntax" + number: 12 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "syntax" + } + } + message_type { + name: "DescriptorProto" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "field" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.FieldDescriptorProto" + json_name: "field" + } + field { + name: "extension" + number: 6 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.FieldDescriptorProto" + json_name: "extension" + } + field { + name: "nested_type" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.DescriptorProto" + json_name: "nestedType" + } + field { + name: "enum_type" + number: 4 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.EnumDescriptorProto" + json_name: "enumType" + } + field { + name: "extension_range" + number: 5 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.DescriptorProto.ExtensionRange" + json_name: "extensionRange" + } + field { + name: "oneof_decl" + number: 8 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.OneofDescriptorProto" + json_name: "oneofDecl" + } + field { + name: "options" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.MessageOptions" + json_name: "options" + } + field { + name: "reserved_range" + number: 9 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.DescriptorProto.ReservedRange" + json_name: "reservedRange" + } + field { + name: "reserved_name" + number: 10 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "reservedName" + } + nested_type { + name: "ExtensionRange" + field { + name: "start" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "start" + } + field { + name: "end" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "end" + } + field { + name: "options" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.ExtensionRangeOptions" + json_name: "options" + } + } + nested_type { + name: "ReservedRange" + field { + name: "start" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "start" + } + field { + name: "end" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "end" + } + } + } + message_type { + name: "ExtensionRangeOptions" + field { + name: "uninterpreted_option" + number: 999 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.UninterpretedOption" + json_name: "uninterpretedOption" + } + extension_range { + start: 1000 + end: 536870912 + } + } + message_type { + name: "FieldDescriptorProto" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "number" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "number" + } + field { + name: "label" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".google.protobuf.FieldDescriptorProto.Label" + json_name: "label" + } + field { + name: "type" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".google.protobuf.FieldDescriptorProto.Type" + json_name: "type" + } + field { + name: "type_name" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "typeName" + } + field { + name: "extendee" + 
number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "extendee" + } + field { + name: "default_value" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "defaultValue" + } + field { + name: "oneof_index" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "oneofIndex" + } + field { + name: "json_name" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "jsonName" + } + field { + name: "options" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.FieldOptions" + json_name: "options" + } + enum_type { + name: "Type" + value { + name: "TYPE_DOUBLE" + number: 1 + } + value { + name: "TYPE_FLOAT" + number: 2 + } + value { + name: "TYPE_INT64" + number: 3 + } + value { + name: "TYPE_UINT64" + number: 4 + } + value { + name: "TYPE_INT32" + number: 5 + } + value { + name: "TYPE_FIXED64" + number: 6 + } + value { + name: "TYPE_FIXED32" + number: 7 + } + value { + name: "TYPE_BOOL" + number: 8 + } + value { + name: "TYPE_STRING" + number: 9 + } + value { + name: "TYPE_GROUP" + number: 10 + } + value { + name: "TYPE_MESSAGE" + number: 11 + } + value { + name: "TYPE_BYTES" + number: 12 + } + value { + name: "TYPE_UINT32" + number: 13 + } + value { + name: "TYPE_ENUM" + number: 14 + } + value { + name: "TYPE_SFIXED32" + number: 15 + } + value { + name: "TYPE_SFIXED64" + number: 16 + } + value { + name: "TYPE_SINT32" + number: 17 + } + value { + name: "TYPE_SINT64" + number: 18 + } + } + enum_type { + name: "Label" + value { + name: "LABEL_OPTIONAL" + number: 1 + } + value { + name: "LABEL_REQUIRED" + number: 2 + } + value { + name: "LABEL_REPEATED" + number: 3 + } + } + } + message_type { + name: "OneofDescriptorProto" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "options" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.OneofOptions" + json_name: "options" + } + } + message_type { + name: "EnumDescriptorProto" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "value" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.EnumValueDescriptorProto" + json_name: "value" + } + field { + name: "options" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.EnumOptions" + json_name: "options" + } + field { + name: "reserved_range" + number: 4 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.EnumDescriptorProto.EnumReservedRange" + json_name: "reservedRange" + } + field { + name: "reserved_name" + number: 5 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "reservedName" + } + nested_type { + name: "EnumReservedRange" + field { + name: "start" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "start" + } + field { + name: "end" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "end" + } + } + } + message_type { + name: "EnumValueDescriptorProto" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "number" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "number" + } + field { + name: "options" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.EnumValueOptions" + json_name: "options" + } + } + message_type { + name: "ServiceDescriptorProto" + field { + name: 
"name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "method" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.MethodDescriptorProto" + json_name: "method" + } + field { + name: "options" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.ServiceOptions" + json_name: "options" + } + } + message_type { + name: "MethodDescriptorProto" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "input_type" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "inputType" + } + field { + name: "output_type" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "outputType" + } + field { + name: "options" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.MethodOptions" + json_name: "options" + } + field { + name: "client_streaming" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "clientStreaming" + } + field { + name: "server_streaming" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "serverStreaming" + } + } + message_type { + name: "FileOptions" + field { + name: "java_package" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "javaPackage" + } + field { + name: "java_outer_classname" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "javaOuterClassname" + } + field { + name: "java_multiple_files" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "javaMultipleFiles" + } + field { + name: "java_generate_equals_and_hash" + number: 20 + label: LABEL_OPTIONAL + type: TYPE_BOOL + options { + deprecated: true + } + json_name: "javaGenerateEqualsAndHash" + } + field { + name: "java_string_check_utf8" + number: 27 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "javaStringCheckUtf8" + } + field { + name: "optimize_for" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".google.protobuf.FileOptions.OptimizeMode" + default_value: "SPEED" + json_name: "optimizeFor" + } + field { + name: "go_package" + number: 11 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "goPackage" + } + field { + name: "cc_generic_services" + number: 16 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "ccGenericServices" + } + field { + name: "java_generic_services" + number: 17 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "javaGenericServices" + } + field { + name: "py_generic_services" + number: 18 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "pyGenericServices" + } + field { + name: "php_generic_services" + number: 42 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "phpGenericServices" + } + field { + name: "deprecated" + number: 23 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "deprecated" + } + field { + name: "cc_enable_arenas" + number: 31 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "ccEnableArenas" + } + field { + name: "objc_class_prefix" + number: 36 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "objcClassPrefix" + } + field { + name: "csharp_namespace" + number: 37 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: 
"csharpNamespace" + } + field { + name: "swift_prefix" + number: 39 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "swiftPrefix" + } + field { + name: "php_class_prefix" + number: 40 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "phpClassPrefix" + } + field { + name: "php_namespace" + number: 41 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "phpNamespace" + } + field { + name: "uninterpreted_option" + number: 999 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.UninterpretedOption" + json_name: "uninterpretedOption" + } + enum_type { + name: "OptimizeMode" + value { + name: "SPEED" + number: 1 + } + value { + name: "CODE_SIZE" + number: 2 + } + value { + name: "LITE_RUNTIME" + number: 3 + } + } + extension_range { + start: 1000 + end: 536870912 + } + reserved_range { + start: 38 + end: 39 + } + } + message_type { + name: "MessageOptions" + field { + name: "message_set_wire_format" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "messageSetWireFormat" + } + field { + name: "no_standard_descriptor_accessor" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "noStandardDescriptorAccessor" + } + field { + name: "deprecated" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "deprecated" + } + field { + name: "map_entry" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "mapEntry" + } + field { + name: "uninterpreted_option" + number: 999 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.UninterpretedOption" + json_name: "uninterpretedOption" + } + extension_range { + start: 1000 + end: 536870912 + } + reserved_range { + start: 8 + end: 9 + } + reserved_range { + start: 9 + end: 10 + } + } + message_type { + name: "FieldOptions" + field { + name: "ctype" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".google.protobuf.FieldOptions.CType" + default_value: "STRING" + json_name: "ctype" + } + field { + name: "packed" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "packed" + } + field { + name: "jstype" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".google.protobuf.FieldOptions.JSType" + default_value: "JS_NORMAL" + json_name: "jstype" + } + field { + name: "lazy" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "lazy" + } + field { + name: "deprecated" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "deprecated" + } + field { + name: "weak" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "weak" + } + field { + name: "uninterpreted_option" + number: 999 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.UninterpretedOption" + json_name: "uninterpretedOption" + } + enum_type { + name: "CType" + value { + name: "STRING" + number: 0 + } + value { + name: "CORD" + number: 1 + } + value { + name: "STRING_PIECE" + number: 2 + } + } + enum_type { + name: "JSType" + value { + name: "JS_NORMAL" + number: 0 + } + value { + name: "JS_STRING" + number: 1 + } + value { + name: "JS_NUMBER" + number: 2 + } + } + extension_range { + start: 1000 + end: 536870912 + } + reserved_range { + start: 4 + end: 5 + } + } + message_type { + name: "OneofOptions" + field { + name: "uninterpreted_option" + number: 999 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: 
".google.protobuf.UninterpretedOption" + json_name: "uninterpretedOption" + } + extension_range { + start: 1000 + end: 536870912 + } + } + message_type { + name: "EnumOptions" + field { + name: "allow_alias" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "allowAlias" + } + field { + name: "deprecated" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "deprecated" + } + field { + name: "uninterpreted_option" + number: 999 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.UninterpretedOption" + json_name: "uninterpretedOption" + } + extension_range { + start: 1000 + end: 536870912 + } + reserved_range { + start: 5 + end: 6 + } + } + message_type { + name: "EnumValueOptions" + field { + name: "deprecated" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "deprecated" + } + field { + name: "uninterpreted_option" + number: 999 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.UninterpretedOption" + json_name: "uninterpretedOption" + } + extension_range { + start: 1000 + end: 536870912 + } + } + message_type { + name: "ServiceOptions" + field { + name: "deprecated" + number: 33 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "deprecated" + } + field { + name: "uninterpreted_option" + number: 999 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.UninterpretedOption" + json_name: "uninterpretedOption" + } + extension_range { + start: 1000 + end: 536870912 + } + } + message_type { + name: "MethodOptions" + field { + name: "deprecated" + number: 33 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "deprecated" + } + field { + name: "idempotency_level" + number: 34 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".google.protobuf.MethodOptions.IdempotencyLevel" + default_value: "IDEMPOTENCY_UNKNOWN" + json_name: "idempotencyLevel" + } + field { + name: "uninterpreted_option" + number: 999 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.UninterpretedOption" + json_name: "uninterpretedOption" + } + enum_type { + name: "IdempotencyLevel" + value { + name: "IDEMPOTENCY_UNKNOWN" + number: 0 + } + value { + name: "NO_SIDE_EFFECTS" + number: 1 + } + value { + name: "IDEMPOTENT" + number: 2 + } + } + extension_range { + start: 1000 + end: 536870912 + } + } + message_type { + name: "UninterpretedOption" + field { + name: "name" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.UninterpretedOption.NamePart" + json_name: "name" + } + field { + name: "identifier_value" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "identifierValue" + } + field { + name: "positive_int_value" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "positiveIntValue" + } + field { + name: "negative_int_value" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_INT64 + json_name: "negativeIntValue" + } + field { + name: "double_value" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_DOUBLE + json_name: "doubleValue" + } + field { + name: "string_value" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "stringValue" + } + field { + name: "aggregate_value" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "aggregateValue" + } + nested_type { + name: "NamePart" + field { + name: "name_part" + number: 1 + label: LABEL_REQUIRED + type: TYPE_STRING + 
json_name: "namePart" + } + field { + name: "is_extension" + number: 2 + label: LABEL_REQUIRED + type: TYPE_BOOL + json_name: "isExtension" + } + } + } + message_type { + name: "SourceCodeInfo" + field { + name: "location" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.SourceCodeInfo.Location" + json_name: "location" + } + nested_type { + name: "Location" + field { + name: "path" + number: 1 + label: LABEL_REPEATED + type: TYPE_INT32 + options { + packed: true + } + json_name: "path" + } + field { + name: "span" + number: 2 + label: LABEL_REPEATED + type: TYPE_INT32 + options { + packed: true + } + json_name: "span" + } + field { + name: "leading_comments" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "leadingComments" + } + field { + name: "trailing_comments" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "trailingComments" + } + field { + name: "leading_detached_comments" + number: 6 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "leadingDetachedComments" + } + } + } + message_type { + name: "GeneratedCodeInfo" + field { + name: "annotation" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.GeneratedCodeInfo.Annotation" + json_name: "annotation" + } + nested_type { + name: "Annotation" + field { + name: "path" + number: 1 + label: LABEL_REPEATED + type: TYPE_INT32 + options { + packed: true + } + json_name: "path" + } + field { + name: "source_file" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "sourceFile" + } + field { + name: "begin" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "begin" + } + field { + name: "end" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "end" + } + } + } + options { + java_package: "com.google.protobuf" + java_outer_classname: "DescriptorProtos" + optimize_for: SPEED + go_package: "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor" + cc_enable_arenas: true + objc_class_prefix: "GPB" + csharp_namespace: "Google.Protobuf.Reflection" + } +} +file { + name: "gogoproto/gogo.proto" + package: "gogoproto" + dependency: "google/protobuf/descriptor.proto" + extension { + name: "goproto_enum_prefix" + extendee: ".google.protobuf.EnumOptions" + number: 62001 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "goprotoEnumPrefix" + } + extension { + name: "goproto_enum_stringer" + extendee: ".google.protobuf.EnumOptions" + number: 62021 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "goprotoEnumStringer" + } + extension { + name: "enum_stringer" + extendee: ".google.protobuf.EnumOptions" + number: 62022 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "enumStringer" + } + extension { + name: "enum_customname" + extendee: ".google.protobuf.EnumOptions" + number: 62023 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "enumCustomname" + } + extension { + name: "enumdecl" + extendee: ".google.protobuf.EnumOptions" + number: 62024 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "enumdecl" + } + extension { + name: "enumvalue_customname" + extendee: ".google.protobuf.EnumValueOptions" + number: 66001 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "enumvalueCustomname" + } + extension { + name: "goproto_getters_all" + extendee: ".google.protobuf.FileOptions" + number: 63001 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "goprotoGettersAll" + } + extension { + name: "goproto_enum_prefix_all" + extendee: 
".google.protobuf.FileOptions" + number: 63002 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "goprotoEnumPrefixAll" + } + extension { + name: "goproto_stringer_all" + extendee: ".google.protobuf.FileOptions" + number: 63003 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "goprotoStringerAll" + } + extension { + name: "verbose_equal_all" + extendee: ".google.protobuf.FileOptions" + number: 63004 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "verboseEqualAll" + } + extension { + name: "face_all" + extendee: ".google.protobuf.FileOptions" + number: 63005 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "faceAll" + } + extension { + name: "gostring_all" + extendee: ".google.protobuf.FileOptions" + number: 63006 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "gostringAll" + } + extension { + name: "populate_all" + extendee: ".google.protobuf.FileOptions" + number: 63007 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "populateAll" + } + extension { + name: "stringer_all" + extendee: ".google.protobuf.FileOptions" + number: 63008 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "stringerAll" + } + extension { + name: "onlyone_all" + extendee: ".google.protobuf.FileOptions" + number: 63009 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "onlyoneAll" + } + extension { + name: "equal_all" + extendee: ".google.protobuf.FileOptions" + number: 63013 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "equalAll" + } + extension { + name: "description_all" + extendee: ".google.protobuf.FileOptions" + number: 63014 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "descriptionAll" + } + extension { + name: "testgen_all" + extendee: ".google.protobuf.FileOptions" + number: 63015 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "testgenAll" + } + extension { + name: "benchgen_all" + extendee: ".google.protobuf.FileOptions" + number: 63016 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "benchgenAll" + } + extension { + name: "marshaler_all" + extendee: ".google.protobuf.FileOptions" + number: 63017 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "marshalerAll" + } + extension { + name: "unmarshaler_all" + extendee: ".google.protobuf.FileOptions" + number: 63018 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "unmarshalerAll" + } + extension { + name: "stable_marshaler_all" + extendee: ".google.protobuf.FileOptions" + number: 63019 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "stableMarshalerAll" + } + extension { + name: "sizer_all" + extendee: ".google.protobuf.FileOptions" + number: 63020 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "sizerAll" + } + extension { + name: "goproto_enum_stringer_all" + extendee: ".google.protobuf.FileOptions" + number: 63021 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "goprotoEnumStringerAll" + } + extension { + name: "enum_stringer_all" + extendee: ".google.protobuf.FileOptions" + number: 63022 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "enumStringerAll" + } + extension { + name: "unsafe_marshaler_all" + extendee: ".google.protobuf.FileOptions" + number: 63023 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "unsafeMarshalerAll" + } + extension { + name: "unsafe_unmarshaler_all" + extendee: ".google.protobuf.FileOptions" + number: 63024 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "unsafeUnmarshalerAll" + } + extension { + name: "goproto_extensions_map_all" + extendee: ".google.protobuf.FileOptions" + number: 63025 + label: 
LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "goprotoExtensionsMapAll" + } + extension { + name: "goproto_unrecognized_all" + extendee: ".google.protobuf.FileOptions" + number: 63026 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "goprotoUnrecognizedAll" + } + extension { + name: "gogoproto_import" + extendee: ".google.protobuf.FileOptions" + number: 63027 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "gogoprotoImport" + } + extension { + name: "protosizer_all" + extendee: ".google.protobuf.FileOptions" + number: 63028 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "protosizerAll" + } + extension { + name: "compare_all" + extendee: ".google.protobuf.FileOptions" + number: 63029 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "compareAll" + } + extension { + name: "typedecl_all" + extendee: ".google.protobuf.FileOptions" + number: 63030 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "typedeclAll" + } + extension { + name: "enumdecl_all" + extendee: ".google.protobuf.FileOptions" + number: 63031 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "enumdeclAll" + } + extension { + name: "goproto_registration" + extendee: ".google.protobuf.FileOptions" + number: 63032 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "goprotoRegistration" + } + extension { + name: "goproto_getters" + extendee: ".google.protobuf.MessageOptions" + number: 64001 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "goprotoGetters" + } + extension { + name: "goproto_stringer" + extendee: ".google.protobuf.MessageOptions" + number: 64003 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "goprotoStringer" + } + extension { + name: "verbose_equal" + extendee: ".google.protobuf.MessageOptions" + number: 64004 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "verboseEqual" + } + extension { + name: "face" + extendee: ".google.protobuf.MessageOptions" + number: 64005 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "face" + } + extension { + name: "gostring" + extendee: ".google.protobuf.MessageOptions" + number: 64006 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "gostring" + } + extension { + name: "populate" + extendee: ".google.protobuf.MessageOptions" + number: 64007 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "populate" + } + extension { + name: "stringer" + extendee: ".google.protobuf.MessageOptions" + number: 67008 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "stringer" + } + extension { + name: "onlyone" + extendee: ".google.protobuf.MessageOptions" + number: 64009 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "onlyone" + } + extension { + name: "equal" + extendee: ".google.protobuf.MessageOptions" + number: 64013 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "equal" + } + extension { + name: "description" + extendee: ".google.protobuf.MessageOptions" + number: 64014 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "description" + } + extension { + name: "testgen" + extendee: ".google.protobuf.MessageOptions" + number: 64015 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "testgen" + } + extension { + name: "benchgen" + extendee: ".google.protobuf.MessageOptions" + number: 64016 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "benchgen" + } + extension { + name: "marshaler" + extendee: ".google.protobuf.MessageOptions" + number: 64017 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "marshaler" + } + extension { + name: "unmarshaler" + extendee: 
".google.protobuf.MessageOptions" + number: 64018 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "unmarshaler" + } + extension { + name: "stable_marshaler" + extendee: ".google.protobuf.MessageOptions" + number: 64019 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "stableMarshaler" + } + extension { + name: "sizer" + extendee: ".google.protobuf.MessageOptions" + number: 64020 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "sizer" + } + extension { + name: "unsafe_marshaler" + extendee: ".google.protobuf.MessageOptions" + number: 64023 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "unsafeMarshaler" + } + extension { + name: "unsafe_unmarshaler" + extendee: ".google.protobuf.MessageOptions" + number: 64024 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "unsafeUnmarshaler" + } + extension { + name: "goproto_extensions_map" + extendee: ".google.protobuf.MessageOptions" + number: 64025 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "goprotoExtensionsMap" + } + extension { + name: "goproto_unrecognized" + extendee: ".google.protobuf.MessageOptions" + number: 64026 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "goprotoUnrecognized" + } + extension { + name: "protosizer" + extendee: ".google.protobuf.MessageOptions" + number: 64028 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "protosizer" + } + extension { + name: "compare" + extendee: ".google.protobuf.MessageOptions" + number: 64029 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "compare" + } + extension { + name: "typedecl" + extendee: ".google.protobuf.MessageOptions" + number: 64030 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "typedecl" + } + extension { + name: "nullable" + extendee: ".google.protobuf.FieldOptions" + number: 65001 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "nullable" + } + extension { + name: "embed" + extendee: ".google.protobuf.FieldOptions" + number: 65002 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "embed" + } + extension { + name: "customtype" + extendee: ".google.protobuf.FieldOptions" + number: 65003 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "customtype" + } + extension { + name: "customname" + extendee: ".google.protobuf.FieldOptions" + number: 65004 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "customname" + } + extension { + name: "jsontag" + extendee: ".google.protobuf.FieldOptions" + number: 65005 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "jsontag" + } + extension { + name: "moretags" + extendee: ".google.protobuf.FieldOptions" + number: 65006 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "moretags" + } + extension { + name: "casttype" + extendee: ".google.protobuf.FieldOptions" + number: 65007 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "casttype" + } + extension { + name: "castkey" + extendee: ".google.protobuf.FieldOptions" + number: 65008 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "castkey" + } + extension { + name: "castvalue" + extendee: ".google.protobuf.FieldOptions" + number: 65009 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "castvalue" + } + extension { + name: "stdtime" + extendee: ".google.protobuf.FieldOptions" + number: 65010 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "stdtime" + } + extension { + name: "stdduration" + extendee: ".google.protobuf.FieldOptions" + number: 65011 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "stdduration" + } + options { + java_package: 
"com.google.protobuf" + java_outer_classname: "GoGoProtos" + go_package: "github.com/gogo/protobuf/gogoproto" + } +} +file { + name: "github.com/docker/swarmkit/api/types.proto" + package: "docker.swarmkit.v1" + dependency: "google/protobuf/timestamp.proto" + dependency: "google/protobuf/duration.proto" + dependency: "gogoproto/gogo.proto" + message_type { + name: "Version" + field { + name: "index" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "index" + } + } + message_type { + name: "IndexEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "val" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "val" + } + } + message_type { + name: "Annotations" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "labels" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Annotations.LabelsEntry" + json_name: "labels" + } + field { + name: "indices" + number: 4 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.IndexEntry" + options { + 65001: 0 + } + json_name: "indices" + } + nested_type { + name: "LabelsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + } + message_type { + name: "NamedGenericResource" + field { + name: "kind" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "kind" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + } + message_type { + name: "DiscreteGenericResource" + field { + name: "kind" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "kind" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_INT64 + json_name: "value" + } + } + message_type { + name: "GenericResource" + field { + name: "named_resource_spec" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NamedGenericResource" + oneof_index: 0 + json_name: "namedResourceSpec" + } + field { + name: "discrete_resource_spec" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.DiscreteGenericResource" + oneof_index: 0 + json_name: "discreteResourceSpec" + } + oneof_decl { + name: "resource" + } + } + message_type { + name: "Resources" + field { + name: "nano_cpus" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_INT64 + options { + 65004: "NanoCPUs" + } + json_name: "nanoCpus" + } + field { + name: "memory_bytes" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_INT64 + json_name: "memoryBytes" + } + field { + name: "generic" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.GenericResource" + json_name: "generic" + } + } + message_type { + name: "ResourceRequirements" + field { + name: "limits" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Resources" + json_name: "limits" + } + field { + name: "reservations" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Resources" + json_name: "reservations" + } + } + message_type { + name: "Platform" + field { + name: "architecture" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: 
"architecture" + } + field { + name: "os" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "OS" + } + json_name: "os" + } + } + message_type { + name: "PluginDescription" + field { + name: "type" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "type" + } + field { + name: "name" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + } + message_type { + name: "EngineDescription" + field { + name: "engine_version" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "engineVersion" + } + field { + name: "labels" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.EngineDescription.LabelsEntry" + json_name: "labels" + } + field { + name: "plugins" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.PluginDescription" + options { + 65001: 0 + } + json_name: "plugins" + } + nested_type { + name: "LabelsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + } + message_type { + name: "NodeDescription" + field { + name: "hostname" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "hostname" + } + field { + name: "platform" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Platform" + json_name: "platform" + } + field { + name: "resources" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Resources" + json_name: "resources" + } + field { + name: "engine" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.EngineDescription" + json_name: "engine" + } + field { + name: "tls_info" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NodeTLSInfo" + options { + 65004: "TLSInfo" + } + json_name: "tlsInfo" + } + field { + name: "fips" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_BOOL + options { + 65004: "FIPS" + } + json_name: "fips" + } + } + message_type { + name: "NodeTLSInfo" + field { + name: "trust_root" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "trustRoot" + } + field { + name: "cert_issuer_subject" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "certIssuerSubject" + } + field { + name: "cert_issuer_public_key" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "certIssuerPublicKey" + } + } + message_type { + name: "RaftMemberStatus" + field { + name: "leader" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "leader" + } + field { + name: "reachability" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.RaftMemberStatus.Reachability" + json_name: "reachability" + } + field { + name: "message" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "message" + } + enum_type { + name: "Reachability" + value { + name: "UNKNOWN" + number: 0 + } + value { + name: "UNREACHABLE" + number: 1 + } + value { + name: "REACHABLE" + number: 2 + } + } + } + message_type { + name: "NodeStatus" + field { + name: "state" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.NodeStatus.State" + json_name: "state" + } + field { + name: "message" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + 
json_name: "message" + } + field { + name: "addr" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "addr" + } + enum_type { + name: "State" + value { + name: "UNKNOWN" + number: 0 + } + value { + name: "DOWN" + number: 1 + } + value { + name: "READY" + number: 2 + } + value { + name: "DISCONNECTED" + number: 3 + } + } + } + message_type { + name: "Image" + field { + name: "reference" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "reference" + } + } + message_type { + name: "Mount" + field { + name: "type" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.Mount.Type" + json_name: "type" + } + field { + name: "source" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "source" + } + field { + name: "target" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "target" + } + field { + name: "readonly" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_BOOL + options { + 65004: "ReadOnly" + } + json_name: "readonly" + } + field { + name: "consistency" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.Mount.Consistency" + json_name: "consistency" + } + field { + name: "bind_options" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Mount.BindOptions" + json_name: "bindOptions" + } + field { + name: "volume_options" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Mount.VolumeOptions" + json_name: "volumeOptions" + } + field { + name: "tmpfs_options" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Mount.TmpfsOptions" + json_name: "tmpfsOptions" + } + nested_type { + name: "BindOptions" + field { + name: "propagation" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.Mount.BindOptions.Propagation" + json_name: "propagation" + } + enum_type { + name: "Propagation" + value { + name: "RPRIVATE" + number: 0 + options { + 66001: "MountPropagationRPrivate" + } + } + value { + name: "PRIVATE" + number: 1 + options { + 66001: "MountPropagationPrivate" + } + } + value { + name: "RSHARED" + number: 2 + options { + 66001: "MountPropagationRShared" + } + } + value { + name: "SHARED" + number: 3 + options { + 66001: "MountPropagationShared" + } + } + value { + name: "RSLAVE" + number: 4 + options { + 66001: "MountPropagationRSlave" + } + } + value { + name: "SLAVE" + number: 5 + options { + 66001: "MountPropagationSlave" + } + } + options { + 62001: 0 + 62023: "MountPropagation" + } + } + } + nested_type { + name: "VolumeOptions" + field { + name: "nocopy" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BOOL + options { + 65004: "NoCopy" + } + json_name: "nocopy" + } + field { + name: "labels" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Mount.VolumeOptions.LabelsEntry" + json_name: "labels" + } + field { + name: "driver_config" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Driver" + json_name: "driverConfig" + } + nested_type { + name: "LabelsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + } + nested_type { + name: "TmpfsOptions" + field { + name: "size_bytes" + number: 1 + label: LABEL_OPTIONAL + type: 
TYPE_INT64 + json_name: "sizeBytes" + } + field { + name: "mode" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT32 + options { + 65003: "os.FileMode" + 65001: 0 + } + json_name: "mode" + } + } + enum_type { + name: "Type" + value { + name: "BIND" + number: 0 + options { + 66001: "MountTypeBind" + } + } + value { + name: "VOLUME" + number: 1 + options { + 66001: "MountTypeVolume" + } + } + value { + name: "TMPFS" + number: 2 + options { + 66001: "MountTypeTmpfs" + } + } + options { + 62001: 0 + 62023: "MountType" + } + } + enum_type { + name: "Consistency" + value { + name: "DEFAULT" + number: 0 + options { + 66001: "MountConsistencyDefault" + } + } + value { + name: "CONSISTENT" + number: 1 + options { + 66001: "MountConsistencyFull" + } + } + value { + name: "CACHED" + number: 2 + options { + 66001: "MountConsistencyCached" + } + } + value { + name: "DELEGATED" + number: 3 + options { + 66001: "MountConsistencyDelegated" + } + } + options { + 62001: 0 + 62023: "MountConsistency" + } + } + } + message_type { + name: "RestartPolicy" + field { + name: "condition" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.RestartPolicy.RestartCondition" + json_name: "condition" + } + field { + name: "delay" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Duration" + json_name: "delay" + } + field { + name: "max_attempts" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "maxAttempts" + } + field { + name: "window" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Duration" + json_name: "window" + } + enum_type { + name: "RestartCondition" + value { + name: "NONE" + number: 0 + options { + 66001: "RestartOnNone" + } + } + value { + name: "ON_FAILURE" + number: 1 + options { + 66001: "RestartOnFailure" + } + } + value { + name: "ANY" + number: 2 + options { + 66001: "RestartOnAny" + } + } + options { + 62001: 0 + 62023: "RestartCondition" + } + } + } + message_type { + name: "UpdateConfig" + field { + name: "parallelism" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "parallelism" + } + field { + name: "delay" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Duration" + options { + 65011: 1 + 65001: 0 + } + json_name: "delay" + } + field { + name: "failure_action" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.UpdateConfig.FailureAction" + json_name: "failureAction" + } + field { + name: "monitor" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Duration" + json_name: "monitor" + } + field { + name: "max_failure_ratio" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_FLOAT + json_name: "maxFailureRatio" + } + field { + name: "order" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.UpdateConfig.UpdateOrder" + json_name: "order" + } + enum_type { + name: "FailureAction" + value { + name: "PAUSE" + number: 0 + } + value { + name: "CONTINUE" + number: 1 + } + value { + name: "ROLLBACK" + number: 2 + } + } + enum_type { + name: "UpdateOrder" + value { + name: "STOP_FIRST" + number: 0 + } + value { + name: "START_FIRST" + number: 1 + } + } + } + message_type { + name: "UpdateStatus" + field { + name: "state" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.UpdateStatus.UpdateState" + json_name: "state" + } + field { + name: "started_at" + number: 2 + 
label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Timestamp" + json_name: "startedAt" + } + field { + name: "completed_at" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Timestamp" + json_name: "completedAt" + } + field { + name: "message" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "message" + } + enum_type { + name: "UpdateState" + value { + name: "UNKNOWN" + number: 0 + } + value { + name: "UPDATING" + number: 1 + } + value { + name: "PAUSED" + number: 2 + } + value { + name: "COMPLETED" + number: 3 + } + value { + name: "ROLLBACK_STARTED" + number: 4 + } + value { + name: "ROLLBACK_PAUSED" + number: 5 + } + value { + name: "ROLLBACK_COMPLETED" + number: 6 + } + } + } + message_type { + name: "ContainerStatus" + field { + name: "container_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "containerId" + } + field { + name: "pid" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_INT32 + options { + 65004: "PID" + } + json_name: "pid" + } + field { + name: "exit_code" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "exitCode" + } + } + message_type { + name: "PortStatus" + field { + name: "ports" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.PortConfig" + json_name: "ports" + } + } + message_type { + name: "TaskStatus" + field { + name: "timestamp" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Timestamp" + json_name: "timestamp" + } + field { + name: "state" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.TaskState" + json_name: "state" + } + field { + name: "message" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "message" + } + field { + name: "err" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "err" + } + field { + name: "container" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ContainerStatus" + oneof_index: 0 + json_name: "container" + } + field { + name: "port_status" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.PortStatus" + json_name: "portStatus" + } + field { + name: "applied_by" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "appliedBy" + } + field { + name: "applied_at" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Timestamp" + json_name: "appliedAt" + } + oneof_decl { + name: "runtime_status" + } + } + message_type { + name: "NetworkAttachmentConfig" + field { + name: "target" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "target" + } + field { + name: "aliases" + number: 2 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "aliases" + } + field { + name: "addresses" + number: 3 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "addresses" + } + field { + name: "driver_attachment_opts" + number: 4 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NetworkAttachmentConfig.DriverAttachmentOptsEntry" + json_name: "driverAttachmentOpts" + } + nested_type { + name: "DriverAttachmentOptsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + } + message_type { + name: 
"IPAMConfig" + field { + name: "family" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.IPAMConfig.AddressFamily" + json_name: "family" + } + field { + name: "subnet" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "subnet" + } + field { + name: "range" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "range" + } + field { + name: "gateway" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "gateway" + } + field { + name: "reserved" + number: 5 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.IPAMConfig.ReservedEntry" + json_name: "reserved" + } + nested_type { + name: "ReservedEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + enum_type { + name: "AddressFamily" + value { + name: "UNKNOWN" + number: 0 + } + value { + name: "IPV4" + number: 4 + } + value { + name: "IPV6" + number: 6 + } + } + } + message_type { + name: "PortConfig" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "protocol" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.PortConfig.Protocol" + json_name: "protocol" + } + field { + name: "target_port" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT32 + json_name: "targetPort" + } + field { + name: "published_port" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT32 + json_name: "publishedPort" + } + field { + name: "publish_mode" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.PortConfig.PublishMode" + json_name: "publishMode" + } + enum_type { + name: "Protocol" + value { + name: "TCP" + number: 0 + options { + 66001: "ProtocolTCP" + } + } + value { + name: "UDP" + number: 1 + options { + 66001: "ProtocolUDP" + } + } + value { + name: "SCTP" + number: 2 + options { + 66001: "ProtocolSCTP" + } + } + options { + 62001: 0 + } + } + enum_type { + name: "PublishMode" + value { + name: "INGRESS" + number: 0 + options { + 66001: "PublishModeIngress" + } + } + value { + name: "HOST" + number: 1 + options { + 66001: "PublishModeHost" + } + } + options { + 62023: "PublishMode" + 62001: 0 + } + } + } + message_type { + name: "Driver" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "options" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Driver.OptionsEntry" + json_name: "options" + } + nested_type { + name: "OptionsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + } + message_type { + name: "IPAMOptions" + field { + name: "driver" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Driver" + json_name: "driver" + } + field { + name: "configs" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.IPAMConfig" + json_name: "configs" + } + } + message_type { + name: "Peer" + field { + name: "node_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "nodeId" + } + field { + name: 
"addr" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "addr" + } + } + message_type { + name: "WeightedPeer" + field { + name: "peer" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Peer" + json_name: "peer" + } + field { + name: "weight" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_INT64 + json_name: "weight" + } + } + message_type { + name: "IssuanceStatus" + field { + name: "state" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.IssuanceStatus.State" + json_name: "state" + } + field { + name: "err" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "err" + } + enum_type { + name: "State" + value { + name: "UNKNOWN" + number: 0 + options { + 66001: "IssuanceStateUnknown" + } + } + value { + name: "RENEW" + number: 1 + options { + 66001: "IssuanceStateRenew" + } + } + value { + name: "PENDING" + number: 2 + options { + 66001: "IssuanceStatePending" + } + } + value { + name: "ISSUED" + number: 3 + options { + 66001: "IssuanceStateIssued" + } + } + value { + name: "FAILED" + number: 4 + options { + 66001: "IssuanceStateFailed" + } + } + value { + name: "ROTATE" + number: 5 + options { + 66001: "IssuanceStateRotate" + } + } + options { + 62001: 0 + } + } + } + message_type { + name: "AcceptancePolicy" + field { + name: "policies" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.AcceptancePolicy.RoleAdmissionPolicy" + json_name: "policies" + } + nested_type { + name: "RoleAdmissionPolicy" + field { + name: "role" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.NodeRole" + json_name: "role" + } + field { + name: "autoaccept" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "autoaccept" + } + field { + name: "secret" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.AcceptancePolicy.RoleAdmissionPolicy.Secret" + json_name: "secret" + } + nested_type { + name: "Secret" + field { + name: "data" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "data" + } + field { + name: "alg" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "alg" + } + } + } + } + message_type { + name: "ExternalCA" + field { + name: "protocol" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.ExternalCA.CAProtocol" + json_name: "protocol" + } + field { + name: "url" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "URL" + } + json_name: "url" + } + field { + name: "options" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ExternalCA.OptionsEntry" + json_name: "options" + } + field { + name: "ca_cert" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_BYTES + options { + 65004: "CACert" + } + json_name: "caCert" + } + nested_type { + name: "OptionsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + enum_type { + name: "CAProtocol" + value { + name: "CFSSL" + number: 0 + options { + 66001: "CAProtocolCFSSL" + } + } + } + } + message_type { + name: "CAConfig" + field { + name: "node_cert_expiry" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Duration" + json_name: "nodeCertExpiry" + } 
+ field { + name: "external_cas" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ExternalCA" + options { + 65004: "ExternalCAs" + } + json_name: "externalCas" + } + field { + name: "signing_ca_cert" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_BYTES + options { + 65004: "SigningCACert" + } + json_name: "signingCaCert" + } + field { + name: "signing_ca_key" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_BYTES + options { + 65004: "SigningCAKey" + } + json_name: "signingCaKey" + } + field { + name: "force_rotate" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "forceRotate" + } + } + message_type { + name: "OrchestrationConfig" + field { + name: "task_history_retention_limit" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_INT64 + json_name: "taskHistoryRetentionLimit" + } + } + message_type { + name: "TaskDefaults" + field { + name: "log_driver" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Driver" + json_name: "logDriver" + } + } + message_type { + name: "DispatcherConfig" + field { + name: "heartbeat_period" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Duration" + json_name: "heartbeatPeriod" + } + } + message_type { + name: "RaftConfig" + field { + name: "snapshot_interval" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "snapshotInterval" + } + field { + name: "keep_old_snapshots" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "keepOldSnapshots" + } + field { + name: "log_entries_for_slow_followers" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "logEntriesForSlowFollowers" + } + field { + name: "heartbeat_tick" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT32 + json_name: "heartbeatTick" + } + field { + name: "election_tick" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT32 + json_name: "electionTick" + } + } + message_type { + name: "EncryptionConfig" + field { + name: "auto_lock_managers" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "autoLockManagers" + } + } + message_type { + name: "SpreadOver" + field { + name: "spread_descriptor" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "spreadDescriptor" + } + } + message_type { + name: "PlacementPreference" + field { + name: "spread" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.SpreadOver" + oneof_index: 0 + json_name: "spread" + } + oneof_decl { + name: "Preference" + } + } + message_type { + name: "Placement" + field { + name: "constraints" + number: 1 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "constraints" + } + field { + name: "preferences" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.PlacementPreference" + json_name: "preferences" + } + field { + name: "platforms" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Platform" + json_name: "platforms" + } + } + message_type { + name: "JoinTokens" + field { + name: "worker" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "worker" + } + field { + name: "manager" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "manager" + } + } + message_type { + name: "RootCA" + field { + name: "ca_key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BYTES + options { + 65004: "CAKey" + } + json_name: "caKey" + } + field { + name: "ca_cert" + 
number: 2 + label: LABEL_OPTIONAL + type: TYPE_BYTES + options { + 65004: "CACert" + } + json_name: "caCert" + } + field { + name: "ca_cert_hash" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "CACertHash" + } + json_name: "caCertHash" + } + field { + name: "join_tokens" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.JoinTokens" + options { + 65001: 0 + } + json_name: "joinTokens" + } + field { + name: "root_rotation" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.RootRotation" + json_name: "rootRotation" + } + field { + name: "last_forced_rotation" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "lastForcedRotation" + } + } + message_type { + name: "Certificate" + field { + name: "role" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.NodeRole" + json_name: "role" + } + field { + name: "csr" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BYTES + options { + 65004: "CSR" + } + json_name: "csr" + } + field { + name: "status" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.IssuanceStatus" + options { + 65001: 0 + } + json_name: "status" + } + field { + name: "certificate" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "certificate" + } + field { + name: "cn" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "CN" + } + json_name: "cn" + } + } + message_type { + name: "EncryptionKey" + field { + name: "subsystem" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "subsystem" + } + field { + name: "algorithm" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.EncryptionKey.Algorithm" + json_name: "algorithm" + } + field { + name: "key" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "key" + } + field { + name: "lamport_time" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "lamportTime" + } + enum_type { + name: "Algorithm" + value { + name: "AES_128_GCM" + number: 0 + } + options { + 62001: 0 + } + } + } + message_type { + name: "ManagerStatus" + field { + name: "raft_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "raftId" + } + field { + name: "addr" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "addr" + } + field { + name: "leader" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "leader" + } + field { + name: "reachability" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.RaftMemberStatus.Reachability" + json_name: "reachability" + } + } + message_type { + name: "FileTarget" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "uid" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "UID" + } + json_name: "uid" + } + field { + name: "gid" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "GID" + } + json_name: "gid" + } + field { + name: "mode" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT32 + options { + 65003: "os.FileMode" + 65001: 0 + } + json_name: "mode" + } + } + message_type { + name: "SecretReference" + field { + name: "secret_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "secretId" + } + field { + name: "secret_name" + number: 2 + label: LABEL_OPTIONAL + type: 
TYPE_STRING + json_name: "secretName" + } + field { + name: "file" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.FileTarget" + oneof_index: 0 + json_name: "file" + } + oneof_decl { + name: "target" + } + } + message_type { + name: "ConfigReference" + field { + name: "config_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "configId" + } + field { + name: "config_name" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "configName" + } + field { + name: "file" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.FileTarget" + oneof_index: 0 + json_name: "file" + } + oneof_decl { + name: "target" + } + } + message_type { + name: "BlacklistedCertificate" + field { + name: "expiry" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Timestamp" + json_name: "expiry" + } + } + message_type { + name: "HealthConfig" + field { + name: "test" + number: 1 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "test" + } + field { + name: "interval" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Duration" + json_name: "interval" + } + field { + name: "timeout" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Duration" + json_name: "timeout" + } + field { + name: "retries" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "retries" + } + field { + name: "start_period" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Duration" + json_name: "startPeriod" + } + } + message_type { + name: "MaybeEncryptedRecord" + field { + name: "algorithm" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.MaybeEncryptedRecord.Algorithm" + json_name: "algorithm" + } + field { + name: "data" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "data" + } + field { + name: "nonce" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "nonce" + } + enum_type { + name: "Algorithm" + value { + name: "NONE" + number: 0 + options { + 66001: "NotEncrypted" + } + } + value { + name: "SECRETBOX_SALSA20_POLY1305" + number: 1 + options { + 66001: "NACLSecretboxSalsa20Poly1305" + } + } + value { + name: "FERNET_AES_128_CBC" + number: 2 + options { + 66001: "FernetAES128CBC" + } + } + } + } + message_type { + name: "RootRotation" + field { + name: "ca_cert" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BYTES + options { + 65004: "CACert" + } + json_name: "caCert" + } + field { + name: "ca_key" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BYTES + options { + 65004: "CAKey" + } + json_name: "caKey" + } + field { + name: "cross_signed_ca_cert" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_BYTES + options { + 65004: "CrossSignedCACert" + } + json_name: "crossSignedCaCert" + } + } + message_type { + name: "Privileges" + field { + name: "credential_spec" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Privileges.CredentialSpec" + json_name: "credentialSpec" + } + field { + name: "selinux_context" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Privileges.SELinuxContext" + options { + 65004: "SELinuxContext" + } + json_name: "selinuxContext" + } + nested_type { + name: "CredentialSpec" + field { + name: "file" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + oneof_index: 0 + 
json_name: "file" + } + field { + name: "registry" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + oneof_index: 0 + json_name: "registry" + } + oneof_decl { + name: "source" + } + } + nested_type { + name: "SELinuxContext" + field { + name: "disable" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "disable" + } + field { + name: "user" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "user" + } + field { + name: "role" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "role" + } + field { + name: "type" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "type" + } + field { + name: "level" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "level" + } + } + } + enum_type { + name: "ResourceType" + value { + name: "TASK" + number: 0 + } + value { + name: "SECRET" + number: 1 + } + value { + name: "CONFIG" + number: 2 + } + } + enum_type { + name: "TaskState" + value { + name: "NEW" + number: 0 + options { + 66001: "TaskStateNew" + } + } + value { + name: "PENDING" + number: 64 + options { + 66001: "TaskStatePending" + } + } + value { + name: "ASSIGNED" + number: 192 + options { + 66001: "TaskStateAssigned" + } + } + value { + name: "ACCEPTED" + number: 256 + options { + 66001: "TaskStateAccepted" + } + } + value { + name: "PREPARING" + number: 320 + options { + 66001: "TaskStatePreparing" + } + } + value { + name: "READY" + number: 384 + options { + 66001: "TaskStateReady" + } + } + value { + name: "STARTING" + number: 448 + options { + 66001: "TaskStateStarting" + } + } + value { + name: "RUNNING" + number: 512 + options { + 66001: "TaskStateRunning" + } + } + value { + name: "COMPLETE" + number: 576 + options { + 66001: "TaskStateCompleted" + } + } + value { + name: "SHUTDOWN" + number: 640 + options { + 66001: "TaskStateShutdown" + } + } + value { + name: "FAILED" + number: 704 + options { + 66001: "TaskStateFailed" + } + } + value { + name: "REJECTED" + number: 768 + options { + 66001: "TaskStateRejected" + } + } + value { + name: "REMOVE" + number: 800 + options { + 66001: "TaskStateRemove" + } + } + value { + name: "ORPHANED" + number: 832 + options { + 66001: "TaskStateOrphaned" + } + } + options { + 62001: 0 + 62023: "TaskState" + } + } + enum_type { + name: "NodeRole" + value { + name: "WORKER" + number: 0 + options { + 66001: "NodeRoleWorker" + } + } + value { + name: "MANAGER" + number: 1 + options { + 66001: "NodeRoleManager" + } + } + options { + 62023: "NodeRole" + 62001: 0 + } + } + syntax: "proto3" +} +file { + name: "google/protobuf/any.proto" + package: "google.protobuf" + message_type { + name: "Any" + field { + name: "type_url" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "typeUrl" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "value" + } + } + options { + java_package: "com.google.protobuf" + java_outer_classname: "AnyProto" + java_multiple_files: true + go_package: "github.com/golang/protobuf/ptypes/any" + objc_class_prefix: "GPB" + csharp_namespace: "Google.Protobuf.WellKnownTypes" + } + syntax: "proto3" +} +file { + name: "google/protobuf/wrappers.proto" + package: "google.protobuf" + message_type { + name: "DoubleValue" + field { + name: "value" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_DOUBLE + json_name: "value" + } + } + message_type { + name: "FloatValue" + field { + name: "value" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_FLOAT + json_name: "value" + } + } + message_type 
{ + name: "Int64Value" + field { + name: "value" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_INT64 + json_name: "value" + } + } + message_type { + name: "UInt64Value" + field { + name: "value" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "value" + } + } + message_type { + name: "Int32Value" + field { + name: "value" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "value" + } + } + message_type { + name: "UInt32Value" + field { + name: "value" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT32 + json_name: "value" + } + } + message_type { + name: "BoolValue" + field { + name: "value" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "value" + } + } + message_type { + name: "StringValue" + field { + name: "value" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + } + message_type { + name: "BytesValue" + field { + name: "value" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "value" + } + } + options { + java_package: "com.google.protobuf" + java_outer_classname: "WrappersProto" + java_multiple_files: true + go_package: "github.com/golang/protobuf/ptypes/wrappers" + cc_enable_arenas: true + objc_class_prefix: "GPB" + csharp_namespace: "Google.Protobuf.WellKnownTypes" + } + syntax: "proto3" +} +file { + name: "github.com/docker/swarmkit/api/specs.proto" + package: "docker.swarmkit.v1" + dependency: "github.com/docker/swarmkit/api/types.proto" + dependency: "gogoproto/gogo.proto" + dependency: "google/protobuf/duration.proto" + dependency: "google/protobuf/any.proto" + dependency: "google/protobuf/wrappers.proto" + message_type { + name: "NodeSpec" + field { + name: "annotations" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Annotations" + options { + 65001: 0 + } + json_name: "annotations" + } + field { + name: "desired_role" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.NodeRole" + json_name: "desiredRole" + } + field { + name: "membership" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.NodeSpec.Membership" + json_name: "membership" + } + field { + name: "availability" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.NodeSpec.Availability" + json_name: "availability" + } + enum_type { + name: "Membership" + value { + name: "PENDING" + number: 0 + options { + 66001: "NodeMembershipPending" + } + } + value { + name: "ACCEPTED" + number: 1 + options { + 66001: "NodeMembershipAccepted" + } + } + options { + 62001: 0 + } + } + enum_type { + name: "Availability" + value { + name: "ACTIVE" + number: 0 + options { + 66001: "NodeAvailabilityActive" + } + } + value { + name: "PAUSE" + number: 1 + options { + 66001: "NodeAvailabilityPause" + } + } + value { + name: "DRAIN" + number: 2 + options { + 66001: "NodeAvailabilityDrain" + } + } + options { + 62001: 0 + } + } + } + message_type { + name: "ServiceSpec" + field { + name: "annotations" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Annotations" + options { + 65001: 0 + } + json_name: "annotations" + } + field { + name: "task" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.TaskSpec" + options { + 65001: 0 + } + json_name: "task" + } + field { + name: "replicated" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ReplicatedService" + 
oneof_index: 0 + json_name: "replicated" + } + field { + name: "global" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.GlobalService" + oneof_index: 0 + json_name: "global" + } + field { + name: "update" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.UpdateConfig" + json_name: "update" + } + field { + name: "rollback" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.UpdateConfig" + json_name: "rollback" + } + field { + name: "networks" + number: 7 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NetworkAttachmentConfig" + options { + deprecated: true + } + json_name: "networks" + } + field { + name: "endpoint" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.EndpointSpec" + json_name: "endpoint" + } + oneof_decl { + name: "mode" + } + } + message_type { + name: "ReplicatedService" + field { + name: "replicas" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "replicas" + } + } + message_type { + name: "GlobalService" + } + message_type { + name: "TaskSpec" + field { + name: "attachment" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NetworkAttachmentSpec" + oneof_index: 0 + json_name: "attachment" + } + field { + name: "container" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ContainerSpec" + oneof_index: 0 + json_name: "container" + } + field { + name: "generic" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.GenericRuntimeSpec" + oneof_index: 0 + json_name: "generic" + } + field { + name: "resources" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ResourceRequirements" + json_name: "resources" + } + field { + name: "restart" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.RestartPolicy" + json_name: "restart" + } + field { + name: "placement" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Placement" + json_name: "placement" + } + field { + name: "log_driver" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Driver" + json_name: "logDriver" + } + field { + name: "networks" + number: 7 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NetworkAttachmentConfig" + json_name: "networks" + } + field { + name: "force_update" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "forceUpdate" + } + field { + name: "resource_references" + number: 11 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ResourceReference" + options { + 65001: 0 + } + json_name: "resourceReferences" + } + oneof_decl { + name: "runtime" + } + } + message_type { + name: "ResourceReference" + field { + name: "resource_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "resourceId" + } + field { + name: "resource_type" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.ResourceType" + json_name: "resourceType" + } + } + message_type { + name: "GenericRuntimeSpec" + field { + name: "kind" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "kind" + } + field { + name: "payload" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: 
".google.protobuf.Any" + json_name: "payload" + } + } + message_type { + name: "NetworkAttachmentSpec" + field { + name: "container_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "containerId" + } + } + message_type { + name: "ContainerSpec" + field { + name: "image" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "image" + } + field { + name: "labels" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ContainerSpec.LabelsEntry" + json_name: "labels" + } + field { + name: "command" + number: 3 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "command" + } + field { + name: "args" + number: 4 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "args" + } + field { + name: "hostname" + number: 14 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "hostname" + } + field { + name: "env" + number: 5 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "env" + } + field { + name: "dir" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "dir" + } + field { + name: "user" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "user" + } + field { + name: "groups" + number: 11 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "groups" + } + field { + name: "privileges" + number: 22 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Privileges" + json_name: "privileges" + } + field { + name: "init" + number: 23 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.BoolValue" + json_name: "init" + } + field { + name: "tty" + number: 13 + label: LABEL_OPTIONAL + type: TYPE_BOOL + options { + 65004: "TTY" + } + json_name: "tty" + } + field { + name: "open_stdin" + number: 18 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "openStdin" + } + field { + name: "read_only" + number: 19 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "readOnly" + } + field { + name: "stop_signal" + number: 20 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "stopSignal" + } + field { + name: "mounts" + number: 8 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Mount" + options { + 65001: 0 + } + json_name: "mounts" + } + field { + name: "stop_grace_period" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Duration" + json_name: "stopGracePeriod" + } + field { + name: "pull_options" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ContainerSpec.PullOptions" + json_name: "pullOptions" + } + field { + name: "secrets" + number: 12 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.SecretReference" + json_name: "secrets" + } + field { + name: "configs" + number: 21 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ConfigReference" + json_name: "configs" + } + field { + name: "hosts" + number: 17 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "hosts" + } + field { + name: "dns_config" + number: 15 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ContainerSpec.DNSConfig" + options { + 65004: "DNSConfig" + } + json_name: "dnsConfig" + } + field { + name: "healthcheck" + number: 16 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.HealthConfig" + json_name: "healthcheck" + } + field { + name: "isolation" + number: 24 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: 
".docker.swarmkit.v1.ContainerSpec.Isolation" + json_name: "isolation" + } + field { + name: "pidsLimit" + number: 25 + label: LABEL_OPTIONAL + type: TYPE_INT64 + json_name: "pidsLimit" + } + nested_type { + name: "LabelsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + nested_type { + name: "PullOptions" + field { + name: "registry_auth" + number: 64 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "registryAuth" + } + } + nested_type { + name: "DNSConfig" + field { + name: "nameservers" + number: 1 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "nameservers" + } + field { + name: "search" + number: 2 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "search" + } + field { + name: "options" + number: 3 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "options" + } + } + enum_type { + name: "Isolation" + value { + name: "ISOLATION_DEFAULT" + number: 0 + options { + 66001: "ContainerIsolationDefault" + } + } + value { + name: "ISOLATION_PROCESS" + number: 1 + options { + 66001: "ContainerIsolationProcess" + } + } + value { + name: "ISOLATION_HYPERV" + number: 2 + options { + 66001: "ContainerIsolationHyperV" + } + } + options { + 62001: 0 + } + } + } + message_type { + name: "EndpointSpec" + field { + name: "mode" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.EndpointSpec.ResolutionMode" + json_name: "mode" + } + field { + name: "ports" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.PortConfig" + json_name: "ports" + } + enum_type { + name: "ResolutionMode" + value { + name: "VIP" + number: 0 + options { + 66001: "ResolutionModeVirtualIP" + } + } + value { + name: "DNSRR" + number: 1 + options { + 66001: "ResolutionModeDNSRoundRobin" + } + } + options { + 62001: 0 + } + } + } + message_type { + name: "NetworkSpec" + field { + name: "annotations" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Annotations" + options { + 65001: 0 + } + json_name: "annotations" + } + field { + name: "driver_config" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Driver" + json_name: "driverConfig" + } + field { + name: "ipv6_enabled" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "ipv6Enabled" + } + field { + name: "internal" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "internal" + } + field { + name: "ipam" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.IPAMOptions" + options { + 65004: "IPAM" + } + json_name: "ipam" + } + field { + name: "attachable" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "attachable" + } + field { + name: "ingress" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "ingress" + } + field { + name: "network" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_STRING + oneof_index: 0 + json_name: "network" + } + oneof_decl { + name: "config_from" + } + } + message_type { + name: "ClusterSpec" + field { + name: "annotations" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Annotations" + options { + 65001: 0 + } + json_name: "annotations" + } + field { + name: "acceptance_policy" + number: 2 + label: LABEL_OPTIONAL + type: 
TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.AcceptancePolicy" + options { + deprecated: true + 65001: 0 + } + json_name: "acceptancePolicy" + } + field { + name: "orchestration" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.OrchestrationConfig" + options { + 65001: 0 + } + json_name: "orchestration" + } + field { + name: "raft" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.RaftConfig" + options { + 65001: 0 + } + json_name: "raft" + } + field { + name: "dispatcher" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.DispatcherConfig" + options { + 65001: 0 + } + json_name: "dispatcher" + } + field { + name: "ca_config" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.CAConfig" + options { + 65001: 0 + 65004: "CAConfig" + } + json_name: "caConfig" + } + field { + name: "task_defaults" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.TaskDefaults" + options { + 65001: 0 + } + json_name: "taskDefaults" + } + field { + name: "encryption_config" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.EncryptionConfig" + options { + 65001: 0 + } + json_name: "encryptionConfig" + } + } + message_type { + name: "SecretSpec" + field { + name: "annotations" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Annotations" + options { + 65001: 0 + } + json_name: "annotations" + } + field { + name: "data" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "data" + } + field { + name: "templating" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Driver" + json_name: "templating" + } + field { + name: "driver" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Driver" + json_name: "driver" + } + } + message_type { + name: "ConfigSpec" + field { + name: "annotations" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Annotations" + options { + 65001: 0 + } + json_name: "annotations" + } + field { + name: "data" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "data" + } + field { + name: "templating" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Driver" + json_name: "templating" + } + } + syntax: "proto3" +} +file { + name: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto" + package: "docker.protobuf.plugin" + dependency: "google/protobuf/descriptor.proto" + message_type { + name: "WatchSelectors" + field { + name: "id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "id" + } + field { + name: "id_prefix" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "idPrefix" + } + field { + name: "name" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "name" + } + field { + name: "name_prefix" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "namePrefix" + } + field { + name: "custom" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "custom" + } + field { + name: "custom_prefix" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "customPrefix" + } + field { + name: "service_id" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "serviceId" + } + field { + name: "node_id" + number: 8 + label: 
LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "nodeId" + } + field { + name: "slot" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "slot" + } + field { + name: "desired_state" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "desiredState" + } + field { + name: "role" + number: 11 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "role" + } + field { + name: "membership" + number: 12 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "membership" + } + field { + name: "kind" + number: 13 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "kind" + } + } + message_type { + name: "StoreObject" + field { + name: "watch_selectors" + number: 1 + label: LABEL_REQUIRED + type: TYPE_MESSAGE + type_name: ".docker.protobuf.plugin.WatchSelectors" + json_name: "watchSelectors" + } + } + message_type { + name: "TLSAuthorization" + field { + name: "roles" + number: 1 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "roles" + } + field { + name: "insecure" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "insecure" + } + } + extension { + name: "deepcopy" + extendee: ".google.protobuf.MessageOptions" + number: 70000 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "true" + json_name: "deepcopy" + } + extension { + name: "store_object" + extendee: ".google.protobuf.MessageOptions" + number: 70001 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.protobuf.plugin.StoreObject" + json_name: "storeObject" + } + extension { + name: "tls_authorization" + extendee: ".google.protobuf.MethodOptions" + number: 73626345 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.protobuf.plugin.TLSAuthorization" + json_name: "tlsAuthorization" + } +} +file { + name: "github.com/docker/swarmkit/api/ca.proto" + package: "docker.swarmkit.v1" + dependency: "github.com/docker/swarmkit/api/types.proto" + dependency: "github.com/docker/swarmkit/api/specs.proto" + dependency: "gogoproto/gogo.proto" + dependency: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto" + message_type { + name: "NodeCertificateStatusRequest" + field { + name: "node_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "nodeId" + } + } + message_type { + name: "NodeCertificateStatusResponse" + field { + name: "status" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.IssuanceStatus" + json_name: "status" + } + field { + name: "certificate" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Certificate" + json_name: "certificate" + } + } + message_type { + name: "IssueNodeCertificateRequest" + field { + name: "role" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.NodeRole" + options { + deprecated: true + } + json_name: "role" + } + field { + name: "csr" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BYTES + options { + 65004: "CSR" + } + json_name: "csr" + } + field { + name: "token" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "token" + } + field { + name: "availability" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.NodeSpec.Availability" + json_name: "availability" + } + } + message_type { + name: "IssueNodeCertificateResponse" + field { + name: "node_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "nodeId" + } + field { + name: "node_membership" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_ENUM + 
type_name: ".docker.swarmkit.v1.NodeSpec.Membership" + json_name: "nodeMembership" + } + } + message_type { + name: "GetRootCACertificateRequest" + } + message_type { + name: "GetRootCACertificateResponse" + field { + name: "certificate" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "certificate" + } + } + message_type { + name: "GetUnlockKeyRequest" + } + message_type { + name: "GetUnlockKeyResponse" + field { + name: "unlock_key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "unlockKey" + } + field { + name: "version" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Version" + options { + 65001: 0 + } + json_name: "version" + } + } + service { + name: "CA" + method { + name: "GetRootCACertificate" + input_type: ".docker.swarmkit.v1.GetRootCACertificateRequest" + output_type: ".docker.swarmkit.v1.GetRootCACertificateResponse" + options { + 73626345 { + 2: 1 + } + } + } + method { + name: "GetUnlockKey" + input_type: ".docker.swarmkit.v1.GetUnlockKeyRequest" + output_type: ".docker.swarmkit.v1.GetUnlockKeyResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + } + service { + name: "NodeCA" + method { + name: "IssueNodeCertificate" + input_type: ".docker.swarmkit.v1.IssueNodeCertificateRequest" + output_type: ".docker.swarmkit.v1.IssueNodeCertificateResponse" + options { + 73626345 { + 2: 1 + } + } + } + method { + name: "NodeCertificateStatus" + input_type: ".docker.swarmkit.v1.NodeCertificateStatusRequest" + output_type: ".docker.swarmkit.v1.NodeCertificateStatusResponse" + options { + 73626345 { + 2: 1 + } + } + } + } + syntax: "proto3" +} +file { + name: "github.com/docker/swarmkit/api/objects.proto" + package: "docker.swarmkit.v1" + dependency: "github.com/docker/swarmkit/api/types.proto" + dependency: "github.com/docker/swarmkit/api/specs.proto" + dependency: "google/protobuf/timestamp.proto" + dependency: "gogoproto/gogo.proto" + dependency: "google/protobuf/any.proto" + dependency: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto" + message_type { + name: "Meta" + field { + name: "version" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Version" + options { + 65001: 0 + } + json_name: "version" + } + field { + name: "created_at" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Timestamp" + json_name: "createdAt" + } + field { + name: "updated_at" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Timestamp" + json_name: "updatedAt" + } + } + message_type { + name: "Node" + field { + name: "id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "id" + } + field { + name: "meta" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Meta" + options { + 65001: 0 + } + json_name: "meta" + } + field { + name: "spec" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NodeSpec" + options { + 65001: 0 + } + json_name: "spec" + } + field { + name: "description" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NodeDescription" + json_name: "description" + } + field { + name: "status" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NodeStatus" + options { + 65001: 0 + } + json_name: "status" + } + field { + name: "manager_status" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + 
type_name: ".docker.swarmkit.v1.ManagerStatus" + json_name: "managerStatus" + } + field { + name: "attachment" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NetworkAttachment" + options { + deprecated: true + } + json_name: "attachment" + } + field { + name: "certificate" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Certificate" + options { + 65001: 0 + } + json_name: "certificate" + } + field { + name: "role" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.NodeRole" + json_name: "role" + } + field { + name: "attachments" + number: 10 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NetworkAttachment" + json_name: "attachments" + } + options { + 70001 { + 1 { + 1: 1 + 2: 1 + 3: 1 + 4: 1 + 5: 1 + 6: 1 + 11: 1 + 12: 1 + } + } + } + } + message_type { + name: "Service" + field { + name: "id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "id" + } + field { + name: "meta" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Meta" + options { + 65001: 0 + } + json_name: "meta" + } + field { + name: "spec" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ServiceSpec" + options { + 65001: 0 + } + json_name: "spec" + } + field { + name: "spec_version" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Version" + json_name: "specVersion" + } + field { + name: "previous_spec" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ServiceSpec" + json_name: "previousSpec" + } + field { + name: "previous_spec_version" + number: 11 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Version" + json_name: "previousSpecVersion" + } + field { + name: "endpoint" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Endpoint" + json_name: "endpoint" + } + field { + name: "update_status" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.UpdateStatus" + json_name: "updateStatus" + } + options { + 70001 { + 1 { + 1: 1 + 2: 1 + 3: 1 + 4: 1 + 5: 1 + 6: 1 + } + } + } + } + message_type { + name: "Endpoint" + field { + name: "spec" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.EndpointSpec" + json_name: "spec" + } + field { + name: "ports" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.PortConfig" + json_name: "ports" + } + field { + name: "virtual_ips" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Endpoint.VirtualIP" + options { + 65004: "VirtualIPs" + } + json_name: "virtualIps" + } + nested_type { + name: "VirtualIP" + field { + name: "network_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "networkId" + } + field { + name: "addr" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "addr" + } + } + } + message_type { + name: "Task" + field { + name: "id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "id" + } + field { + name: "meta" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Meta" + options { + 65001: 0 + } + json_name: "meta" + } + field { + name: "spec" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: 
".docker.swarmkit.v1.TaskSpec" + options { + 65001: 0 + } + json_name: "spec" + } + field { + name: "spec_version" + number: 14 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Version" + json_name: "specVersion" + } + field { + name: "service_id" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "serviceId" + } + field { + name: "slot" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "slot" + } + field { + name: "node_id" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "nodeId" + } + field { + name: "annotations" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Annotations" + options { + 65001: 0 + } + json_name: "annotations" + } + field { + name: "service_annotations" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Annotations" + options { + 65001: 0 + } + json_name: "serviceAnnotations" + } + field { + name: "status" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.TaskStatus" + options { + 65001: 0 + } + json_name: "status" + } + field { + name: "desired_state" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.TaskState" + json_name: "desiredState" + } + field { + name: "networks" + number: 11 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NetworkAttachment" + json_name: "networks" + } + field { + name: "endpoint" + number: 12 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Endpoint" + json_name: "endpoint" + } + field { + name: "log_driver" + number: 13 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Driver" + json_name: "logDriver" + } + field { + name: "assigned_generic_resources" + number: 15 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.GenericResource" + json_name: "assignedGenericResources" + } + options { + 70001 { + 1 { + 1: 1 + 2: 1 + 3: 1 + 4: 1 + 5: 1 + 6: 1 + 7: 1 + 8: 1 + 9: 1 + 10: 1 + } + } + } + } + message_type { + name: "NetworkAttachment" + field { + name: "network" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Network" + json_name: "network" + } + field { + name: "addresses" + number: 2 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "addresses" + } + field { + name: "aliases" + number: 3 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "aliases" + } + field { + name: "driver_attachment_opts" + number: 4 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NetworkAttachment.DriverAttachmentOptsEntry" + json_name: "driverAttachmentOpts" + } + nested_type { + name: "DriverAttachmentOptsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + } + message_type { + name: "Network" + field { + name: "id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "id" + } + field { + name: "meta" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Meta" + options { + 65001: 0 + } + json_name: "meta" + } + field { + name: "spec" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NetworkSpec" + options { + 65001: 0 + } + 
json_name: "spec" + } + field { + name: "driver_state" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Driver" + json_name: "driverState" + } + field { + name: "ipam" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.IPAMOptions" + options { + 65004: "IPAM" + } + json_name: "ipam" + } + options { + 70001 { + 1 { + 1: 1 + 2: 1 + 3: 1 + 4: 1 + 5: 1 + 6: 1 + } + } + } + } + message_type { + name: "Cluster" + field { + name: "id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "id" + } + field { + name: "meta" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Meta" + options { + 65001: 0 + } + json_name: "meta" + } + field { + name: "spec" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ClusterSpec" + options { + 65001: 0 + } + json_name: "spec" + } + field { + name: "root_ca" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.RootCA" + options { + 65001: 0 + 65004: "RootCA" + } + json_name: "rootCa" + } + field { + name: "network_bootstrap_keys" + number: 5 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.EncryptionKey" + json_name: "networkBootstrapKeys" + } + field { + name: "encryption_key_lamport_clock" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "encryptionKeyLamportClock" + } + field { + name: "blacklisted_certificates" + number: 8 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Cluster.BlacklistedCertificatesEntry" + json_name: "blacklistedCertificates" + } + field { + name: "unlock_keys" + number: 9 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.EncryptionKey" + json_name: "unlockKeys" + } + field { + name: "fips" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_BOOL + options { + 65004: "FIPS" + } + json_name: "fips" + } + nested_type { + name: "BlacklistedCertificatesEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.BlacklistedCertificate" + json_name: "value" + } + options { + map_entry: true + } + } + options { + 70001 { + 1 { + 1: 1 + 2: 1 + 3: 1 + 4: 1 + 5: 1 + 6: 1 + } + } + } + } + message_type { + name: "Secret" + field { + name: "id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "id" + } + field { + name: "meta" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Meta" + options { + 65001: 0 + } + json_name: "meta" + } + field { + name: "spec" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.SecretSpec" + options { + 65001: 0 + } + json_name: "spec" + } + field { + name: "internal" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "internal" + } + options { + 70001 { + 1 { + 1: 1 + 2: 1 + 3: 1 + 4: 1 + 5: 1 + 6: 1 + } + } + } + } + message_type { + name: "Config" + field { + name: "id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "id" + } + field { + name: "meta" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Meta" + options { + 65001: 0 + } + json_name: "meta" + } + field { + name: "spec" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: 
".docker.swarmkit.v1.ConfigSpec" + options { + 65001: 0 + } + json_name: "spec" + } + options { + 70001 { + 1 { + 1: 1 + 2: 1 + 3: 1 + 4: 1 + 5: 1 + 6: 1 + } + } + } + } + message_type { + name: "Resource" + field { + name: "id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "ID" + } + json_name: "id" + } + field { + name: "meta" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Meta" + options { + 65001: 0 + } + json_name: "meta" + } + field { + name: "annotations" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Annotations" + options { + 65001: 0 + } + json_name: "annotations" + } + field { + name: "kind" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "kind" + } + field { + name: "payload" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Any" + json_name: "payload" + } + options { + 70001 { + 1 { + 1: 1 + 2: 1 + 3: 1 + 4: 1 + 5: 1 + 6: 1 + 13: 1 + } + } + } + } + message_type { + name: "Extension" + field { + name: "id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "ID" + } + json_name: "id" + } + field { + name: "meta" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Meta" + options { + 65001: 0 + } + json_name: "meta" + } + field { + name: "annotations" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Annotations" + options { + 65001: 0 + } + json_name: "annotations" + } + field { + name: "description" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "description" + } + options { + 70001 { + 1 { + 1: 1 + 2: 1 + 3: 1 + 4: 1 + 5: 1 + 6: 1 + } + } + } + } + syntax: "proto3" +} +file { + name: "github.com/docker/swarmkit/api/control.proto" + package: "docker.swarmkit.v1" + dependency: "github.com/docker/swarmkit/api/specs.proto" + dependency: "github.com/docker/swarmkit/api/objects.proto" + dependency: "github.com/docker/swarmkit/api/types.proto" + dependency: "gogoproto/gogo.proto" + dependency: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto" + message_type { + name: "GetNodeRequest" + field { + name: "node_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "nodeId" + } + } + message_type { + name: "GetNodeResponse" + field { + name: "node" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Node" + json_name: "node" + } + } + message_type { + name: "ListNodesRequest" + field { + name: "filters" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ListNodesRequest.Filters" + json_name: "filters" + } + nested_type { + name: "Filters" + field { + name: "names" + number: 1 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "names" + } + field { + name: "id_prefixes" + number: 2 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "idPrefixes" + } + field { + name: "labels" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ListNodesRequest.Filters.LabelsEntry" + json_name: "labels" + } + field { + name: "memberships" + number: 4 + label: LABEL_REPEATED + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.NodeSpec.Membership" + options { + packed: false + } + json_name: "memberships" + } + field { + name: "roles" + number: 5 + label: LABEL_REPEATED + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.NodeRole" + options { + packed: 
false + } + json_name: "roles" + } + field { + name: "name_prefixes" + number: 6 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "namePrefixes" + } + nested_type { + name: "LabelsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + } + } + message_type { + name: "ListNodesResponse" + field { + name: "nodes" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Node" + json_name: "nodes" + } + } + message_type { + name: "UpdateNodeRequest" + field { + name: "node_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "nodeId" + } + field { + name: "node_version" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Version" + json_name: "nodeVersion" + } + field { + name: "spec" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NodeSpec" + json_name: "spec" + } + } + message_type { + name: "UpdateNodeResponse" + field { + name: "node" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Node" + json_name: "node" + } + } + message_type { + name: "RemoveNodeRequest" + field { + name: "node_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "nodeId" + } + field { + name: "force" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "force" + } + } + message_type { + name: "RemoveNodeResponse" + } + message_type { + name: "GetTaskRequest" + field { + name: "task_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "taskId" + } + } + message_type { + name: "GetTaskResponse" + field { + name: "task" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Task" + json_name: "task" + } + } + message_type { + name: "RemoveTaskRequest" + field { + name: "task_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "taskId" + } + } + message_type { + name: "RemoveTaskResponse" + } + message_type { + name: "ListTasksRequest" + field { + name: "filters" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ListTasksRequest.Filters" + json_name: "filters" + } + nested_type { + name: "Filters" + field { + name: "names" + number: 1 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "names" + } + field { + name: "id_prefixes" + number: 2 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "idPrefixes" + } + field { + name: "labels" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ListTasksRequest.Filters.LabelsEntry" + json_name: "labels" + } + field { + name: "service_ids" + number: 4 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "serviceIds" + } + field { + name: "node_ids" + number: 5 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "nodeIds" + } + field { + name: "desired_states" + number: 6 + label: LABEL_REPEATED + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.TaskState" + options { + packed: false + } + json_name: "desiredStates" + } + field { + name: "name_prefixes" + number: 7 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "namePrefixes" + } + field { + name: "runtimes" + number: 9 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "runtimes" + } + field { + name: "up_to_date" + number: 8 + 
label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "upToDate" + } + nested_type { + name: "LabelsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + } + } + message_type { + name: "ListTasksResponse" + field { + name: "tasks" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Task" + json_name: "tasks" + } + } + message_type { + name: "CreateServiceRequest" + field { + name: "spec" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ServiceSpec" + json_name: "spec" + } + } + message_type { + name: "CreateServiceResponse" + field { + name: "service" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Service" + json_name: "service" + } + } + message_type { + name: "GetServiceRequest" + field { + name: "service_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "serviceId" + } + field { + name: "insert_defaults" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "insertDefaults" + } + } + message_type { + name: "GetServiceResponse" + field { + name: "service" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Service" + json_name: "service" + } + } + message_type { + name: "UpdateServiceRequest" + field { + name: "service_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "serviceId" + } + field { + name: "service_version" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Version" + json_name: "serviceVersion" + } + field { + name: "spec" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ServiceSpec" + json_name: "spec" + } + field { + name: "rollback" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.UpdateServiceRequest.Rollback" + json_name: "rollback" + } + enum_type { + name: "Rollback" + value { + name: "NONE" + number: 0 + } + value { + name: "PREVIOUS" + number: 1 + } + } + } + message_type { + name: "UpdateServiceResponse" + field { + name: "service" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Service" + json_name: "service" + } + } + message_type { + name: "RemoveServiceRequest" + field { + name: "service_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "serviceId" + } + } + message_type { + name: "RemoveServiceResponse" + } + message_type { + name: "ListServicesRequest" + field { + name: "filters" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ListServicesRequest.Filters" + json_name: "filters" + } + nested_type { + name: "Filters" + field { + name: "names" + number: 1 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "names" + } + field { + name: "id_prefixes" + number: 2 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "idPrefixes" + } + field { + name: "labels" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ListServicesRequest.Filters.LabelsEntry" + json_name: "labels" + } + field { + name: "name_prefixes" + number: 4 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "namePrefixes" + } + field { + name: "runtimes" + number: 5 + label: LABEL_REPEATED + type: 
TYPE_STRING + json_name: "runtimes" + } + nested_type { + name: "LabelsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + } + } + message_type { + name: "ListServicesResponse" + field { + name: "services" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Service" + json_name: "services" + } + } + message_type { + name: "CreateNetworkRequest" + field { + name: "spec" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NetworkSpec" + json_name: "spec" + } + } + message_type { + name: "CreateNetworkResponse" + field { + name: "network" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Network" + json_name: "network" + } + } + message_type { + name: "GetNetworkRequest" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "network_id" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "networkId" + } + } + message_type { + name: "GetNetworkResponse" + field { + name: "network" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Network" + json_name: "network" + } + } + message_type { + name: "RemoveNetworkRequest" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "network_id" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "networkId" + } + } + message_type { + name: "RemoveNetworkResponse" + } + message_type { + name: "ListNetworksRequest" + field { + name: "filters" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ListNetworksRequest.Filters" + json_name: "filters" + } + nested_type { + name: "Filters" + field { + name: "names" + number: 1 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "names" + } + field { + name: "id_prefixes" + number: 2 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "idPrefixes" + } + field { + name: "labels" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ListNetworksRequest.Filters.LabelsEntry" + json_name: "labels" + } + field { + name: "name_prefixes" + number: 4 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "namePrefixes" + } + nested_type { + name: "LabelsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + } + } + message_type { + name: "ListNetworksResponse" + field { + name: "networks" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Network" + json_name: "networks" + } + } + message_type { + name: "GetClusterRequest" + field { + name: "cluster_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "clusterId" + } + } + message_type { + name: "GetClusterResponse" + field { + name: "cluster" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Cluster" + json_name: "cluster" + } + } + message_type { + name: "ListClustersRequest" + field { + name: "filters" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + 
type_name: ".docker.swarmkit.v1.ListClustersRequest.Filters" + json_name: "filters" + } + nested_type { + name: "Filters" + field { + name: "names" + number: 1 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "names" + } + field { + name: "id_prefixes" + number: 2 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "idPrefixes" + } + field { + name: "labels" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ListClustersRequest.Filters.LabelsEntry" + json_name: "labels" + } + field { + name: "name_prefixes" + number: 4 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "namePrefixes" + } + nested_type { + name: "LabelsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + } + } + message_type { + name: "ListClustersResponse" + field { + name: "clusters" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Cluster" + json_name: "clusters" + } + } + message_type { + name: "KeyRotation" + field { + name: "worker_join_token" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "workerJoinToken" + } + field { + name: "manager_join_token" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "managerJoinToken" + } + field { + name: "manager_unlock_key" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "managerUnlockKey" + } + } + message_type { + name: "UpdateClusterRequest" + field { + name: "cluster_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "clusterId" + } + field { + name: "cluster_version" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Version" + json_name: "clusterVersion" + } + field { + name: "spec" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ClusterSpec" + json_name: "spec" + } + field { + name: "rotation" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.KeyRotation" + options { + 65001: 0 + } + json_name: "rotation" + } + } + message_type { + name: "UpdateClusterResponse" + field { + name: "cluster" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Cluster" + json_name: "cluster" + } + } + message_type { + name: "GetSecretRequest" + field { + name: "secret_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "secretId" + } + } + message_type { + name: "GetSecretResponse" + field { + name: "secret" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Secret" + json_name: "secret" + } + } + message_type { + name: "UpdateSecretRequest" + field { + name: "secret_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "secretId" + } + field { + name: "secret_version" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Version" + json_name: "secretVersion" + } + field { + name: "spec" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.SecretSpec" + json_name: "spec" + } + } + message_type { + name: "UpdateSecretResponse" + field { + name: "secret" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Secret" + json_name: "secret" + } + } + message_type { + name: 
"ListSecretsRequest" + field { + name: "filters" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ListSecretsRequest.Filters" + json_name: "filters" + } + nested_type { + name: "Filters" + field { + name: "names" + number: 1 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "names" + } + field { + name: "id_prefixes" + number: 2 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "idPrefixes" + } + field { + name: "labels" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ListSecretsRequest.Filters.LabelsEntry" + json_name: "labels" + } + field { + name: "name_prefixes" + number: 4 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "namePrefixes" + } + nested_type { + name: "LabelsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + } + } + message_type { + name: "ListSecretsResponse" + field { + name: "secrets" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Secret" + json_name: "secrets" + } + } + message_type { + name: "CreateSecretRequest" + field { + name: "spec" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.SecretSpec" + json_name: "spec" + } + } + message_type { + name: "CreateSecretResponse" + field { + name: "secret" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Secret" + json_name: "secret" + } + } + message_type { + name: "RemoveSecretRequest" + field { + name: "secret_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "secretId" + } + } + message_type { + name: "RemoveSecretResponse" + } + message_type { + name: "GetConfigRequest" + field { + name: "config_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "configId" + } + } + message_type { + name: "GetConfigResponse" + field { + name: "config" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Config" + json_name: "config" + } + } + message_type { + name: "UpdateConfigRequest" + field { + name: "config_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "configId" + } + field { + name: "config_version" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Version" + json_name: "configVersion" + } + field { + name: "spec" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ConfigSpec" + json_name: "spec" + } + } + message_type { + name: "UpdateConfigResponse" + field { + name: "config" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Config" + json_name: "config" + } + } + message_type { + name: "ListConfigsRequest" + field { + name: "filters" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ListConfigsRequest.Filters" + json_name: "filters" + } + nested_type { + name: "Filters" + field { + name: "names" + number: 1 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "names" + } + field { + name: "id_prefixes" + number: 2 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "idPrefixes" + } + field { + name: "labels" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: 
".docker.swarmkit.v1.ListConfigsRequest.Filters.LabelsEntry" + json_name: "labels" + } + field { + name: "name_prefixes" + number: 4 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "namePrefixes" + } + nested_type { + name: "LabelsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + } + } + message_type { + name: "ListConfigsResponse" + field { + name: "configs" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Config" + json_name: "configs" + } + } + message_type { + name: "CreateConfigRequest" + field { + name: "spec" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ConfigSpec" + json_name: "spec" + } + } + message_type { + name: "CreateConfigResponse" + field { + name: "config" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Config" + json_name: "config" + } + } + message_type { + name: "RemoveConfigRequest" + field { + name: "config_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "configId" + } + } + message_type { + name: "RemoveConfigResponse" + } + service { + name: "Control" + method { + name: "GetNode" + input_type: ".docker.swarmkit.v1.GetNodeRequest" + output_type: ".docker.swarmkit.v1.GetNodeResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "ListNodes" + input_type: ".docker.swarmkit.v1.ListNodesRequest" + output_type: ".docker.swarmkit.v1.ListNodesResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "UpdateNode" + input_type: ".docker.swarmkit.v1.UpdateNodeRequest" + output_type: ".docker.swarmkit.v1.UpdateNodeResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "RemoveNode" + input_type: ".docker.swarmkit.v1.RemoveNodeRequest" + output_type: ".docker.swarmkit.v1.RemoveNodeResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "GetTask" + input_type: ".docker.swarmkit.v1.GetTaskRequest" + output_type: ".docker.swarmkit.v1.GetTaskResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "ListTasks" + input_type: ".docker.swarmkit.v1.ListTasksRequest" + output_type: ".docker.swarmkit.v1.ListTasksResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "RemoveTask" + input_type: ".docker.swarmkit.v1.RemoveTaskRequest" + output_type: ".docker.swarmkit.v1.RemoveTaskResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "GetService" + input_type: ".docker.swarmkit.v1.GetServiceRequest" + output_type: ".docker.swarmkit.v1.GetServiceResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "ListServices" + input_type: ".docker.swarmkit.v1.ListServicesRequest" + output_type: ".docker.swarmkit.v1.ListServicesResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "CreateService" + input_type: ".docker.swarmkit.v1.CreateServiceRequest" + output_type: ".docker.swarmkit.v1.CreateServiceResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "UpdateService" + input_type: ".docker.swarmkit.v1.UpdateServiceRequest" + output_type: ".docker.swarmkit.v1.UpdateServiceResponse" + options { + 73626345 { + 1: "swarm-manager" + } 
+ } + } + method { + name: "RemoveService" + input_type: ".docker.swarmkit.v1.RemoveServiceRequest" + output_type: ".docker.swarmkit.v1.RemoveServiceResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "GetNetwork" + input_type: ".docker.swarmkit.v1.GetNetworkRequest" + output_type: ".docker.swarmkit.v1.GetNetworkResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "ListNetworks" + input_type: ".docker.swarmkit.v1.ListNetworksRequest" + output_type: ".docker.swarmkit.v1.ListNetworksResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "CreateNetwork" + input_type: ".docker.swarmkit.v1.CreateNetworkRequest" + output_type: ".docker.swarmkit.v1.CreateNetworkResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "RemoveNetwork" + input_type: ".docker.swarmkit.v1.RemoveNetworkRequest" + output_type: ".docker.swarmkit.v1.RemoveNetworkResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "GetCluster" + input_type: ".docker.swarmkit.v1.GetClusterRequest" + output_type: ".docker.swarmkit.v1.GetClusterResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "ListClusters" + input_type: ".docker.swarmkit.v1.ListClustersRequest" + output_type: ".docker.swarmkit.v1.ListClustersResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "UpdateCluster" + input_type: ".docker.swarmkit.v1.UpdateClusterRequest" + output_type: ".docker.swarmkit.v1.UpdateClusterResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "GetSecret" + input_type: ".docker.swarmkit.v1.GetSecretRequest" + output_type: ".docker.swarmkit.v1.GetSecretResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "UpdateSecret" + input_type: ".docker.swarmkit.v1.UpdateSecretRequest" + output_type: ".docker.swarmkit.v1.UpdateSecretResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "ListSecrets" + input_type: ".docker.swarmkit.v1.ListSecretsRequest" + output_type: ".docker.swarmkit.v1.ListSecretsResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "CreateSecret" + input_type: ".docker.swarmkit.v1.CreateSecretRequest" + output_type: ".docker.swarmkit.v1.CreateSecretResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "RemoveSecret" + input_type: ".docker.swarmkit.v1.RemoveSecretRequest" + output_type: ".docker.swarmkit.v1.RemoveSecretResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "GetConfig" + input_type: ".docker.swarmkit.v1.GetConfigRequest" + output_type: ".docker.swarmkit.v1.GetConfigResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "UpdateConfig" + input_type: ".docker.swarmkit.v1.UpdateConfigRequest" + output_type: ".docker.swarmkit.v1.UpdateConfigResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "ListConfigs" + input_type: ".docker.swarmkit.v1.ListConfigsRequest" + output_type: ".docker.swarmkit.v1.ListConfigsResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "CreateConfig" + input_type: ".docker.swarmkit.v1.CreateConfigRequest" + output_type: ".docker.swarmkit.v1.CreateConfigResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "RemoveConfig" + input_type: 
".docker.swarmkit.v1.RemoveConfigRequest" + output_type: ".docker.swarmkit.v1.RemoveConfigResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + } + syntax: "proto3" +} +file { + name: "github.com/docker/swarmkit/api/dispatcher.proto" + package: "docker.swarmkit.v1" + dependency: "github.com/docker/swarmkit/api/types.proto" + dependency: "github.com/docker/swarmkit/api/objects.proto" + dependency: "gogoproto/gogo.proto" + dependency: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto" + dependency: "google/protobuf/duration.proto" + message_type { + name: "SessionRequest" + field { + name: "description" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NodeDescription" + json_name: "description" + } + field { + name: "session_id" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "sessionId" + } + } + message_type { + name: "SessionMessage" + field { + name: "session_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "sessionId" + } + field { + name: "node" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Node" + json_name: "node" + } + field { + name: "managers" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.WeightedPeer" + json_name: "managers" + } + field { + name: "network_bootstrap_keys" + number: 4 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.EncryptionKey" + json_name: "networkBootstrapKeys" + } + field { + name: "RootCA" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "RootCA" + } + } + message_type { + name: "HeartbeatRequest" + field { + name: "session_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "sessionId" + } + } + message_type { + name: "HeartbeatResponse" + field { + name: "period" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Duration" + options { + 65011: 1 + 65001: 0 + } + json_name: "period" + } + } + message_type { + name: "UpdateTaskStatusRequest" + field { + name: "session_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "sessionId" + } + field { + name: "updates" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.UpdateTaskStatusRequest.TaskStatusUpdate" + json_name: "updates" + } + nested_type { + name: "TaskStatusUpdate" + field { + name: "task_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "taskId" + } + field { + name: "status" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.TaskStatus" + json_name: "status" + } + } + } + message_type { + name: "UpdateTaskStatusResponse" + } + message_type { + name: "TasksRequest" + field { + name: "session_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "sessionId" + } + } + message_type { + name: "TasksMessage" + field { + name: "tasks" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Task" + json_name: "tasks" + } + } + message_type { + name: "AssignmentsRequest" + field { + name: "session_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "sessionId" + } + } + message_type { + name: "Assignment" + field { + name: "task" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Task" + oneof_index: 0 + json_name: "task" + } + field { + name: "secret" + number: 2 + label: 
LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Secret" + oneof_index: 0 + json_name: "secret" + } + field { + name: "config" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Config" + oneof_index: 0 + json_name: "config" + } + oneof_decl { + name: "item" + } + } + message_type { + name: "AssignmentChange" + field { + name: "assignment" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Assignment" + json_name: "assignment" + } + field { + name: "action" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.AssignmentChange.AssignmentAction" + json_name: "action" + } + enum_type { + name: "AssignmentAction" + value { + name: "UPDATE" + number: 0 + options { + 66001: "AssignmentActionUpdate" + } + } + value { + name: "REMOVE" + number: 1 + options { + 66001: "AssignmentActionRemove" + } + } + } + } + message_type { + name: "AssignmentsMessage" + field { + name: "type" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.AssignmentsMessage.Type" + json_name: "type" + } + field { + name: "applies_to" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "appliesTo" + } + field { + name: "results_in" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "resultsIn" + } + field { + name: "changes" + number: 4 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.AssignmentChange" + json_name: "changes" + } + enum_type { + name: "Type" + value { + name: "COMPLETE" + number: 0 + } + value { + name: "INCREMENTAL" + number: 1 + } + } + } + service { + name: "Dispatcher" + method { + name: "Session" + input_type: ".docker.swarmkit.v1.SessionRequest" + output_type: ".docker.swarmkit.v1.SessionMessage" + options { + 73626345 { + 1: "swarm-worker" + 1: "swarm-manager" + } + } + server_streaming: true + } + method { + name: "Heartbeat" + input_type: ".docker.swarmkit.v1.HeartbeatRequest" + output_type: ".docker.swarmkit.v1.HeartbeatResponse" + options { + 73626345 { + 1: "swarm-worker" + 1: "swarm-manager" + } + } + } + method { + name: "UpdateTaskStatus" + input_type: ".docker.swarmkit.v1.UpdateTaskStatusRequest" + output_type: ".docker.swarmkit.v1.UpdateTaskStatusResponse" + options { + 73626345 { + 1: "swarm-worker" + 1: "swarm-manager" + } + } + } + method { + name: "Tasks" + input_type: ".docker.swarmkit.v1.TasksRequest" + output_type: ".docker.swarmkit.v1.TasksMessage" + options { + deprecated: true + 73626345 { + 1: "swarm-worker" + 1: "swarm-manager" + } + } + server_streaming: true + } + method { + name: "Assignments" + input_type: ".docker.swarmkit.v1.AssignmentsRequest" + output_type: ".docker.swarmkit.v1.AssignmentsMessage" + options { + 73626345 { + 1: "swarm-worker" + 1: "swarm-manager" + } + } + server_streaming: true + } + } + syntax: "proto3" +} +file { + name: "github.com/docker/swarmkit/api/health.proto" + package: "docker.swarmkit.v1" + dependency: "gogoproto/gogo.proto" + dependency: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto" + message_type { + name: "HealthCheckRequest" + field { + name: "service" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "service" + } + } + message_type { + name: "HealthCheckResponse" + field { + name: "status" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.HealthCheckResponse.ServingStatus" + json_name: "status" + } + enum_type { + name: "ServingStatus" + 
value { + name: "UNKNOWN" + number: 0 + } + value { + name: "SERVING" + number: 1 + } + value { + name: "NOT_SERVING" + number: 2 + } + } + } + service { + name: "Health" + method { + name: "Check" + input_type: ".docker.swarmkit.v1.HealthCheckRequest" + output_type: ".docker.swarmkit.v1.HealthCheckResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + } + syntax: "proto3" +} +file { + name: "github.com/docker/swarmkit/api/logbroker.proto" + package: "docker.swarmkit.v1" + dependency: "gogoproto/gogo.proto" + dependency: "google/protobuf/timestamp.proto" + dependency: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto" + message_type { + name: "LogSubscriptionOptions" + field { + name: "streams" + number: 1 + label: LABEL_REPEATED + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.LogStream" + options { + packed: false + } + json_name: "streams" + } + field { + name: "follow" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "follow" + } + field { + name: "tail" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_INT64 + json_name: "tail" + } + field { + name: "since" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Timestamp" + json_name: "since" + } + } + message_type { + name: "LogSelector" + field { + name: "service_ids" + number: 1 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "serviceIds" + } + field { + name: "node_ids" + number: 2 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "nodeIds" + } + field { + name: "task_ids" + number: 3 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "taskIds" + } + } + message_type { + name: "LogContext" + field { + name: "service_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "serviceId" + } + field { + name: "node_id" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "nodeId" + } + field { + name: "task_id" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "taskId" + } + } + message_type { + name: "LogAttr" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + } + message_type { + name: "LogMessage" + field { + name: "context" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.LogContext" + options { + 65001: 0 + } + json_name: "context" + } + field { + name: "timestamp" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Timestamp" + json_name: "timestamp" + } + field { + name: "stream" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.LogStream" + json_name: "stream" + } + field { + name: "data" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "data" + } + field { + name: "attrs" + number: 5 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.LogAttr" + options { + 65001: 0 + } + json_name: "attrs" + } + } + message_type { + name: "SubscribeLogsRequest" + field { + name: "selector" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.LogSelector" + json_name: "selector" + } + field { + name: "options" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.LogSubscriptionOptions" + json_name: "options" + } + } + message_type { + name: "SubscribeLogsMessage" + field { + name: "messages" + number: 1 + 
label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.LogMessage" + options { + 65001: 0 + } + json_name: "messages" + } + } + message_type { + name: "ListenSubscriptionsRequest" + } + message_type { + name: "SubscriptionMessage" + field { + name: "id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "id" + } + field { + name: "selector" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.LogSelector" + json_name: "selector" + } + field { + name: "options" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.LogSubscriptionOptions" + json_name: "options" + } + field { + name: "close" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "close" + } + } + message_type { + name: "PublishLogsMessage" + field { + name: "subscription_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "subscriptionId" + } + field { + name: "messages" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.LogMessage" + options { + 65001: 0 + } + json_name: "messages" + } + field { + name: "close" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "close" + } + } + message_type { + name: "PublishLogsResponse" + } + enum_type { + name: "LogStream" + value { + name: "LOG_STREAM_UNKNOWN" + number: 0 + options { + 66001: "LogStreamUnknown" + } + } + value { + name: "LOG_STREAM_STDOUT" + number: 1 + options { + 66001: "LogStreamStdout" + } + } + value { + name: "LOG_STREAM_STDERR" + number: 2 + options { + 66001: "LogStreamStderr" + } + } + options { + 62001: 0 + 62023: "LogStream" + } + } + service { + name: "Logs" + method { + name: "SubscribeLogs" + input_type: ".docker.swarmkit.v1.SubscribeLogsRequest" + output_type: ".docker.swarmkit.v1.SubscribeLogsMessage" + options { + 73626345 { + 1: "swarm-manager" + } + } + server_streaming: true + } + } + service { + name: "LogBroker" + method { + name: "ListenSubscriptions" + input_type: ".docker.swarmkit.v1.ListenSubscriptionsRequest" + output_type: ".docker.swarmkit.v1.SubscriptionMessage" + options { + 73626345 { + 1: "swarm-worker" + 1: "swarm-manager" + } + } + server_streaming: true + } + method { + name: "PublishLogs" + input_type: ".docker.swarmkit.v1.PublishLogsMessage" + output_type: ".docker.swarmkit.v1.PublishLogsResponse" + options { + 73626345 { + 1: "swarm-worker" + 1: "swarm-manager" + } + } + client_streaming: true + } + } + syntax: "proto3" +} +file { + name: "github.com/coreos/etcd/raft/raftpb/raft.proto" + package: "raftpb" + dependency: "gogoproto/gogo.proto" + message_type { + name: "Entry" + field { + name: "Term" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "Term" + } + field { + name: "Index" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "Index" + } + field { + name: "Type" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".raftpb.EntryType" + options { + 65001: 0 + } + json_name: "Type" + } + field { + name: "Data" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "Data" + } + } + message_type { + name: "SnapshotMetadata" + field { + name: "conf_state" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".raftpb.ConfState" + options { + 65001: 0 + } + json_name: "confState" + } + field { + name: "index" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + 
json_name: "index" + } + field { + name: "term" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "term" + } + } + message_type { + name: "Snapshot" + field { + name: "data" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "data" + } + field { + name: "metadata" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".raftpb.SnapshotMetadata" + options { + 65001: 0 + } + json_name: "metadata" + } + } + message_type { + name: "Message" + field { + name: "type" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".raftpb.MessageType" + options { + 65001: 0 + } + json_name: "type" + } + field { + name: "to" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "to" + } + field { + name: "from" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "from" + } + field { + name: "term" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "term" + } + field { + name: "logTerm" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "logTerm" + } + field { + name: "index" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "index" + } + field { + name: "entries" + number: 7 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".raftpb.Entry" + options { + 65001: 0 + } + json_name: "entries" + } + field { + name: "commit" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "commit" + } + field { + name: "snapshot" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".raftpb.Snapshot" + options { + 65001: 0 + } + json_name: "snapshot" + } + field { + name: "reject" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_BOOL + options { + 65001: 0 + } + json_name: "reject" + } + field { + name: "rejectHint" + number: 11 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "rejectHint" + } + field { + name: "context" + number: 12 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "context" + } + } + message_type { + name: "HardState" + field { + name: "term" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "term" + } + field { + name: "vote" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "vote" + } + field { + name: "commit" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "commit" + } + } + message_type { + name: "ConfState" + field { + name: "nodes" + number: 1 + label: LABEL_REPEATED + type: TYPE_UINT64 + json_name: "nodes" + } + } + message_type { + name: "ConfChange" + field { + name: "ID" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "ID" + } + field { + name: "Type" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".raftpb.ConfChangeType" + options { + 65001: 0 + } + json_name: "Type" + } + field { + name: "NodeID" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "NodeID" + } + field { + name: "Context" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "Context" + } + } + enum_type { + name: "EntryType" + value { + name: "EntryNormal" + number: 0 + } + value { + name: "EntryConfChange" + number: 1 + } + } + enum_type { + name: 
"MessageType" + value { + name: "MsgHup" + number: 0 + } + value { + name: "MsgBeat" + number: 1 + } + value { + name: "MsgProp" + number: 2 + } + value { + name: "MsgApp" + number: 3 + } + value { + name: "MsgAppResp" + number: 4 + } + value { + name: "MsgVote" + number: 5 + } + value { + name: "MsgVoteResp" + number: 6 + } + value { + name: "MsgSnap" + number: 7 + } + value { + name: "MsgHeartbeat" + number: 8 + } + value { + name: "MsgHeartbeatResp" + number: 9 + } + value { + name: "MsgUnreachable" + number: 10 + } + value { + name: "MsgSnapStatus" + number: 11 + } + value { + name: "MsgCheckQuorum" + number: 12 + } + value { + name: "MsgTransferLeader" + number: 13 + } + value { + name: "MsgTimeoutNow" + number: 14 + } + value { + name: "MsgReadIndex" + number: 15 + } + value { + name: "MsgReadIndexResp" + number: 16 + } + value { + name: "MsgPreVote" + number: 17 + } + value { + name: "MsgPreVoteResp" + number: 18 + } + } + enum_type { + name: "ConfChangeType" + value { + name: "ConfChangeAddNode" + number: 0 + } + value { + name: "ConfChangeRemoveNode" + number: 1 + } + value { + name: "ConfChangeUpdateNode" + number: 2 + } + } + options { + 63017: 1 + 63020: 1 + 63018: 1 + 63001: 0 + 63002: 0 + } +} +file { + name: "github.com/docker/swarmkit/api/raft.proto" + package: "docker.swarmkit.v1" + dependency: "github.com/docker/swarmkit/api/objects.proto" + dependency: "github.com/docker/swarmkit/api/types.proto" + dependency: "github.com/coreos/etcd/raft/raftpb/raft.proto" + dependency: "gogoproto/gogo.proto" + dependency: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto" + message_type { + name: "RaftMember" + field { + name: "raft_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "raftId" + } + field { + name: "node_id" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "nodeId" + } + field { + name: "addr" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "addr" + } + field { + name: "status" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.RaftMemberStatus" + options { + 65001: 0 + } + json_name: "status" + } + } + message_type { + name: "JoinRequest" + field { + name: "addr" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "addr" + } + } + message_type { + name: "JoinResponse" + field { + name: "raft_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "raftId" + } + field { + name: "members" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.RaftMember" + json_name: "members" + } + field { + name: "removed_members" + number: 3 + label: LABEL_REPEATED + type: TYPE_UINT64 + options { + packed: false + } + json_name: "removedMembers" + } + } + message_type { + name: "LeaveRequest" + field { + name: "node" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.RaftMember" + json_name: "node" + } + } + message_type { + name: "LeaveResponse" + } + message_type { + name: "ProcessRaftMessageRequest" + field { + name: "message" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".raftpb.Message" + json_name: "message" + } + options { + 70000: 0 + } + } + message_type { + name: "ProcessRaftMessageResponse" + } + message_type { + name: "StreamRaftMessageRequest" + field { + name: "message" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".raftpb.Message" + json_name: "message" + } + options { + 70000: 0 + } + } + message_type { + 
name: "StreamRaftMessageResponse" + } + message_type { + name: "ResolveAddressRequest" + field { + name: "raft_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "raftId" + } + } + message_type { + name: "ResolveAddressResponse" + field { + name: "addr" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "addr" + } + } + message_type { + name: "InternalRaftRequest" + field { + name: "id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "id" + } + field { + name: "action" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.StoreAction" + options { + 65001: 0 + } + json_name: "action" + } + } + message_type { + name: "StoreAction" + field { + name: "action" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.StoreActionKind" + json_name: "action" + } + field { + name: "node" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Node" + oneof_index: 0 + json_name: "node" + } + field { + name: "service" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Service" + oneof_index: 0 + json_name: "service" + } + field { + name: "task" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Task" + oneof_index: 0 + json_name: "task" + } + field { + name: "network" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Network" + oneof_index: 0 + json_name: "network" + } + field { + name: "cluster" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Cluster" + oneof_index: 0 + json_name: "cluster" + } + field { + name: "secret" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Secret" + oneof_index: 0 + json_name: "secret" + } + field { + name: "resource" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Resource" + oneof_index: 0 + json_name: "resource" + } + field { + name: "extension" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Extension" + oneof_index: 0 + json_name: "extension" + } + field { + name: "config" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Config" + oneof_index: 0 + json_name: "config" + } + oneof_decl { + name: "target" + } + } + enum_type { + name: "StoreActionKind" + value { + name: "UNKNOWN" + number: 0 + options { + 66001: "StoreActionKindUnknown" + } + } + value { + name: "STORE_ACTION_CREATE" + number: 1 + options { + 66001: "StoreActionKindCreate" + } + } + value { + name: "STORE_ACTION_UPDATE" + number: 2 + options { + 66001: "StoreActionKindUpdate" + } + } + value { + name: "STORE_ACTION_REMOVE" + number: 3 + options { + 66001: "StoreActionKindRemove" + } + } + options { + 62001: 0 + 62023: "StoreActionKind" + } + } + service { + name: "Raft" + method { + name: "ProcessRaftMessage" + input_type: ".docker.swarmkit.v1.ProcessRaftMessageRequest" + output_type: ".docker.swarmkit.v1.ProcessRaftMessageResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "StreamRaftMessage" + input_type: ".docker.swarmkit.v1.StreamRaftMessageRequest" + output_type: ".docker.swarmkit.v1.StreamRaftMessageResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + client_streaming: true + } + method { + name: "ResolveAddress" + input_type: 
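# NOTE (editor): the bare numeric keys in the "options" blocks of this
# descriptor dump are extension field numbers that the dump cannot resolve
# to names. Matching them against the gogoproto and swarmkit plugin
# definitions (an interpretation, not stated in this file): the 63xxx file
# options are gogoproto code-generation switches (marshaler_all,
# unmarshaler_all, sizer_all, and friends), 65001 is gogoproto.nullable,
# 65004 is gogoproto.customname, 66001 is gogoproto.enumvalue_customname,
# 62023 is gogoproto.enum_customname, and 73626345 is swarmkit's
# TLS-authorization method option whose repeated field 1 lists the roles
# ("swarm-manager", "swarm-worker") allowed to call the RPC.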
".docker.swarmkit.v1.ResolveAddressRequest" + output_type: ".docker.swarmkit.v1.ResolveAddressResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + } + service { + name: "RaftMembership" + method { + name: "Join" + input_type: ".docker.swarmkit.v1.JoinRequest" + output_type: ".docker.swarmkit.v1.JoinResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "Leave" + input_type: ".docker.swarmkit.v1.LeaveRequest" + output_type: ".docker.swarmkit.v1.LeaveResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + } + weak_dependency: 3 + weak_dependency: 4 + syntax: "proto3" +} +file { + name: "github.com/docker/swarmkit/api/resource.proto" + package: "docker.swarmkit.v1" + dependency: "github.com/docker/swarmkit/api/types.proto" + dependency: "gogoproto/gogo.proto" + dependency: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto" + message_type { + name: "AttachNetworkRequest" + field { + name: "config" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NetworkAttachmentConfig" + json_name: "config" + } + field { + name: "container_id" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "containerId" + } + } + message_type { + name: "AttachNetworkResponse" + field { + name: "attachment_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "attachmentId" + } + } + message_type { + name: "DetachNetworkRequest" + field { + name: "attachment_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "attachmentId" + } + } + message_type { + name: "DetachNetworkResponse" + } + service { + name: "ResourceAllocator" + method { + name: "AttachNetwork" + input_type: ".docker.swarmkit.v1.AttachNetworkRequest" + output_type: ".docker.swarmkit.v1.AttachNetworkResponse" + options { + 73626345 { + 1: "swarm-worker" + 1: "swarm-manager" + } + } + } + method { + name: "DetachNetwork" + input_type: ".docker.swarmkit.v1.DetachNetworkRequest" + output_type: ".docker.swarmkit.v1.DetachNetworkResponse" + options { + 73626345 { + 1: "swarm-worker" + 1: "swarm-manager" + } + } + } + } + syntax: "proto3" +} +file { + name: "github.com/docker/swarmkit/api/snapshot.proto" + package: "docker.swarmkit.v1" + dependency: "github.com/docker/swarmkit/api/objects.proto" + dependency: "github.com/docker/swarmkit/api/raft.proto" + dependency: "gogoproto/gogo.proto" + message_type { + name: "StoreSnapshot" + field { + name: "nodes" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Node" + json_name: "nodes" + } + field { + name: "services" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Service" + json_name: "services" + } + field { + name: "networks" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Network" + json_name: "networks" + } + field { + name: "tasks" + number: 4 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Task" + json_name: "tasks" + } + field { + name: "clusters" + number: 5 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Cluster" + json_name: "clusters" + } + field { + name: "secrets" + number: 6 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Secret" + json_name: "secrets" + } + field { + name: "resources" + number: 7 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Resource" + json_name: "resources" + } + field { + 
name: "extensions" + number: 8 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Extension" + json_name: "extensions" + } + field { + name: "configs" + number: 9 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Config" + json_name: "configs" + } + } + message_type { + name: "ClusterSnapshot" + field { + name: "members" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.RaftMember" + json_name: "members" + } + field { + name: "removed" + number: 2 + label: LABEL_REPEATED + type: TYPE_UINT64 + options { + packed: false + } + json_name: "removed" + } + } + message_type { + name: "Snapshot" + field { + name: "version" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.Snapshot.Version" + json_name: "version" + } + field { + name: "membership" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ClusterSnapshot" + options { + 65001: 0 + } + json_name: "membership" + } + field { + name: "store" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.StoreSnapshot" + options { + 65001: 0 + } + json_name: "store" + } + enum_type { + name: "Version" + value { + name: "V0" + number: 0 + } + } + } + weak_dependency: 2 + syntax: "proto3" +} +file { + name: "github.com/docker/swarmkit/api/watch.proto" + package: "docker.swarmkit.v1" + dependency: "github.com/docker/swarmkit/api/specs.proto" + dependency: "github.com/docker/swarmkit/api/objects.proto" + dependency: "github.com/docker/swarmkit/api/types.proto" + dependency: "gogoproto/gogo.proto" + dependency: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto" + message_type { + name: "Object" + field { + name: "node" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Node" + oneof_index: 0 + json_name: "node" + } + field { + name: "service" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Service" + oneof_index: 0 + json_name: "service" + } + field { + name: "network" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Network" + oneof_index: 0 + json_name: "network" + } + field { + name: "task" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Task" + oneof_index: 0 + json_name: "task" + } + field { + name: "cluster" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Cluster" + oneof_index: 0 + json_name: "cluster" + } + field { + name: "secret" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Secret" + oneof_index: 0 + json_name: "secret" + } + field { + name: "resource" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Resource" + oneof_index: 0 + json_name: "resource" + } + field { + name: "extension" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Extension" + oneof_index: 0 + json_name: "extension" + } + field { + name: "config" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Config" + oneof_index: 0 + json_name: "config" + } + oneof_decl { + name: "Object" + } + } + message_type { + name: "SelectBySlot" + field { + name: "service_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "ServiceID" + } + json_name: "serviceId" + } + field { + name: 
"slot" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "slot" + } + } + message_type { + name: "SelectByCustom" + field { + name: "kind" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "kind" + } + field { + name: "index" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "index" + } + field { + name: "value" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + } + message_type { + name: "SelectBy" + field { + name: "id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "ID" + } + oneof_index: 0 + json_name: "id" + } + field { + name: "id_prefix" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "IDPrefix" + } + oneof_index: 0 + json_name: "idPrefix" + } + field { + name: "name" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + oneof_index: 0 + json_name: "name" + } + field { + name: "name_prefix" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_STRING + oneof_index: 0 + json_name: "namePrefix" + } + field { + name: "custom" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.SelectByCustom" + oneof_index: 0 + json_name: "custom" + } + field { + name: "custom_prefix" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.SelectByCustom" + oneof_index: 0 + json_name: "customPrefix" + } + field { + name: "service_id" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "ServiceID" + } + oneof_index: 0 + json_name: "serviceId" + } + field { + name: "node_id" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "NodeID" + } + oneof_index: 0 + json_name: "nodeId" + } + field { + name: "slot" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.SelectBySlot" + oneof_index: 0 + json_name: "slot" + } + field { + name: "desired_state" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.TaskState" + oneof_index: 0 + json_name: "desiredState" + } + field { + name: "role" + number: 11 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.NodeRole" + oneof_index: 0 + json_name: "role" + } + field { + name: "membership" + number: 12 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.NodeSpec.Membership" + oneof_index: 0 + json_name: "membership" + } + field { + name: "referenced_network_id" + number: 13 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "ReferencedNetworkID" + } + oneof_index: 0 + json_name: "referencedNetworkId" + } + field { + name: "referenced_secret_id" + number: 14 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "ReferencedSecretID" + } + oneof_index: 0 + json_name: "referencedSecretId" + } + field { + name: "referenced_config_id" + number: 16 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "ReferencedConfigID" + } + oneof_index: 0 + json_name: "referencedConfigId" + } + field { + name: "kind" + number: 15 + label: LABEL_OPTIONAL + type: TYPE_STRING + oneof_index: 0 + json_name: "kind" + } + oneof_decl { + name: "By" + } + } + message_type { + name: "WatchRequest" + field { + name: "entries" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.WatchRequest.WatchEntry" + json_name: "entries" + } + field { + name: "resume_from" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: 
".docker.swarmkit.v1.Version" + json_name: "resumeFrom" + } + field { + name: "include_old_object" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "includeOldObject" + } + nested_type { + name: "WatchEntry" + field { + name: "kind" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "kind" + } + field { + name: "action" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.WatchActionKind" + json_name: "action" + } + field { + name: "filters" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.SelectBy" + json_name: "filters" + } + } + } + message_type { + name: "WatchMessage" + field { + name: "events" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.WatchMessage.Event" + json_name: "events" + } + field { + name: "version" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Version" + json_name: "version" + } + nested_type { + name: "Event" + field { + name: "action" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.WatchActionKind" + json_name: "action" + } + field { + name: "object" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Object" + json_name: "object" + } + field { + name: "old_object" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Object" + json_name: "oldObject" + } + } + } + enum_type { + name: "WatchActionKind" + value { + name: "WATCH_ACTION_UNKNOWN" + number: 0 + options { + 66001: "WatchActionKindUnknown" + } + } + value { + name: "WATCH_ACTION_CREATE" + number: 1 + options { + 66001: "WatchActionKindCreate" + } + } + value { + name: "WATCH_ACTION_UPDATE" + number: 2 + options { + 66001: "WatchActionKindUpdate" + } + } + value { + name: "WATCH_ACTION_REMOVE" + number: 4 + options { + 66001: "WatchActionKindRemove" + } + } + options { + 62001: 0 + 62023: "WatchActionKind" + } + } + service { + name: "Watch" + method { + name: "Watch" + input_type: ".docker.swarmkit.v1.WatchRequest" + output_type: ".docker.swarmkit.v1.WatchMessage" + options { + 73626345 { + 1: "swarm-manager" + } + } + server_streaming: true + } + } + syntax: "proto3" +} diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/ca.pb.go b/extender/code/vendor/github.com/docker/swarmkit/api/ca.pb.go new file mode 100644 index 000000000..23d375f95 --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/api/ca.pb.go @@ -0,0 +1,2321 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/docker/swarmkit/api/ca.proto + +/* + Package api is a generated protocol buffer package. 
+ + It is generated from these files: + github.com/docker/swarmkit/api/ca.proto + github.com/docker/swarmkit/api/control.proto + github.com/docker/swarmkit/api/dispatcher.proto + github.com/docker/swarmkit/api/health.proto + github.com/docker/swarmkit/api/logbroker.proto + github.com/docker/swarmkit/api/objects.proto + github.com/docker/swarmkit/api/raft.proto + github.com/docker/swarmkit/api/resource.proto + github.com/docker/swarmkit/api/snapshot.proto + github.com/docker/swarmkit/api/specs.proto + github.com/docker/swarmkit/api/types.proto + github.com/docker/swarmkit/api/watch.proto + + It has these top-level messages: + NodeCertificateStatusRequest + NodeCertificateStatusResponse + IssueNodeCertificateRequest + IssueNodeCertificateResponse + GetRootCACertificateRequest + GetRootCACertificateResponse + GetUnlockKeyRequest + GetUnlockKeyResponse + GetNodeRequest + GetNodeResponse + ListNodesRequest + ListNodesResponse + UpdateNodeRequest + UpdateNodeResponse + RemoveNodeRequest + RemoveNodeResponse + GetTaskRequest + GetTaskResponse + RemoveTaskRequest + RemoveTaskResponse + ListTasksRequest + ListTasksResponse + CreateServiceRequest + CreateServiceResponse + GetServiceRequest + GetServiceResponse + UpdateServiceRequest + UpdateServiceResponse + RemoveServiceRequest + RemoveServiceResponse + ListServicesRequest + ListServicesResponse + CreateNetworkRequest + CreateNetworkResponse + GetNetworkRequest + GetNetworkResponse + RemoveNetworkRequest + RemoveNetworkResponse + ListNetworksRequest + ListNetworksResponse + GetClusterRequest + GetClusterResponse + ListClustersRequest + ListClustersResponse + KeyRotation + UpdateClusterRequest + UpdateClusterResponse + GetSecretRequest + GetSecretResponse + UpdateSecretRequest + UpdateSecretResponse + ListSecretsRequest + ListSecretsResponse + CreateSecretRequest + CreateSecretResponse + RemoveSecretRequest + RemoveSecretResponse + GetConfigRequest + GetConfigResponse + UpdateConfigRequest + UpdateConfigResponse + ListConfigsRequest + ListConfigsResponse + CreateConfigRequest + CreateConfigResponse + RemoveConfigRequest + RemoveConfigResponse + SessionRequest + SessionMessage + HeartbeatRequest + HeartbeatResponse + UpdateTaskStatusRequest + UpdateTaskStatusResponse + TasksRequest + TasksMessage + AssignmentsRequest + Assignment + AssignmentChange + AssignmentsMessage + HealthCheckRequest + HealthCheckResponse + LogSubscriptionOptions + LogSelector + LogContext + LogAttr + LogMessage + SubscribeLogsRequest + SubscribeLogsMessage + ListenSubscriptionsRequest + SubscriptionMessage + PublishLogsMessage + PublishLogsResponse + Meta + Node + Service + Endpoint + Task + NetworkAttachment + Network + Cluster + Secret + Config + Resource + Extension + RaftMember + JoinRequest + JoinResponse + LeaveRequest + LeaveResponse + ProcessRaftMessageRequest + ProcessRaftMessageResponse + StreamRaftMessageRequest + StreamRaftMessageResponse + ResolveAddressRequest + ResolveAddressResponse + InternalRaftRequest + StoreAction + AttachNetworkRequest + AttachNetworkResponse + DetachNetworkRequest + DetachNetworkResponse + StoreSnapshot + ClusterSnapshot + Snapshot + NodeSpec + ServiceSpec + ReplicatedService + GlobalService + TaskSpec + ResourceReference + GenericRuntimeSpec + NetworkAttachmentSpec + ContainerSpec + EndpointSpec + NetworkSpec + ClusterSpec + SecretSpec + ConfigSpec + Version + IndexEntry + Annotations + NamedGenericResource + DiscreteGenericResource + GenericResource + Resources + ResourceRequirements + Platform + PluginDescription + EngineDescription + 
NodeDescription + NodeTLSInfo + RaftMemberStatus + NodeStatus + Image + Mount + RestartPolicy + UpdateConfig + UpdateStatus + ContainerStatus + PortStatus + TaskStatus + NetworkAttachmentConfig + IPAMConfig + PortConfig + Driver + IPAMOptions + Peer + WeightedPeer + IssuanceStatus + AcceptancePolicy + ExternalCA + CAConfig + OrchestrationConfig + TaskDefaults + DispatcherConfig + RaftConfig + EncryptionConfig + SpreadOver + PlacementPreference + Placement + JoinTokens + RootCA + Certificate + EncryptionKey + ManagerStatus + FileTarget + SecretReference + ConfigReference + BlacklistedCertificate + HealthConfig + MaybeEncryptedRecord + RootRotation + Privileges + Object + SelectBySlot + SelectByCustom + SelectBy + WatchRequest + WatchMessage +*/ +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/docker/swarmkit/protobuf/plugin" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" + +import raftselector "github.com/docker/swarmkit/manager/raftselector" +import codes "google.golang.org/grpc/codes" +import status "google.golang.org/grpc/status" +import metadata "google.golang.org/grpc/metadata" +import peer "google.golang.org/grpc/peer" +import rafttime "time" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type NodeCertificateStatusRequest struct { + NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` +} + +func (m *NodeCertificateStatusRequest) Reset() { *m = NodeCertificateStatusRequest{} } +func (*NodeCertificateStatusRequest) ProtoMessage() {} +func (*NodeCertificateStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{0} } + +type NodeCertificateStatusResponse struct { + Status *IssuanceStatus `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"` + Certificate *Certificate `protobuf:"bytes,2,opt,name=certificate" json:"certificate,omitempty"` +} + +func (m *NodeCertificateStatusResponse) Reset() { *m = NodeCertificateStatusResponse{} } +func (*NodeCertificateStatusResponse) ProtoMessage() {} +func (*NodeCertificateStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{1} } + +type IssueNodeCertificateRequest struct { + // DEPRECATED: Role is now selected based on which secret is matched. + Role NodeRole `protobuf:"varint,1,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole" json:"role,omitempty"` + // CSR is the certificate signing request. 
+ CSR []byte `protobuf:"bytes,2,opt,name=csr,proto3" json:"csr,omitempty"` + // Token represents a user-provided string that is necessary for new + // nodes to join the cluster + Token string `protobuf:"bytes,3,opt,name=token,proto3" json:"token,omitempty"` + // Availability allows a user to control the current scheduling status of a node + Availability NodeSpec_Availability `protobuf:"varint,4,opt,name=availability,proto3,enum=docker.swarmkit.v1.NodeSpec_Availability" json:"availability,omitempty"` +} + +func (m *IssueNodeCertificateRequest) Reset() { *m = IssueNodeCertificateRequest{} } +func (*IssueNodeCertificateRequest) ProtoMessage() {} +func (*IssueNodeCertificateRequest) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{2} } + +type IssueNodeCertificateResponse struct { + NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + NodeMembership NodeSpec_Membership `protobuf:"varint,2,opt,name=node_membership,json=nodeMembership,proto3,enum=docker.swarmkit.v1.NodeSpec_Membership" json:"node_membership,omitempty"` +} + +func (m *IssueNodeCertificateResponse) Reset() { *m = IssueNodeCertificateResponse{} } +func (*IssueNodeCertificateResponse) ProtoMessage() {} +func (*IssueNodeCertificateResponse) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{3} } + +type GetRootCACertificateRequest struct { +} + +func (m *GetRootCACertificateRequest) Reset() { *m = GetRootCACertificateRequest{} } +func (*GetRootCACertificateRequest) ProtoMessage() {} +func (*GetRootCACertificateRequest) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{4} } + +type GetRootCACertificateResponse struct { + Certificate []byte `protobuf:"bytes,1,opt,name=certificate,proto3" json:"certificate,omitempty"` +} + +func (m *GetRootCACertificateResponse) Reset() { *m = GetRootCACertificateResponse{} } +func (*GetRootCACertificateResponse) ProtoMessage() {} +func (*GetRootCACertificateResponse) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{5} } + +type GetUnlockKeyRequest struct { +} + +func (m *GetUnlockKeyRequest) Reset() { *m = GetUnlockKeyRequest{} } +func (*GetUnlockKeyRequest) ProtoMessage() {} +func (*GetUnlockKeyRequest) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{6} } + +type GetUnlockKeyResponse struct { + UnlockKey []byte `protobuf:"bytes,1,opt,name=unlock_key,json=unlockKey,proto3" json:"unlock_key,omitempty"` + Version Version `protobuf:"bytes,2,opt,name=version" json:"version"` +} + +func (m *GetUnlockKeyResponse) Reset() { *m = GetUnlockKeyResponse{} } +func (*GetUnlockKeyResponse) ProtoMessage() {} +func (*GetUnlockKeyResponse) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{7} } + +func init() { + proto.RegisterType((*NodeCertificateStatusRequest)(nil), "docker.swarmkit.v1.NodeCertificateStatusRequest") + proto.RegisterType((*NodeCertificateStatusResponse)(nil), "docker.swarmkit.v1.NodeCertificateStatusResponse") + proto.RegisterType((*IssueNodeCertificateRequest)(nil), "docker.swarmkit.v1.IssueNodeCertificateRequest") + proto.RegisterType((*IssueNodeCertificateResponse)(nil), "docker.swarmkit.v1.IssueNodeCertificateResponse") + proto.RegisterType((*GetRootCACertificateRequest)(nil), "docker.swarmkit.v1.GetRootCACertificateRequest") + proto.RegisterType((*GetRootCACertificateResponse)(nil), "docker.swarmkit.v1.GetRootCACertificateResponse") + proto.RegisterType((*GetUnlockKeyRequest)(nil), "docker.swarmkit.v1.GetUnlockKeyRequest") + proto.RegisterType((*GetUnlockKeyResponse)(nil), 
"docker.swarmkit.v1.GetUnlockKeyResponse") +} + +type authenticatedWrapperCAServer struct { + local CAServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperCAServer(local CAServer, authorize func(context.Context, []string) error) CAServer { + return &authenticatedWrapperCAServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperCAServer) GetRootCACertificate(ctx context.Context, r *GetRootCACertificateRequest) (*GetRootCACertificateResponse, error) { + + return p.local.GetRootCACertificate(ctx, r) +} + +func (p *authenticatedWrapperCAServer) GetUnlockKey(ctx context.Context, r *GetUnlockKeyRequest) (*GetUnlockKeyResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetUnlockKey(ctx, r) +} + +type authenticatedWrapperNodeCAServer struct { + local NodeCAServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperNodeCAServer(local NodeCAServer, authorize func(context.Context, []string) error) NodeCAServer { + return &authenticatedWrapperNodeCAServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperNodeCAServer) IssueNodeCertificate(ctx context.Context, r *IssueNodeCertificateRequest) (*IssueNodeCertificateResponse, error) { + + return p.local.IssueNodeCertificate(ctx, r) +} + +func (p *authenticatedWrapperNodeCAServer) NodeCertificateStatus(ctx context.Context, r *NodeCertificateStatusRequest) (*NodeCertificateStatusResponse, error) { + + return p.local.NodeCertificateStatus(ctx, r) +} + +func (m *NodeCertificateStatusRequest) Copy() *NodeCertificateStatusRequest { + if m == nil { + return nil + } + o := &NodeCertificateStatusRequest{} + o.CopyFrom(m) + return o +} + +func (m *NodeCertificateStatusRequest) CopyFrom(src interface{}) { + + o := src.(*NodeCertificateStatusRequest) + *m = *o +} + +func (m *NodeCertificateStatusResponse) Copy() *NodeCertificateStatusResponse { + if m == nil { + return nil + } + o := &NodeCertificateStatusResponse{} + o.CopyFrom(m) + return o +} + +func (m *NodeCertificateStatusResponse) CopyFrom(src interface{}) { + + o := src.(*NodeCertificateStatusResponse) + *m = *o + if o.Status != nil { + m.Status = &IssuanceStatus{} + deepcopy.Copy(m.Status, o.Status) + } + if o.Certificate != nil { + m.Certificate = &Certificate{} + deepcopy.Copy(m.Certificate, o.Certificate) + } +} + +func (m *IssueNodeCertificateRequest) Copy() *IssueNodeCertificateRequest { + if m == nil { + return nil + } + o := &IssueNodeCertificateRequest{} + o.CopyFrom(m) + return o +} + +func (m *IssueNodeCertificateRequest) CopyFrom(src interface{}) { + + o := src.(*IssueNodeCertificateRequest) + *m = *o + if o.CSR != nil { + m.CSR = make([]byte, len(o.CSR)) + copy(m.CSR, o.CSR) + } +} + +func (m *IssueNodeCertificateResponse) Copy() *IssueNodeCertificateResponse { + if m == nil { + return nil + } + o := &IssueNodeCertificateResponse{} + o.CopyFrom(m) + return o +} + +func (m *IssueNodeCertificateResponse) CopyFrom(src interface{}) { + + o := src.(*IssueNodeCertificateResponse) + *m = *o +} + +func (m *GetRootCACertificateRequest) Copy() *GetRootCACertificateRequest { + if m == nil { + return nil + } + o := &GetRootCACertificateRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetRootCACertificateRequest) CopyFrom(src interface{}) {} +func (m *GetRootCACertificateResponse) Copy() *GetRootCACertificateResponse { + if m == nil { + return nil + } + o := &GetRootCACertificateResponse{} + 
o.CopyFrom(m) + return o +} + +func (m *GetRootCACertificateResponse) CopyFrom(src interface{}) { + + o := src.(*GetRootCACertificateResponse) + *m = *o + if o.Certificate != nil { + m.Certificate = make([]byte, len(o.Certificate)) + copy(m.Certificate, o.Certificate) + } +} + +func (m *GetUnlockKeyRequest) Copy() *GetUnlockKeyRequest { + if m == nil { + return nil + } + o := &GetUnlockKeyRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetUnlockKeyRequest) CopyFrom(src interface{}) {} +func (m *GetUnlockKeyResponse) Copy() *GetUnlockKeyResponse { + if m == nil { + return nil + } + o := &GetUnlockKeyResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetUnlockKeyResponse) CopyFrom(src interface{}) { + + o := src.(*GetUnlockKeyResponse) + *m = *o + if o.UnlockKey != nil { + m.UnlockKey = make([]byte, len(o.UnlockKey)) + copy(m.UnlockKey, o.UnlockKey) + } + deepcopy.Copy(&m.Version, &o.Version) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for CA service + +type CAClient interface { + GetRootCACertificate(ctx context.Context, in *GetRootCACertificateRequest, opts ...grpc.CallOption) (*GetRootCACertificateResponse, error) + // GetUnlockKey returns the current unlock key for the cluster for the role of the client + // asking. + GetUnlockKey(ctx context.Context, in *GetUnlockKeyRequest, opts ...grpc.CallOption) (*GetUnlockKeyResponse, error) +} + +type cAClient struct { + cc *grpc.ClientConn +} + +func NewCAClient(cc *grpc.ClientConn) CAClient { + return &cAClient{cc} +} + +func (c *cAClient) GetRootCACertificate(ctx context.Context, in *GetRootCACertificateRequest, opts ...grpc.CallOption) (*GetRootCACertificateResponse, error) { + out := new(GetRootCACertificateResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.CA/GetRootCACertificate", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cAClient) GetUnlockKey(ctx context.Context, in *GetUnlockKeyRequest, opts ...grpc.CallOption) (*GetUnlockKeyResponse, error) { + out := new(GetUnlockKeyResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.CA/GetUnlockKey", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for CA service + +type CAServer interface { + GetRootCACertificate(context.Context, *GetRootCACertificateRequest) (*GetRootCACertificateResponse, error) + // GetUnlockKey returns the current unlock key for the cluster for the role of the client + // asking. 
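	// NOTE (editor): as the authenticatedWrapper types earlier in this file
	// show, GetRootCACertificate and the NodeCA methods are deliberately left
	// unauthenticated (presumably because a joining node has no certificate
	// yet and so could not pass a TLS-role check), while GetUnlockKey is
	// gated on the "swarm-manager" role before delegating to the local
	// implementation.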
+ GetUnlockKey(context.Context, *GetUnlockKeyRequest) (*GetUnlockKeyResponse, error) +} + +func RegisterCAServer(s *grpc.Server, srv CAServer) { + s.RegisterService(&_CA_serviceDesc, srv) +} + +func _CA_GetRootCACertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRootCACertificateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CAServer).GetRootCACertificate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.CA/GetRootCACertificate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CAServer).GetRootCACertificate(ctx, req.(*GetRootCACertificateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CA_GetUnlockKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetUnlockKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CAServer).GetUnlockKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.CA/GetUnlockKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CAServer).GetUnlockKey(ctx, req.(*GetUnlockKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _CA_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.CA", + HandlerType: (*CAServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetRootCACertificate", + Handler: _CA_GetRootCACertificate_Handler, + }, + { + MethodName: "GetUnlockKey", + Handler: _CA_GetUnlockKey_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/docker/swarmkit/api/ca.proto", +} + +// Client API for NodeCA service + +type NodeCAClient interface { + IssueNodeCertificate(ctx context.Context, in *IssueNodeCertificateRequest, opts ...grpc.CallOption) (*IssueNodeCertificateResponse, error) + NodeCertificateStatus(ctx context.Context, in *NodeCertificateStatusRequest, opts ...grpc.CallOption) (*NodeCertificateStatusResponse, error) +} + +type nodeCAClient struct { + cc *grpc.ClientConn +} + +func NewNodeCAClient(cc *grpc.ClientConn) NodeCAClient { + return &nodeCAClient{cc} +} + +func (c *nodeCAClient) IssueNodeCertificate(ctx context.Context, in *IssueNodeCertificateRequest, opts ...grpc.CallOption) (*IssueNodeCertificateResponse, error) { + out := new(IssueNodeCertificateResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.NodeCA/IssueNodeCertificate", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeCAClient) NodeCertificateStatus(ctx context.Context, in *NodeCertificateStatusRequest, opts ...grpc.CallOption) (*NodeCertificateStatusResponse, error) { + out := new(NodeCertificateStatusResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.NodeCA/NodeCertificateStatus", in, out, c.cc, opts...) 
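	// NOTE (editor): each generated client method is a thin wrapper that
	// allocates the response message and delegates to grpc.Invoke with the
	// fully qualified method name. A minimal, hypothetical usage sketch
	// (not part of the generated file; addr, ctx, csrPEM, and joinToken are
	// assumptions for illustration):
	//
	//	conn, err := grpc.Dial(addr, grpc.WithInsecure())
	//	if err != nil {
	//		return err
	//	}
	//	defer conn.Close()
	//	client := NewNodeCAClient(conn)
	//	resp, err := client.IssueNodeCertificate(ctx,
	//		&IssueNodeCertificateRequest{CSR: csrPEM, Token: joinToken})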
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for NodeCA service + +type NodeCAServer interface { + IssueNodeCertificate(context.Context, *IssueNodeCertificateRequest) (*IssueNodeCertificateResponse, error) + NodeCertificateStatus(context.Context, *NodeCertificateStatusRequest) (*NodeCertificateStatusResponse, error) +} + +func RegisterNodeCAServer(s *grpc.Server, srv NodeCAServer) { + s.RegisterService(&_NodeCA_serviceDesc, srv) +} + +func _NodeCA_IssueNodeCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(IssueNodeCertificateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeCAServer).IssueNodeCertificate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.NodeCA/IssueNodeCertificate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeCAServer).IssueNodeCertificate(ctx, req.(*IssueNodeCertificateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NodeCA_NodeCertificateStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeCertificateStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeCAServer).NodeCertificateStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.NodeCA/NodeCertificateStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeCAServer).NodeCertificateStatus(ctx, req.(*NodeCertificateStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _NodeCA_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.NodeCA", + HandlerType: (*NodeCAServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "IssueNodeCertificate", + Handler: _NodeCA_IssueNodeCertificate_Handler, + }, + { + MethodName: "NodeCertificateStatus", + Handler: _NodeCA_NodeCertificateStatus_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/docker/swarmkit/api/ca.proto", +} + +func (m *NodeCertificateStatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeCertificateStatusRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NodeID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintCa(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + return i, nil +} + +func (m *NodeCertificateStatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeCertificateStatusResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Status != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintCa(dAtA, i, uint64(m.Status.Size())) + n1, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.Certificate != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintCa(dAtA, i, uint64(m.Certificate.Size())) + n2, err := m.Certificate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i 
+= n2 + } + return i, nil +} + +func (m *IssueNodeCertificateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IssueNodeCertificateRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Role != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintCa(dAtA, i, uint64(m.Role)) + } + if len(m.CSR) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintCa(dAtA, i, uint64(len(m.CSR))) + i += copy(dAtA[i:], m.CSR) + } + if len(m.Token) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintCa(dAtA, i, uint64(len(m.Token))) + i += copy(dAtA[i:], m.Token) + } + if m.Availability != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintCa(dAtA, i, uint64(m.Availability)) + } + return i, nil +} + +func (m *IssueNodeCertificateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IssueNodeCertificateResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NodeID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintCa(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + if m.NodeMembership != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintCa(dAtA, i, uint64(m.NodeMembership)) + } + return i, nil +} + +func (m *GetRootCACertificateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetRootCACertificateRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *GetRootCACertificateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetRootCACertificateResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Certificate) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintCa(dAtA, i, uint64(len(m.Certificate))) + i += copy(dAtA[i:], m.Certificate) + } + return i, nil +} + +func (m *GetUnlockKeyRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetUnlockKeyRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *GetUnlockKeyResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetUnlockKeyResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.UnlockKey) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintCa(dAtA, i, uint64(len(m.UnlockKey))) + i += copy(dAtA[i:], m.UnlockKey) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintCa(dAtA, i, uint64(m.Version.Size())) + n3, err := m.Version.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func encodeVarintCa(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyCAServer 
struct { + local CAServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyCAServer(local CAServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) CAServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyCAServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyCAServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyCAServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyCAServer) GetRootCACertificate(ctx context.Context, r *GetRootCACertificateRequest) (*GetRootCACertificateResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetRootCACertificate(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewCAClient(conn).GetRootCACertificate(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetRootCACertificate(ctx, r) + } + return nil, err + } + return NewCAClient(conn).GetRootCACertificate(modCtx, r) + } + return resp, err +} + +func (p *raftProxyCAServer) GetUnlockKey(ctx context.Context, r *GetUnlockKeyRequest) (*GetUnlockKeyResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + 
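				// NOTE (editor): this raft proxy shape repeats for every
				// swarmkit service: when this node is the leader the call is
				// served locally; otherwise it is forwarded over the leader's
				// connection, with a "redirect" entry appended to the gRPC
				// metadata so that a second hop can be rejected as a redirect
				// loop. On transport errors the proxy falls back to
				// pollNewLeaderConn, which re-resolves the leader and
				// health-checks its "Raft" service every 500ms before
				// retrying the call once.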
} + return p.local.GetUnlockKey(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewCAClient(conn).GetUnlockKey(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetUnlockKey(ctx, r) + } + return nil, err + } + return NewCAClient(conn).GetUnlockKey(modCtx, r) + } + return resp, err +} + +type raftProxyNodeCAServer struct { + local NodeCAServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyNodeCAServer(local NodeCAServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) NodeCAServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyNodeCAServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyNodeCAServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyNodeCAServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyNodeCAServer) IssueNodeCertificate(ctx context.Context, r *IssueNodeCertificateRequest) (*IssueNodeCertificateResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.IssueNodeCertificate(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewNodeCAClient(conn).IssueNodeCertificate(modCtx, r) + if err != nil { + if 
!strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.IssueNodeCertificate(ctx, r) + } + return nil, err + } + return NewNodeCAClient(conn).IssueNodeCertificate(modCtx, r) + } + return resp, err +} + +func (p *raftProxyNodeCAServer) NodeCertificateStatus(ctx context.Context, r *NodeCertificateStatusRequest) (*NodeCertificateStatusResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.NodeCertificateStatus(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewNodeCAClient(conn).NodeCertificateStatus(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.NodeCertificateStatus(ctx, r) + } + return nil, err + } + return NewNodeCAClient(conn).NodeCertificateStatus(modCtx, r) + } + return resp, err +} + +func (m *NodeCertificateStatusRequest) Size() (n int) { + var l int + _ = l + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovCa(uint64(l)) + } + return n +} + +func (m *NodeCertificateStatusResponse) Size() (n int) { + var l int + _ = l + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovCa(uint64(l)) + } + if m.Certificate != nil { + l = m.Certificate.Size() + n += 1 + l + sovCa(uint64(l)) + } + return n +} + +func (m *IssueNodeCertificateRequest) Size() (n int) { + var l int + _ = l + if m.Role != 0 { + n += 1 + sovCa(uint64(m.Role)) + } + l = len(m.CSR) + if l > 0 { + n += 1 + l + sovCa(uint64(l)) + } + l = len(m.Token) + if l > 0 { + n += 1 + l + sovCa(uint64(l)) + } + if m.Availability != 0 { + n += 1 + sovCa(uint64(m.Availability)) + } + return n +} + +func (m *IssueNodeCertificateResponse) Size() (n int) { + var l int + _ = l + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovCa(uint64(l)) + } + if m.NodeMembership != 0 { + n += 1 + sovCa(uint64(m.NodeMembership)) + } + return n +} + +func (m *GetRootCACertificateRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *GetRootCACertificateResponse) Size() (n int) { + var l int + _ = l + l = len(m.Certificate) + if l > 0 { + n += 1 + l + sovCa(uint64(l)) + } + return n +} + +func (m *GetUnlockKeyRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *GetUnlockKeyResponse) Size() (n int) { + var l int + _ = l + l = len(m.UnlockKey) + if l > 0 { + n += 1 + l + sovCa(uint64(l)) + } + l = m.Version.Size() + n += 1 + l + sovCa(uint64(l)) + return n +} + +func sovCa(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozCa(x uint64) (n int) { + return sovCa(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *NodeCertificateStatusRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeCertificateStatusRequest{`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `}`, + }, "") + return s +} +func (this 
*NodeCertificateStatusResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeCertificateStatusResponse{`, + `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "IssuanceStatus", "IssuanceStatus", 1) + `,`, + `Certificate:` + strings.Replace(fmt.Sprintf("%v", this.Certificate), "Certificate", "Certificate", 1) + `,`, + `}`, + }, "") + return s +} +func (this *IssueNodeCertificateRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IssueNodeCertificateRequest{`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `CSR:` + fmt.Sprintf("%v", this.CSR) + `,`, + `Token:` + fmt.Sprintf("%v", this.Token) + `,`, + `Availability:` + fmt.Sprintf("%v", this.Availability) + `,`, + `}`, + }, "") + return s +} +func (this *IssueNodeCertificateResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IssueNodeCertificateResponse{`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `NodeMembership:` + fmt.Sprintf("%v", this.NodeMembership) + `,`, + `}`, + }, "") + return s +} +func (this *GetRootCACertificateRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetRootCACertificateRequest{`, + `}`, + }, "") + return s +} +func (this *GetRootCACertificateResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetRootCACertificateResponse{`, + `Certificate:` + fmt.Sprintf("%v", this.Certificate) + `,`, + `}`, + }, "") + return s +} +func (this *GetUnlockKeyRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetUnlockKeyRequest{`, + `}`, + }, "") + return s +} +func (this *GetUnlockKeyResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetUnlockKeyResponse{`, + `UnlockKey:` + fmt.Sprintf("%v", this.UnlockKey) + `,`, + `Version:` + strings.Replace(strings.Replace(this.Version.String(), "Version", "Version", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringCa(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *NodeCertificateStatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeCertificateStatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeCertificateStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
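			// NOTE (editor): the hand-rolled decoder follows the protobuf
			// wire format: each key varint packs the field number and wire
			// type as (fieldNum << 3) | wireType. Worked example for the
			// field decoded here: NodeID is field 1 with wire type 2
			// (length-delimited), so its key byte is (1 << 3) | 2 = 0x0a,
			// followed by a varint length and the raw UTF-8 bytes. Varints
			// are little-endian groups of 7 bits with the high bit set on
			// every byte but the last, which is why the loops above shift
			// by 7 and stop when b < 0x80.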
m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeCertificateStatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeCertificateStatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeCertificateStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &IssuanceStatus{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Certificate == nil { + m.Certificate = &Certificate{} + } + if err := m.Certificate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IssueNodeCertificateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IssueNodeCertificateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IssueNodeCertificateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Role", wireType) + } + m.Role = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Role |= (NodeRole(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CSR", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CSR = append(m.CSR[:0], dAtA[iNdEx:postIndex]...) + if m.CSR == nil { + m.CSR = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Token = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Availability", wireType) + } + m.Availability = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Availability |= (NodeSpec_Availability(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IssueNodeCertificateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IssueNodeCertificateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IssueNodeCertificateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + 
iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeMembership", wireType) + } + m.NodeMembership = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NodeMembership |= (NodeSpec_Membership(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetRootCACertificateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetRootCACertificateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetRootCACertificateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetRootCACertificateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetRootCACertificateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetRootCACertificateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Certificate = append(m.Certificate[:0], dAtA[iNdEx:postIndex]...) 
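+ // Appending into Certificate[:0] copies the payload while reusing any
+ // existing backing array; the nil check below ensures a bytes field that was
+ // present on the wire decodes to a non-nil (possibly empty) slice.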
+ if m.Certificate == nil { + m.Certificate = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetUnlockKeyRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetUnlockKeyRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetUnlockKeyRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetUnlockKeyResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetUnlockKeyResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetUnlockKeyResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UnlockKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UnlockKey = append(m.UnlockKey[:0], dAtA[iNdEx:postIndex]...) 
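+ // UnlockKey gets the same nil-to-empty normalization as the other bytes
+ // fields; Version in case 2 below is declared (gogoproto.nullable) = false
+ // in ca.proto, so it is a value rather than a pointer and is unmarshaled in
+ // place with no nil check.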
+ if m.UnlockKey == nil { + m.UnlockKey = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipCa(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCa + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCa + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCa + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthCa + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCa + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipCa(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthCa = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowCa = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/ca.proto", fileDescriptorCa) } + +var fileDescriptorCa = []byte{ + // 638 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xc1, 0x6e, 0xd3, 0x4c, + 0x10, 0xee, 0xba, 0xfd, 0xd3, 0xbf, 0xd3, 0xd0, 0xa2, 0xa5, 0x95, 0x4c, 0x9a, 0x3a, 0x95, 0x39, + 0xb4, 0x20, 0x61, 0xb7, 0x01, 0x09, 0x09, 0x2e, 0x24, 0x41, 0xaa, 0x2a, 0x54, 0x84, 0xb6, 0x82, + 0x6b, 0xe5, 0x38, 0xdb, 0x74, 0x15, 0xc7, 0x6b, 0xbc, 0xeb, 0x42, 0x6e, 0x48, 0x20, 0xde, 0x00, + 0xc1, 0x89, 0x47, 0xe0, 0x39, 0x2a, 0x4e, 0x48, 0x5c, 0x38, 0x55, 0xd4, 0x0f, 0xc0, 0x33, 0x20, + 
0xaf, 0x6d, 0x9a, 0xb4, 0x4e, 0x5a, 0x4e, 0xf1, 0xce, 0x7c, 0xdf, 0x37, 0x33, 0xdf, 0x4e, 0x16, + 0xd6, 0xbb, 0x4c, 0x1e, 0x46, 0x6d, 0xcb, 0xe5, 0x7d, 0xbb, 0xc3, 0xdd, 0x1e, 0x0d, 0x6d, 0xf1, + 0xda, 0x09, 0xfb, 0x3d, 0x26, 0x6d, 0x27, 0x60, 0xb6, 0xeb, 0x58, 0x41, 0xc8, 0x25, 0xc7, 0x38, + 0xcd, 0x5a, 0x79, 0xd6, 0x3a, 0xda, 0xaa, 0xdc, 0xb9, 0x84, 0x2c, 0x07, 0x01, 0x15, 0x29, 0xff, + 0x52, 0xac, 0x08, 0xa8, 0x9b, 0x63, 0x97, 0xba, 0xbc, 0xcb, 0xd5, 0xa7, 0x9d, 0x7c, 0x65, 0xd1, + 0x07, 0x13, 0x14, 0x14, 0xa2, 0x1d, 0x1d, 0xd8, 0x81, 0x17, 0x75, 0x99, 0x9f, 0xfd, 0xa4, 0x44, + 0xb3, 0x05, 0xd5, 0x67, 0xbc, 0x43, 0x5b, 0x34, 0x94, 0xec, 0x80, 0xb9, 0x8e, 0xa4, 0x7b, 0xd2, + 0x91, 0x91, 0x20, 0xf4, 0x55, 0x44, 0x85, 0xc4, 0xb7, 0x60, 0xd6, 0xe7, 0x1d, 0xba, 0xcf, 0x3a, + 0x3a, 0x5a, 0x43, 0x1b, 0x73, 0x4d, 0x88, 0x4f, 0x6a, 0xa5, 0x84, 0xb2, 0xf3, 0x84, 0x94, 0x92, + 0xd4, 0x4e, 0xc7, 0xfc, 0x82, 0x60, 0x75, 0x8c, 0x8a, 0x08, 0xb8, 0x2f, 0x28, 0x7e, 0x08, 0x25, + 0xa1, 0x22, 0x4a, 0x65, 0xbe, 0x6e, 0x5a, 0x17, 0x2d, 0xb3, 0x76, 0x84, 0x88, 0x1c, 0xdf, 0xcd, + 0xb9, 0x19, 0x03, 0x37, 0x60, 0xde, 0x3d, 0x13, 0xd6, 0x35, 0x25, 0x50, 0x2b, 0x12, 0x18, 0xaa, + 0x4f, 0x86, 0x39, 0xe6, 0x0f, 0x04, 0x2b, 0x89, 0x3a, 0x3d, 0xd7, 0x65, 0x3e, 0xe5, 0x7d, 0x98, + 0x09, 0xb9, 0x47, 0x55, 0x73, 0x0b, 0xf5, 0x6a, 0x91, 0x76, 0xc2, 0x24, 0xdc, 0xa3, 0x4d, 0x4d, + 0x47, 0x44, 0xa1, 0xf1, 0x4d, 0x98, 0x76, 0x45, 0xa8, 0x1a, 0x2a, 0x37, 0x67, 0xe3, 0x93, 0xda, + 0x74, 0x6b, 0x8f, 0x90, 0x24, 0x86, 0x97, 0xe0, 0x3f, 0xc9, 0x7b, 0xd4, 0xd7, 0xa7, 0x13, 0xd3, + 0x48, 0x7a, 0xc0, 0xbb, 0x50, 0x76, 0x8e, 0x1c, 0xe6, 0x39, 0x6d, 0xe6, 0x31, 0x39, 0xd0, 0x67, + 0x54, 0xb9, 0xdb, 0xe3, 0xca, 0xed, 0x05, 0xd4, 0xb5, 0x1a, 0x43, 0x04, 0x32, 0x42, 0x37, 0x3f, + 0x22, 0xa8, 0x16, 0x4f, 0x95, 0xb9, 0x7e, 0x95, 0xcb, 0xc3, 0xcf, 0x61, 0x51, 0x81, 0xfa, 0xb4, + 0xdf, 0xa6, 0xa1, 0x38, 0x64, 0x81, 0x9a, 0x68, 0xa1, 0xbe, 0x3e, 0xb1, 0xaf, 0xdd, 0xbf, 0x70, + 0xb2, 0x90, 0xf0, 0xcf, 0xce, 0xe6, 0x2a, 0xac, 0x6c, 0x53, 0x49, 0x38, 0x97, 0xad, 0xc6, 0x45, + 0xb3, 0xcd, 0xc7, 0x50, 0x2d, 0x4e, 0x67, 0x5d, 0xaf, 0x8d, 0xde, 0x77, 0xd2, 0x79, 0x79, 0xf4, + 0x3a, 0x97, 0xe1, 0xc6, 0x36, 0x95, 0x2f, 0x7c, 0x8f, 0xbb, 0xbd, 0xa7, 0x74, 0x90, 0x0b, 0x87, + 0xb0, 0x34, 0x1a, 0xce, 0x04, 0x57, 0x01, 0x22, 0x15, 0xdc, 0xef, 0xd1, 0x41, 0xa6, 0x37, 0x17, + 0xe5, 0x30, 0xfc, 0x08, 0x66, 0x8f, 0x68, 0x28, 0x18, 0xf7, 0xb3, 0xdd, 0x5a, 0x29, 0x1a, 0xfc, + 0x65, 0x0a, 0x69, 0xce, 0x1c, 0x9f, 0xd4, 0xa6, 0x48, 0xce, 0xa8, 0xbf, 0xd7, 0x40, 0x6b, 0x35, + 0xf0, 0x3b, 0xa4, 0x6a, 0x5f, 0x18, 0x0a, 0xdb, 0x45, 0x5a, 0x13, 0xdc, 0xa9, 0x6c, 0x5e, 0x9d, + 0x90, 0x8e, 0x67, 0xfe, 0xff, 0xed, 0xeb, 0xef, 0xcf, 0x9a, 0x76, 0x1d, 0xe1, 0x37, 0x50, 0x1e, + 0x36, 0x00, 0xaf, 0x8f, 0xd1, 0x3a, 0xef, 0x5c, 0x65, 0xe3, 0x72, 0x60, 0x56, 0x6c, 0x59, 0x15, + 0x5b, 0x84, 0x6b, 0x0a, 0x79, 0xb7, 0xef, 0xf8, 0x4e, 0x97, 0x86, 0xf5, 0x4f, 0x1a, 0xa8, 0xbd, + 0xca, 0xac, 0x28, 0xda, 0xca, 0x62, 0x2b, 0x26, 0xfc, 0x2b, 0x8b, 0xad, 0x98, 0xb4, 0xf0, 0x43, + 0x56, 0x7c, 0x40, 0xb0, 0x5c, 0xf8, 0x24, 0xe1, 0xcd, 0x71, 0x6b, 0x3d, 0xee, 0x0d, 0xac, 0x6c, + 0xfd, 0x03, 0xe3, 0x7c, 0x23, 0x4d, 0xfd, 0xf8, 0xd4, 0x98, 0xfa, 0x79, 0x6a, 0x4c, 0xbd, 0x8d, + 0x0d, 0x74, 0x1c, 0x1b, 0xe8, 0x7b, 0x6c, 0xa0, 0x5f, 0xb1, 0x81, 0xda, 0x25, 0xf5, 0x02, 0xdf, + 0xfb, 0x13, 0x00, 0x00, 0xff, 0xff, 0xe1, 0xda, 0xca, 0xba, 0x67, 0x06, 0x00, 0x00, +} diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/ca.proto 
b/extender/code/vendor/github.com/docker/swarmkit/api/ca.proto new file mode 100644 index 000000000..e26c8f35a --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/api/ca.proto @@ -0,0 +1,72 @@ +syntax = "proto3"; + +package docker.swarmkit.v1; + +import "github.com/docker/swarmkit/api/types.proto"; +import "github.com/docker/swarmkit/api/specs.proto"; +import "gogoproto/gogo.proto"; +import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto"; + +// CA defines the RPC methods for requesting certificates from a CA. + +service CA { + rpc GetRootCACertificate(GetRootCACertificateRequest) returns (GetRootCACertificateResponse) { + option (docker.protobuf.plugin.tls_authorization) = { insecure: true }; + }; + // GetUnlockKey returns the current unlock key for the cluster for the role of the client + // asking. + rpc GetUnlockKey(GetUnlockKeyRequest) returns (GetUnlockKeyResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: ["swarm-manager"] }; + }; +} + +service NodeCA { + rpc IssueNodeCertificate(IssueNodeCertificateRequest) returns (IssueNodeCertificateResponse) { + option (docker.protobuf.plugin.tls_authorization) = { insecure: true }; + }; + rpc NodeCertificateStatus(NodeCertificateStatusRequest) returns (NodeCertificateStatusResponse) { + option (docker.protobuf.plugin.tls_authorization) = { insecure: true }; + }; +} + +message NodeCertificateStatusRequest { + string node_id = 1; +} + +message NodeCertificateStatusResponse { + IssuanceStatus status = 1; + Certificate certificate = 2; +} + +message IssueNodeCertificateRequest { + // DEPRECATED: Role is now selected based on which secret is matched. + NodeRole role = 1 [deprecated=true]; + + // CSR is the certificate signing request. + bytes csr = 2 [(gogoproto.customname) = "CSR"]; + + // Token represents a user-provided string that is necessary for new + // nodes to join the cluster + string token = 3; + + // Availability allows a user to control the current scheduling status of a node + NodeSpec.Availability availability = 4; +} + +message IssueNodeCertificateResponse { + string node_id = 1; + NodeSpec.Membership node_membership = 2; +} + +message GetRootCACertificateRequest {} + +message GetRootCACertificateResponse { + bytes certificate = 1; +} + +message GetUnlockKeyRequest {} + +message GetUnlockKeyResponse { + bytes unlock_key = 1; + Version version = 2 [(gogoproto.nullable) = false]; +} diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/control.pb.go b/extender/code/vendor/github.com/docker/swarmkit/api/control.pb.go new file mode 100644 index 000000000..2f158042f --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/api/control.pb.go @@ -0,0 +1,16088 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: github.com/docker/swarmkit/api/control.proto + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/docker/swarmkit/protobuf/plugin" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" + +import raftselector "github.com/docker/swarmkit/manager/raftselector" +import codes "google.golang.org/grpc/codes" +import status "google.golang.org/grpc/status" +import metadata "google.golang.org/grpc/metadata" +import peer "google.golang.org/grpc/peer" +import rafttime "time" + +import strings "strings" +import reflect "reflect" +import sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type UpdateServiceRequest_Rollback int32 + +const ( + // This is not a rollback. The spec field of the request will + // be honored. + UpdateServiceRequest_NONE UpdateServiceRequest_Rollback = 0 + // Roll back the service - get spec from the service's + // previous_spec. + UpdateServiceRequest_PREVIOUS UpdateServiceRequest_Rollback = 1 +) + +var UpdateServiceRequest_Rollback_name = map[int32]string{ + 0: "NONE", + 1: "PREVIOUS", +} +var UpdateServiceRequest_Rollback_value = map[string]int32{ + "NONE": 0, + "PREVIOUS": 1, +} + +func (x UpdateServiceRequest_Rollback) String() string { + return proto.EnumName(UpdateServiceRequest_Rollback_name, int32(x)) +} +func (UpdateServiceRequest_Rollback) EnumDescriptor() ([]byte, []int) { + return fileDescriptorControl, []int{18, 0} +} + +type GetNodeRequest struct { + NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` +} + +func (m *GetNodeRequest) Reset() { *m = GetNodeRequest{} } +func (*GetNodeRequest) ProtoMessage() {} +func (*GetNodeRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{0} } + +type GetNodeResponse struct { + Node *Node `protobuf:"bytes,1,opt,name=node" json:"node,omitempty"` +} + +func (m *GetNodeResponse) Reset() { *m = GetNodeResponse{} } +func (*GetNodeResponse) ProtoMessage() {} +func (*GetNodeResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{1} } + +type ListNodesRequest struct { + Filters *ListNodesRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListNodesRequest) Reset() { *m = ListNodesRequest{} } +func (*ListNodesRequest) ProtoMessage() {} +func (*ListNodesRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{2} } + +type ListNodesRequest_Filters struct { + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Memberships []NodeSpec_Membership `protobuf:"varint,4,rep,name=memberships,enum=docker.swarmkit.v1.NodeSpec_Membership" json:"memberships,omitempty"` + Roles []NodeRole `protobuf:"varint,5,rep,name=roles,enum=docker.swarmkit.v1.NodeRole" json:"roles,omitempty"` + // NamePrefixes matches all objects with the given prefixes + NamePrefixes []string `protobuf:"bytes,6,rep,name=name_prefixes,json=namePrefixes" 
json:"name_prefixes,omitempty"` +} + +func (m *ListNodesRequest_Filters) Reset() { *m = ListNodesRequest_Filters{} } +func (*ListNodesRequest_Filters) ProtoMessage() {} +func (*ListNodesRequest_Filters) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{2, 0} +} + +type ListNodesResponse struct { + Nodes []*Node `protobuf:"bytes,1,rep,name=nodes" json:"nodes,omitempty"` +} + +func (m *ListNodesResponse) Reset() { *m = ListNodesResponse{} } +func (*ListNodesResponse) ProtoMessage() {} +func (*ListNodesResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{3} } + +// UpdateNodeRequest requests an update to the specified node. This may be used +// to request a new availability for a node, such as PAUSE. Invalid updates +// will be denied and cause an error. +type UpdateNodeRequest struct { + NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + NodeVersion *Version `protobuf:"bytes,2,opt,name=node_version,json=nodeVersion" json:"node_version,omitempty"` + Spec *NodeSpec `protobuf:"bytes,3,opt,name=spec" json:"spec,omitempty"` +} + +func (m *UpdateNodeRequest) Reset() { *m = UpdateNodeRequest{} } +func (*UpdateNodeRequest) ProtoMessage() {} +func (*UpdateNodeRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{4} } + +type UpdateNodeResponse struct { + Node *Node `protobuf:"bytes,1,opt,name=node" json:"node,omitempty"` +} + +func (m *UpdateNodeResponse) Reset() { *m = UpdateNodeResponse{} } +func (*UpdateNodeResponse) ProtoMessage() {} +func (*UpdateNodeResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{5} } + +// RemoveNodeRequest requests to delete the specified node from store. +type RemoveNodeRequest struct { + NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"` +} + +func (m *RemoveNodeRequest) Reset() { *m = RemoveNodeRequest{} } +func (*RemoveNodeRequest) ProtoMessage() {} +func (*RemoveNodeRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{6} } + +type RemoveNodeResponse struct { +} + +func (m *RemoveNodeResponse) Reset() { *m = RemoveNodeResponse{} } +func (*RemoveNodeResponse) ProtoMessage() {} +func (*RemoveNodeResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{7} } + +type GetTaskRequest struct { + TaskID string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` +} + +func (m *GetTaskRequest) Reset() { *m = GetTaskRequest{} } +func (*GetTaskRequest) ProtoMessage() {} +func (*GetTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{8} } + +type GetTaskResponse struct { + Task *Task `protobuf:"bytes,1,opt,name=task" json:"task,omitempty"` +} + +func (m *GetTaskResponse) Reset() { *m = GetTaskResponse{} } +func (*GetTaskResponse) ProtoMessage() {} +func (*GetTaskResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{9} } + +type RemoveTaskRequest struct { + TaskID string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` +} + +func (m *RemoveTaskRequest) Reset() { *m = RemoveTaskRequest{} } +func (*RemoveTaskRequest) ProtoMessage() {} +func (*RemoveTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{10} } + +type RemoveTaskResponse struct { +} + +func (m *RemoveTaskResponse) Reset() { *m = RemoveTaskResponse{} } +func (*RemoveTaskResponse) 
ProtoMessage() {} +func (*RemoveTaskResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{11} } + +type ListTasksRequest struct { + Filters *ListTasksRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListTasksRequest) Reset() { *m = ListTasksRequest{} } +func (*ListTasksRequest) ProtoMessage() {} +func (*ListTasksRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{12} } + +type ListTasksRequest_Filters struct { + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ServiceIDs []string `protobuf:"bytes,4,rep,name=service_ids,json=serviceIds" json:"service_ids,omitempty"` + NodeIDs []string `protobuf:"bytes,5,rep,name=node_ids,json=nodeIds" json:"node_ids,omitempty"` + DesiredStates []TaskState `protobuf:"varint,6,rep,name=desired_states,json=desiredStates,enum=docker.swarmkit.v1.TaskState" json:"desired_states,omitempty"` + // NamePrefixes matches all objects with the given prefixes + NamePrefixes []string `protobuf:"bytes,7,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"` + Runtimes []string `protobuf:"bytes,9,rep,name=runtimes" json:"runtimes,omitempty"` + // UpToDate matches tasks that are consistent with the current + // service definition. + // Note: this is intended for internal status reporting rather + // than being exposed to users. It may be removed in the future. + UpToDate bool `protobuf:"varint,8,opt,name=up_to_date,json=upToDate,proto3" json:"up_to_date,omitempty"` +} + +func (m *ListTasksRequest_Filters) Reset() { *m = ListTasksRequest_Filters{} } +func (*ListTasksRequest_Filters) ProtoMessage() {} +func (*ListTasksRequest_Filters) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{12, 0} +} + +type ListTasksResponse struct { + Tasks []*Task `protobuf:"bytes,1,rep,name=tasks" json:"tasks,omitempty"` +} + +func (m *ListTasksResponse) Reset() { *m = ListTasksResponse{} } +func (*ListTasksResponse) ProtoMessage() {} +func (*ListTasksResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{13} } + +type CreateServiceRequest struct { + Spec *ServiceSpec `protobuf:"bytes,1,opt,name=spec" json:"spec,omitempty"` +} + +func (m *CreateServiceRequest) Reset() { *m = CreateServiceRequest{} } +func (*CreateServiceRequest) ProtoMessage() {} +func (*CreateServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{14} } + +type CreateServiceResponse struct { + Service *Service `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"` +} + +func (m *CreateServiceResponse) Reset() { *m = CreateServiceResponse{} } +func (*CreateServiceResponse) ProtoMessage() {} +func (*CreateServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{15} } + +type GetServiceRequest struct { + ServiceID string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + InsertDefaults bool `protobuf:"varint,2,opt,name=insert_defaults,json=insertDefaults,proto3" json:"insert_defaults,omitempty"` +} + +func (m *GetServiceRequest) Reset() { *m = GetServiceRequest{} } +func (*GetServiceRequest) ProtoMessage() {} +func (*GetServiceRequest) Descriptor() ([]byte, []int) { return 
fileDescriptorControl, []int{16} } + +type GetServiceResponse struct { + Service *Service `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"` +} + +func (m *GetServiceResponse) Reset() { *m = GetServiceResponse{} } +func (*GetServiceResponse) ProtoMessage() {} +func (*GetServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{17} } + +type UpdateServiceRequest struct { + ServiceID string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + ServiceVersion *Version `protobuf:"bytes,2,opt,name=service_version,json=serviceVersion" json:"service_version,omitempty"` + Spec *ServiceSpec `protobuf:"bytes,3,opt,name=spec" json:"spec,omitempty"` + // Rollback may be set to PREVIOUS to request a rollback (the service's + // spec will be set to the value of its previous_spec field). In this + // case, the spec field of this request is ignored. + Rollback UpdateServiceRequest_Rollback `protobuf:"varint,4,opt,name=rollback,proto3,enum=docker.swarmkit.v1.UpdateServiceRequest_Rollback" json:"rollback,omitempty"` +} + +func (m *UpdateServiceRequest) Reset() { *m = UpdateServiceRequest{} } +func (*UpdateServiceRequest) ProtoMessage() {} +func (*UpdateServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{18} } + +type UpdateServiceResponse struct { + Service *Service `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"` +} + +func (m *UpdateServiceResponse) Reset() { *m = UpdateServiceResponse{} } +func (*UpdateServiceResponse) ProtoMessage() {} +func (*UpdateServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{19} } + +type RemoveServiceRequest struct { + ServiceID string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` +} + +func (m *RemoveServiceRequest) Reset() { *m = RemoveServiceRequest{} } +func (*RemoveServiceRequest) ProtoMessage() {} +func (*RemoveServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{20} } + +type RemoveServiceResponse struct { +} + +func (m *RemoveServiceResponse) Reset() { *m = RemoveServiceResponse{} } +func (*RemoveServiceResponse) ProtoMessage() {} +func (*RemoveServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{21} } + +type ListServicesRequest struct { + Filters *ListServicesRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListServicesRequest) Reset() { *m = ListServicesRequest{} } +func (*ListServicesRequest) ProtoMessage() {} +func (*ListServicesRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{22} } + +type ListServicesRequest_Filters struct { + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // NamePrefixes matches all objects with the given prefixes + NamePrefixes []string `protobuf:"bytes,4,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"` + Runtimes []string `protobuf:"bytes,5,rep,name=runtimes" json:"runtimes,omitempty"` +} + +func (m *ListServicesRequest_Filters) Reset() { *m = ListServicesRequest_Filters{} } +func (*ListServicesRequest_Filters) ProtoMessage() {} +func (*ListServicesRequest_Filters) Descriptor() 
([]byte, []int) { + return fileDescriptorControl, []int{22, 0} +} + +type ListServicesResponse struct { + Services []*Service `protobuf:"bytes,1,rep,name=services" json:"services,omitempty"` +} + +func (m *ListServicesResponse) Reset() { *m = ListServicesResponse{} } +func (*ListServicesResponse) ProtoMessage() {} +func (*ListServicesResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{23} } + +type CreateNetworkRequest struct { + Spec *NetworkSpec `protobuf:"bytes,1,opt,name=spec" json:"spec,omitempty"` +} + +func (m *CreateNetworkRequest) Reset() { *m = CreateNetworkRequest{} } +func (*CreateNetworkRequest) ProtoMessage() {} +func (*CreateNetworkRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{24} } + +type CreateNetworkResponse struct { + Network *Network `protobuf:"bytes,1,opt,name=network" json:"network,omitempty"` +} + +func (m *CreateNetworkResponse) Reset() { *m = CreateNetworkResponse{} } +func (*CreateNetworkResponse) ProtoMessage() {} +func (*CreateNetworkResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{25} } + +type GetNetworkRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + NetworkID string `protobuf:"bytes,2,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` +} + +func (m *GetNetworkRequest) Reset() { *m = GetNetworkRequest{} } +func (*GetNetworkRequest) ProtoMessage() {} +func (*GetNetworkRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{26} } + +type GetNetworkResponse struct { + Network *Network `protobuf:"bytes,1,opt,name=network" json:"network,omitempty"` +} + +func (m *GetNetworkResponse) Reset() { *m = GetNetworkResponse{} } +func (*GetNetworkResponse) ProtoMessage() {} +func (*GetNetworkResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{27} } + +type RemoveNetworkRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + NetworkID string `protobuf:"bytes,2,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` +} + +func (m *RemoveNetworkRequest) Reset() { *m = RemoveNetworkRequest{} } +func (*RemoveNetworkRequest) ProtoMessage() {} +func (*RemoveNetworkRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{28} } + +type RemoveNetworkResponse struct { +} + +func (m *RemoveNetworkResponse) Reset() { *m = RemoveNetworkResponse{} } +func (*RemoveNetworkResponse) ProtoMessage() {} +func (*RemoveNetworkResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{29} } + +type ListNetworksRequest struct { + Filters *ListNetworksRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListNetworksRequest) Reset() { *m = ListNetworksRequest{} } +func (*ListNetworksRequest) ProtoMessage() {} +func (*ListNetworksRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{30} } + +type ListNetworksRequest_Filters struct { + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // NamePrefixes matches all objects with the given prefixes + NamePrefixes []string `protobuf:"bytes,4,rep,name=name_prefixes,json=namePrefixes" 
json:"name_prefixes,omitempty"` +} + +func (m *ListNetworksRequest_Filters) Reset() { *m = ListNetworksRequest_Filters{} } +func (*ListNetworksRequest_Filters) ProtoMessage() {} +func (*ListNetworksRequest_Filters) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{30, 0} +} + +type ListNetworksResponse struct { + Networks []*Network `protobuf:"bytes,1,rep,name=networks" json:"networks,omitempty"` +} + +func (m *ListNetworksResponse) Reset() { *m = ListNetworksResponse{} } +func (*ListNetworksResponse) ProtoMessage() {} +func (*ListNetworksResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{31} } + +type GetClusterRequest struct { + ClusterID string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` +} + +func (m *GetClusterRequest) Reset() { *m = GetClusterRequest{} } +func (*GetClusterRequest) ProtoMessage() {} +func (*GetClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{32} } + +type GetClusterResponse struct { + Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster" json:"cluster,omitempty"` +} + +func (m *GetClusterResponse) Reset() { *m = GetClusterResponse{} } +func (*GetClusterResponse) ProtoMessage() {} +func (*GetClusterResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{33} } + +type ListClustersRequest struct { + Filters *ListClustersRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListClustersRequest) Reset() { *m = ListClustersRequest{} } +func (*ListClustersRequest) ProtoMessage() {} +func (*ListClustersRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{34} } + +type ListClustersRequest_Filters struct { + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // NamePrefixes matches all objects with the given prefixes + NamePrefixes []string `protobuf:"bytes,4,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"` +} + +func (m *ListClustersRequest_Filters) Reset() { *m = ListClustersRequest_Filters{} } +func (*ListClustersRequest_Filters) ProtoMessage() {} +func (*ListClustersRequest_Filters) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{34, 0} +} + +type ListClustersResponse struct { + Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters" json:"clusters,omitempty"` +} + +func (m *ListClustersResponse) Reset() { *m = ListClustersResponse{} } +func (*ListClustersResponse) ProtoMessage() {} +func (*ListClustersResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{35} } + +// KeyRotation tells UpdateCluster what items to rotate +type KeyRotation struct { + // WorkerJoinToken tells UpdateCluster to rotate the worker secret token. + WorkerJoinToken bool `protobuf:"varint,1,opt,name=worker_join_token,json=workerJoinToken,proto3" json:"worker_join_token,omitempty"` + // ManagerJoinToken tells UpdateCluster to rotate the manager secret token. 
+ ManagerJoinToken bool `protobuf:"varint,2,opt,name=manager_join_token,json=managerJoinToken,proto3" json:"manager_join_token,omitempty"` + // ManagerUnlockKey tells UpdateCluster to rotate the manager unlock key + ManagerUnlockKey bool `protobuf:"varint,3,opt,name=manager_unlock_key,json=managerUnlockKey,proto3" json:"manager_unlock_key,omitempty"` +} + +func (m *KeyRotation) Reset() { *m = KeyRotation{} } +func (*KeyRotation) ProtoMessage() {} +func (*KeyRotation) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{36} } + +type UpdateClusterRequest struct { + // ClusterID is the cluster ID to update. + ClusterID string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // ClusterVersion is the version of the cluster being updated. + ClusterVersion *Version `protobuf:"bytes,2,opt,name=cluster_version,json=clusterVersion" json:"cluster_version,omitempty"` + // Spec is the new spec to apply to the cluster. + Spec *ClusterSpec `protobuf:"bytes,3,opt,name=spec" json:"spec,omitempty"` + // Rotation contains flags for join token and unlock key rotation + Rotation KeyRotation `protobuf:"bytes,4,opt,name=rotation" json:"rotation"` +} + +func (m *UpdateClusterRequest) Reset() { *m = UpdateClusterRequest{} } +func (*UpdateClusterRequest) ProtoMessage() {} +func (*UpdateClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{37} } + +type UpdateClusterResponse struct { + Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster" json:"cluster,omitempty"` +} + +func (m *UpdateClusterResponse) Reset() { *m = UpdateClusterResponse{} } +func (*UpdateClusterResponse) ProtoMessage() {} +func (*UpdateClusterResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{38} } + +// GetSecretRequest is the request to get a `Secret` object given a secret id. +type GetSecretRequest struct { + SecretID string `protobuf:"bytes,1,opt,name=secret_id,json=secretId,proto3" json:"secret_id,omitempty"` +} + +func (m *GetSecretRequest) Reset() { *m = GetSecretRequest{} } +func (*GetSecretRequest) ProtoMessage() {} +func (*GetSecretRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{39} } + +// GetSecretResponse contains the Secret corresponding to the id in +// `GetSecretRequest`, but the `Secret.Spec.Data` field in each `Secret` +// object should be nil instead of actually containing the secret bytes. +type GetSecretResponse struct { + Secret *Secret `protobuf:"bytes,1,opt,name=secret" json:"secret,omitempty"` +} + +func (m *GetSecretResponse) Reset() { *m = GetSecretResponse{} } +func (*GetSecretResponse) ProtoMessage() {} +func (*GetSecretResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{40} } + +type UpdateSecretRequest struct { + // SecretID is the secret ID to update. + SecretID string `protobuf:"bytes,1,opt,name=secret_id,json=secretId,proto3" json:"secret_id,omitempty"` + // SecretVersion is the version of the secret being updated. + SecretVersion *Version `protobuf:"bytes,2,opt,name=secret_version,json=secretVersion" json:"secret_version,omitempty"` + // Spec is the new spec to apply to the Secret + // Only some fields are allowed to be updated. 
+ Spec *SecretSpec `protobuf:"bytes,3,opt,name=spec" json:"spec,omitempty"` +} + +func (m *UpdateSecretRequest) Reset() { *m = UpdateSecretRequest{} } +func (*UpdateSecretRequest) ProtoMessage() {} +func (*UpdateSecretRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{41} } + +type UpdateSecretResponse struct { + Secret *Secret `protobuf:"bytes,1,opt,name=secret" json:"secret,omitempty"` +} + +func (m *UpdateSecretResponse) Reset() { *m = UpdateSecretResponse{} } +func (*UpdateSecretResponse) ProtoMessage() {} +func (*UpdateSecretResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{42} } + +// ListSecretRequest is the request to list all non-internal secrets in the secret store, +// or all secrets filtered by (name or name prefix or id prefix) and labels. +type ListSecretsRequest struct { + Filters *ListSecretsRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListSecretsRequest) Reset() { *m = ListSecretsRequest{} } +func (*ListSecretsRequest) ProtoMessage() {} +func (*ListSecretsRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{43} } + +type ListSecretsRequest_Filters struct { + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + NamePrefixes []string `protobuf:"bytes,4,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"` +} + +func (m *ListSecretsRequest_Filters) Reset() { *m = ListSecretsRequest_Filters{} } +func (*ListSecretsRequest_Filters) ProtoMessage() {} +func (*ListSecretsRequest_Filters) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{43, 0} +} + +// ListSecretResponse contains a list of all the secrets that match the name or +// name prefix filters provided in `ListSecretRequest`. The `Secret.Spec.Data` +// field in each `Secret` object should be nil instead of actually containing +// the secret bytes. +type ListSecretsResponse struct { + Secrets []*Secret `protobuf:"bytes,1,rep,name=secrets" json:"secrets,omitempty"` +} + +func (m *ListSecretsResponse) Reset() { *m = ListSecretsResponse{} } +func (*ListSecretsResponse) ProtoMessage() {} +func (*ListSecretsResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{44} } + +// CreateSecretRequest specifies a new secret (it will not update an existing +// secret) to create. +type CreateSecretRequest struct { + Spec *SecretSpec `protobuf:"bytes,1,opt,name=spec" json:"spec,omitempty"` +} + +func (m *CreateSecretRequest) Reset() { *m = CreateSecretRequest{} } +func (*CreateSecretRequest) ProtoMessage() {} +func (*CreateSecretRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{45} } + +// CreateSecretResponse contains the newly created `Secret` corresponding to the +// name in `CreateSecretRequest`. The `Secret.Spec.Data` field should be nil instead +// of actually containing the secret bytes. 
+type CreateSecretResponse struct { + Secret *Secret `protobuf:"bytes,1,opt,name=secret" json:"secret,omitempty"` +} + +func (m *CreateSecretResponse) Reset() { *m = CreateSecretResponse{} } +func (*CreateSecretResponse) ProtoMessage() {} +func (*CreateSecretResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{46} } + +// RemoveSecretRequest contains the ID of the secret that should be removed. This +// removes all versions of the secret. +type RemoveSecretRequest struct { + SecretID string `protobuf:"bytes,1,opt,name=secret_id,json=secretId,proto3" json:"secret_id,omitempty"` +} + +func (m *RemoveSecretRequest) Reset() { *m = RemoveSecretRequest{} } +func (*RemoveSecretRequest) ProtoMessage() {} +func (*RemoveSecretRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{47} } + +// RemoveSecretResponse is an empty object indicating the successful removal of +// a secret. +type RemoveSecretResponse struct { +} + +func (m *RemoveSecretResponse) Reset() { *m = RemoveSecretResponse{} } +func (*RemoveSecretResponse) ProtoMessage() {} +func (*RemoveSecretResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{48} } + +// GetConfigRequest is the request to get a `Config` object given a config id. +type GetConfigRequest struct { + ConfigID string `protobuf:"bytes,1,opt,name=config_id,json=configId,proto3" json:"config_id,omitempty"` +} + +func (m *GetConfigRequest) Reset() { *m = GetConfigRequest{} } +func (*GetConfigRequest) ProtoMessage() {} +func (*GetConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{49} } + +// GetConfigResponse contains the Config corresponding to the id in +// `GetConfigRequest`. +type GetConfigResponse struct { + Config *Config `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"` +} + +func (m *GetConfigResponse) Reset() { *m = GetConfigResponse{} } +func (*GetConfigResponse) ProtoMessage() {} +func (*GetConfigResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{50} } + +type UpdateConfigRequest struct { + // ConfigID is the config ID to update. + ConfigID string `protobuf:"bytes,1,opt,name=config_id,json=configId,proto3" json:"config_id,omitempty"` + // ConfigVersion is the version of the config being updated. + ConfigVersion *Version `protobuf:"bytes,2,opt,name=config_version,json=configVersion" json:"config_version,omitempty"` + // Spec is the new spec to apply to the Config + // Only some fields are allowed to be updated. + Spec *ConfigSpec `protobuf:"bytes,3,opt,name=spec" json:"spec,omitempty"` +} + +func (m *UpdateConfigRequest) Reset() { *m = UpdateConfigRequest{} } +func (*UpdateConfigRequest) ProtoMessage() {} +func (*UpdateConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{51} } + +type UpdateConfigResponse struct { + Config *Config `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"` +} + +func (m *UpdateConfigResponse) Reset() { *m = UpdateConfigResponse{} } +func (*UpdateConfigResponse) ProtoMessage() {} +func (*UpdateConfigResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{52} } + +// ListConfigRequest is the request to list all configs in the config store, +// or all configs filtered by (name or name prefix or id prefix) and labels. 
+type ListConfigsRequest struct { + Filters *ListConfigsRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListConfigsRequest) Reset() { *m = ListConfigsRequest{} } +func (*ListConfigsRequest) ProtoMessage() {} +func (*ListConfigsRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{53} } + +type ListConfigsRequest_Filters struct { + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + NamePrefixes []string `protobuf:"bytes,4,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"` +} + +func (m *ListConfigsRequest_Filters) Reset() { *m = ListConfigsRequest_Filters{} } +func (*ListConfigsRequest_Filters) ProtoMessage() {} +func (*ListConfigsRequest_Filters) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{53, 0} +} + +// ListConfigResponse contains a list of all the configs that match the name or +// name prefix filters provided in `ListConfigRequest`. +type ListConfigsResponse struct { + Configs []*Config `protobuf:"bytes,1,rep,name=configs" json:"configs,omitempty"` +} + +func (m *ListConfigsResponse) Reset() { *m = ListConfigsResponse{} } +func (*ListConfigsResponse) ProtoMessage() {} +func (*ListConfigsResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{54} } + +// CreateConfigRequest specifies a new config (it will not update an existing +// config) to create. +type CreateConfigRequest struct { + Spec *ConfigSpec `protobuf:"bytes,1,opt,name=spec" json:"spec,omitempty"` +} + +func (m *CreateConfigRequest) Reset() { *m = CreateConfigRequest{} } +func (*CreateConfigRequest) ProtoMessage() {} +func (*CreateConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{55} } + +// CreateConfigResponse contains the newly created `Config` corresponding to the +// name in `CreateConfigRequest`. +type CreateConfigResponse struct { + Config *Config `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"` +} + +func (m *CreateConfigResponse) Reset() { *m = CreateConfigResponse{} } +func (*CreateConfigResponse) ProtoMessage() {} +func (*CreateConfigResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{56} } + +// RemoveConfigRequest contains the ID of the config that should be removed. This +// removes all versions of the config. +type RemoveConfigRequest struct { + ConfigID string `protobuf:"bytes,1,opt,name=config_id,json=configId,proto3" json:"config_id,omitempty"` +} + +func (m *RemoveConfigRequest) Reset() { *m = RemoveConfigRequest{} } +func (*RemoveConfigRequest) ProtoMessage() {} +func (*RemoveConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{57} } + +// RemoveConfigResponse is an empty object indicating the successful removal of +// a config. 
+type RemoveConfigResponse struct { +} + +func (m *RemoveConfigResponse) Reset() { *m = RemoveConfigResponse{} } +func (*RemoveConfigResponse) ProtoMessage() {} +func (*RemoveConfigResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{58} } + +func init() { + proto.RegisterType((*GetNodeRequest)(nil), "docker.swarmkit.v1.GetNodeRequest") + proto.RegisterType((*GetNodeResponse)(nil), "docker.swarmkit.v1.GetNodeResponse") + proto.RegisterType((*ListNodesRequest)(nil), "docker.swarmkit.v1.ListNodesRequest") + proto.RegisterType((*ListNodesRequest_Filters)(nil), "docker.swarmkit.v1.ListNodesRequest.Filters") + proto.RegisterType((*ListNodesResponse)(nil), "docker.swarmkit.v1.ListNodesResponse") + proto.RegisterType((*UpdateNodeRequest)(nil), "docker.swarmkit.v1.UpdateNodeRequest") + proto.RegisterType((*UpdateNodeResponse)(nil), "docker.swarmkit.v1.UpdateNodeResponse") + proto.RegisterType((*RemoveNodeRequest)(nil), "docker.swarmkit.v1.RemoveNodeRequest") + proto.RegisterType((*RemoveNodeResponse)(nil), "docker.swarmkit.v1.RemoveNodeResponse") + proto.RegisterType((*GetTaskRequest)(nil), "docker.swarmkit.v1.GetTaskRequest") + proto.RegisterType((*GetTaskResponse)(nil), "docker.swarmkit.v1.GetTaskResponse") + proto.RegisterType((*RemoveTaskRequest)(nil), "docker.swarmkit.v1.RemoveTaskRequest") + proto.RegisterType((*RemoveTaskResponse)(nil), "docker.swarmkit.v1.RemoveTaskResponse") + proto.RegisterType((*ListTasksRequest)(nil), "docker.swarmkit.v1.ListTasksRequest") + proto.RegisterType((*ListTasksRequest_Filters)(nil), "docker.swarmkit.v1.ListTasksRequest.Filters") + proto.RegisterType((*ListTasksResponse)(nil), "docker.swarmkit.v1.ListTasksResponse") + proto.RegisterType((*CreateServiceRequest)(nil), "docker.swarmkit.v1.CreateServiceRequest") + proto.RegisterType((*CreateServiceResponse)(nil), "docker.swarmkit.v1.CreateServiceResponse") + proto.RegisterType((*GetServiceRequest)(nil), "docker.swarmkit.v1.GetServiceRequest") + proto.RegisterType((*GetServiceResponse)(nil), "docker.swarmkit.v1.GetServiceResponse") + proto.RegisterType((*UpdateServiceRequest)(nil), "docker.swarmkit.v1.UpdateServiceRequest") + proto.RegisterType((*UpdateServiceResponse)(nil), "docker.swarmkit.v1.UpdateServiceResponse") + proto.RegisterType((*RemoveServiceRequest)(nil), "docker.swarmkit.v1.RemoveServiceRequest") + proto.RegisterType((*RemoveServiceResponse)(nil), "docker.swarmkit.v1.RemoveServiceResponse") + proto.RegisterType((*ListServicesRequest)(nil), "docker.swarmkit.v1.ListServicesRequest") + proto.RegisterType((*ListServicesRequest_Filters)(nil), "docker.swarmkit.v1.ListServicesRequest.Filters") + proto.RegisterType((*ListServicesResponse)(nil), "docker.swarmkit.v1.ListServicesResponse") + proto.RegisterType((*CreateNetworkRequest)(nil), "docker.swarmkit.v1.CreateNetworkRequest") + proto.RegisterType((*CreateNetworkResponse)(nil), "docker.swarmkit.v1.CreateNetworkResponse") + proto.RegisterType((*GetNetworkRequest)(nil), "docker.swarmkit.v1.GetNetworkRequest") + proto.RegisterType((*GetNetworkResponse)(nil), "docker.swarmkit.v1.GetNetworkResponse") + proto.RegisterType((*RemoveNetworkRequest)(nil), "docker.swarmkit.v1.RemoveNetworkRequest") + proto.RegisterType((*RemoveNetworkResponse)(nil), "docker.swarmkit.v1.RemoveNetworkResponse") + proto.RegisterType((*ListNetworksRequest)(nil), "docker.swarmkit.v1.ListNetworksRequest") + proto.RegisterType((*ListNetworksRequest_Filters)(nil), "docker.swarmkit.v1.ListNetworksRequest.Filters") + proto.RegisterType((*ListNetworksResponse)(nil), 
"docker.swarmkit.v1.ListNetworksResponse") + proto.RegisterType((*GetClusterRequest)(nil), "docker.swarmkit.v1.GetClusterRequest") + proto.RegisterType((*GetClusterResponse)(nil), "docker.swarmkit.v1.GetClusterResponse") + proto.RegisterType((*ListClustersRequest)(nil), "docker.swarmkit.v1.ListClustersRequest") + proto.RegisterType((*ListClustersRequest_Filters)(nil), "docker.swarmkit.v1.ListClustersRequest.Filters") + proto.RegisterType((*ListClustersResponse)(nil), "docker.swarmkit.v1.ListClustersResponse") + proto.RegisterType((*KeyRotation)(nil), "docker.swarmkit.v1.KeyRotation") + proto.RegisterType((*UpdateClusterRequest)(nil), "docker.swarmkit.v1.UpdateClusterRequest") + proto.RegisterType((*UpdateClusterResponse)(nil), "docker.swarmkit.v1.UpdateClusterResponse") + proto.RegisterType((*GetSecretRequest)(nil), "docker.swarmkit.v1.GetSecretRequest") + proto.RegisterType((*GetSecretResponse)(nil), "docker.swarmkit.v1.GetSecretResponse") + proto.RegisterType((*UpdateSecretRequest)(nil), "docker.swarmkit.v1.UpdateSecretRequest") + proto.RegisterType((*UpdateSecretResponse)(nil), "docker.swarmkit.v1.UpdateSecretResponse") + proto.RegisterType((*ListSecretsRequest)(nil), "docker.swarmkit.v1.ListSecretsRequest") + proto.RegisterType((*ListSecretsRequest_Filters)(nil), "docker.swarmkit.v1.ListSecretsRequest.Filters") + proto.RegisterType((*ListSecretsResponse)(nil), "docker.swarmkit.v1.ListSecretsResponse") + proto.RegisterType((*CreateSecretRequest)(nil), "docker.swarmkit.v1.CreateSecretRequest") + proto.RegisterType((*CreateSecretResponse)(nil), "docker.swarmkit.v1.CreateSecretResponse") + proto.RegisterType((*RemoveSecretRequest)(nil), "docker.swarmkit.v1.RemoveSecretRequest") + proto.RegisterType((*RemoveSecretResponse)(nil), "docker.swarmkit.v1.RemoveSecretResponse") + proto.RegisterType((*GetConfigRequest)(nil), "docker.swarmkit.v1.GetConfigRequest") + proto.RegisterType((*GetConfigResponse)(nil), "docker.swarmkit.v1.GetConfigResponse") + proto.RegisterType((*UpdateConfigRequest)(nil), "docker.swarmkit.v1.UpdateConfigRequest") + proto.RegisterType((*UpdateConfigResponse)(nil), "docker.swarmkit.v1.UpdateConfigResponse") + proto.RegisterType((*ListConfigsRequest)(nil), "docker.swarmkit.v1.ListConfigsRequest") + proto.RegisterType((*ListConfigsRequest_Filters)(nil), "docker.swarmkit.v1.ListConfigsRequest.Filters") + proto.RegisterType((*ListConfigsResponse)(nil), "docker.swarmkit.v1.ListConfigsResponse") + proto.RegisterType((*CreateConfigRequest)(nil), "docker.swarmkit.v1.CreateConfigRequest") + proto.RegisterType((*CreateConfigResponse)(nil), "docker.swarmkit.v1.CreateConfigResponse") + proto.RegisterType((*RemoveConfigRequest)(nil), "docker.swarmkit.v1.RemoveConfigRequest") + proto.RegisterType((*RemoveConfigResponse)(nil), "docker.swarmkit.v1.RemoveConfigResponse") + proto.RegisterEnum("docker.swarmkit.v1.UpdateServiceRequest_Rollback", UpdateServiceRequest_Rollback_name, UpdateServiceRequest_Rollback_value) +} + +type authenticatedWrapperControlServer struct { + local ControlServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperControlServer(local ControlServer, authorize func(context.Context, []string) error) ControlServer { + return &authenticatedWrapperControlServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperControlServer) GetNode(ctx context.Context, r *GetNodeRequest) (*GetNodeResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return 
p.local.GetNode(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListNodes(ctx context.Context, r *ListNodesRequest) (*ListNodesResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListNodes(ctx, r) +} + +func (p *authenticatedWrapperControlServer) UpdateNode(ctx context.Context, r *UpdateNodeRequest) (*UpdateNodeResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.UpdateNode(ctx, r) +} + +func (p *authenticatedWrapperControlServer) RemoveNode(ctx context.Context, r *RemoveNodeRequest) (*RemoveNodeResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.RemoveNode(ctx, r) +} + +func (p *authenticatedWrapperControlServer) GetTask(ctx context.Context, r *GetTaskRequest) (*GetTaskResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetTask(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListTasks(ctx context.Context, r *ListTasksRequest) (*ListTasksResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListTasks(ctx, r) +} + +func (p *authenticatedWrapperControlServer) RemoveTask(ctx context.Context, r *RemoveTaskRequest) (*RemoveTaskResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.RemoveTask(ctx, r) +} + +func (p *authenticatedWrapperControlServer) GetService(ctx context.Context, r *GetServiceRequest) (*GetServiceResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetService(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListServices(ctx context.Context, r *ListServicesRequest) (*ListServicesResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListServices(ctx, r) +} + +func (p *authenticatedWrapperControlServer) CreateService(ctx context.Context, r *CreateServiceRequest) (*CreateServiceResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.CreateService(ctx, r) +} + +func (p *authenticatedWrapperControlServer) UpdateService(ctx context.Context, r *UpdateServiceRequest) (*UpdateServiceResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.UpdateService(ctx, r) +} + +func (p *authenticatedWrapperControlServer) RemoveService(ctx context.Context, r *RemoveServiceRequest) (*RemoveServiceResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.RemoveService(ctx, r) +} + +func (p *authenticatedWrapperControlServer) GetNetwork(ctx context.Context, r *GetNetworkRequest) (*GetNetworkResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetNetwork(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListNetworks(ctx context.Context, r *ListNetworksRequest) (*ListNetworksResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListNetworks(ctx, r) +} + +func (p *authenticatedWrapperControlServer) 
CreateNetwork(ctx context.Context, r *CreateNetworkRequest) (*CreateNetworkResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.CreateNetwork(ctx, r) +} + +func (p *authenticatedWrapperControlServer) RemoveNetwork(ctx context.Context, r *RemoveNetworkRequest) (*RemoveNetworkResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.RemoveNetwork(ctx, r) +} + +func (p *authenticatedWrapperControlServer) GetCluster(ctx context.Context, r *GetClusterRequest) (*GetClusterResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetCluster(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListClusters(ctx context.Context, r *ListClustersRequest) (*ListClustersResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListClusters(ctx, r) +} + +func (p *authenticatedWrapperControlServer) UpdateCluster(ctx context.Context, r *UpdateClusterRequest) (*UpdateClusterResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.UpdateCluster(ctx, r) +} + +func (p *authenticatedWrapperControlServer) GetSecret(ctx context.Context, r *GetSecretRequest) (*GetSecretResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetSecret(ctx, r) +} + +func (p *authenticatedWrapperControlServer) UpdateSecret(ctx context.Context, r *UpdateSecretRequest) (*UpdateSecretResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.UpdateSecret(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListSecrets(ctx context.Context, r *ListSecretsRequest) (*ListSecretsResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListSecrets(ctx, r) +} + +func (p *authenticatedWrapperControlServer) CreateSecret(ctx context.Context, r *CreateSecretRequest) (*CreateSecretResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.CreateSecret(ctx, r) +} + +func (p *authenticatedWrapperControlServer) RemoveSecret(ctx context.Context, r *RemoveSecretRequest) (*RemoveSecretResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.RemoveSecret(ctx, r) +} + +func (p *authenticatedWrapperControlServer) GetConfig(ctx context.Context, r *GetConfigRequest) (*GetConfigResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetConfig(ctx, r) +} + +func (p *authenticatedWrapperControlServer) UpdateConfig(ctx context.Context, r *UpdateConfigRequest) (*UpdateConfigResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.UpdateConfig(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListConfigs(ctx context.Context, r *ListConfigsRequest) (*ListConfigsResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListConfigs(ctx, r) +} + +func (p *authenticatedWrapperControlServer) CreateConfig(ctx context.Context, 
r *CreateConfigRequest) (*CreateConfigResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.CreateConfig(ctx, r) +} + +func (p *authenticatedWrapperControlServer) RemoveConfig(ctx context.Context, r *RemoveConfigRequest) (*RemoveConfigResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.RemoveConfig(ctx, r) +} + +func (m *GetNodeRequest) Copy() *GetNodeRequest { + if m == nil { + return nil + } + o := &GetNodeRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetNodeRequest) CopyFrom(src interface{}) { + + o := src.(*GetNodeRequest) + *m = *o +} + +func (m *GetNodeResponse) Copy() *GetNodeResponse { + if m == nil { + return nil + } + o := &GetNodeResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetNodeResponse) CopyFrom(src interface{}) { + + o := src.(*GetNodeResponse) + *m = *o + if o.Node != nil { + m.Node = &Node{} + deepcopy.Copy(m.Node, o.Node) + } +} + +func (m *ListNodesRequest) Copy() *ListNodesRequest { + if m == nil { + return nil + } + o := &ListNodesRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListNodesRequest) CopyFrom(src interface{}) { + + o := src.(*ListNodesRequest) + *m = *o + if o.Filters != nil { + m.Filters = &ListNodesRequest_Filters{} + deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m *ListNodesRequest_Filters) Copy() *ListNodesRequest_Filters { + if m == nil { + return nil + } + o := &ListNodesRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m *ListNodesRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListNodesRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + copy(m.Names, o.Names) + } + + if o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + copy(m.IDPrefixes, o.IDPrefixes) + } + + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.Memberships != nil { + m.Memberships = make([]NodeSpec_Membership, len(o.Memberships)) + copy(m.Memberships, o.Memberships) + } + + if o.Roles != nil { + m.Roles = make([]NodeRole, len(o.Roles)) + copy(m.Roles, o.Roles) + } + + if o.NamePrefixes != nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + +} + +func (m *ListNodesResponse) Copy() *ListNodesResponse { + if m == nil { + return nil + } + o := &ListNodesResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListNodesResponse) CopyFrom(src interface{}) { + + o := src.(*ListNodesResponse) + *m = *o + if o.Nodes != nil { + m.Nodes = make([]*Node, len(o.Nodes)) + for i := range m.Nodes { + m.Nodes[i] = &Node{} + deepcopy.Copy(m.Nodes[i], o.Nodes[i]) + } + } + +} + +func (m *UpdateNodeRequest) Copy() *UpdateNodeRequest { + if m == nil { + return nil + } + o := &UpdateNodeRequest{} + o.CopyFrom(m) + return o +} + +func (m *UpdateNodeRequest) CopyFrom(src interface{}) { + + o := src.(*UpdateNodeRequest) + *m = *o + if o.NodeVersion != nil { + m.NodeVersion = &Version{} + deepcopy.Copy(m.NodeVersion, o.NodeVersion) + } + if o.Spec != nil { + m.Spec = &NodeSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *UpdateNodeResponse) Copy() *UpdateNodeResponse { + if m == nil { + return nil + } + o := &UpdateNodeResponse{} + o.CopyFrom(m) + return o +} + +func (m *UpdateNodeResponse) CopyFrom(src interface{}) { + + o := src.(*UpdateNodeResponse) + *m = *o + if o.Node != nil { + m.Node = &Node{} + 
deepcopy.Copy(m.Node, o.Node) + } +} + +func (m *RemoveNodeRequest) Copy() *RemoveNodeRequest { + if m == nil { + return nil + } + o := &RemoveNodeRequest{} + o.CopyFrom(m) + return o +} + +func (m *RemoveNodeRequest) CopyFrom(src interface{}) { + + o := src.(*RemoveNodeRequest) + *m = *o +} + +func (m *RemoveNodeResponse) Copy() *RemoveNodeResponse { + if m == nil { + return nil + } + o := &RemoveNodeResponse{} + o.CopyFrom(m) + return o +} + +func (m *RemoveNodeResponse) CopyFrom(src interface{}) {} +func (m *GetTaskRequest) Copy() *GetTaskRequest { + if m == nil { + return nil + } + o := &GetTaskRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetTaskRequest) CopyFrom(src interface{}) { + + o := src.(*GetTaskRequest) + *m = *o +} + +func (m *GetTaskResponse) Copy() *GetTaskResponse { + if m == nil { + return nil + } + o := &GetTaskResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetTaskResponse) CopyFrom(src interface{}) { + + o := src.(*GetTaskResponse) + *m = *o + if o.Task != nil { + m.Task = &Task{} + deepcopy.Copy(m.Task, o.Task) + } +} + +func (m *RemoveTaskRequest) Copy() *RemoveTaskRequest { + if m == nil { + return nil + } + o := &RemoveTaskRequest{} + o.CopyFrom(m) + return o +} + +func (m *RemoveTaskRequest) CopyFrom(src interface{}) { + + o := src.(*RemoveTaskRequest) + *m = *o +} + +func (m *RemoveTaskResponse) Copy() *RemoveTaskResponse { + if m == nil { + return nil + } + o := &RemoveTaskResponse{} + o.CopyFrom(m) + return o +} + +func (m *RemoveTaskResponse) CopyFrom(src interface{}) {} +func (m *ListTasksRequest) Copy() *ListTasksRequest { + if m == nil { + return nil + } + o := &ListTasksRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListTasksRequest) CopyFrom(src interface{}) { + + o := src.(*ListTasksRequest) + *m = *o + if o.Filters != nil { + m.Filters = &ListTasksRequest_Filters{} + deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m *ListTasksRequest_Filters) Copy() *ListTasksRequest_Filters { + if m == nil { + return nil + } + o := &ListTasksRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m *ListTasksRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListTasksRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + copy(m.Names, o.Names) + } + + if o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + copy(m.IDPrefixes, o.IDPrefixes) + } + + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.ServiceIDs != nil { + m.ServiceIDs = make([]string, len(o.ServiceIDs)) + copy(m.ServiceIDs, o.ServiceIDs) + } + + if o.NodeIDs != nil { + m.NodeIDs = make([]string, len(o.NodeIDs)) + copy(m.NodeIDs, o.NodeIDs) + } + + if o.DesiredStates != nil { + m.DesiredStates = make([]TaskState, len(o.DesiredStates)) + copy(m.DesiredStates, o.DesiredStates) + } + + if o.NamePrefixes != nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + + if o.Runtimes != nil { + m.Runtimes = make([]string, len(o.Runtimes)) + copy(m.Runtimes, o.Runtimes) + } + +} + +func (m *ListTasksResponse) Copy() *ListTasksResponse { + if m == nil { + return nil + } + o := &ListTasksResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListTasksResponse) CopyFrom(src interface{}) { + + o := src.(*ListTasksResponse) + *m = *o + if o.Tasks != nil { + m.Tasks = make([]*Task, len(o.Tasks)) + for i := range m.Tasks { + m.Tasks[i] = &Task{} + deepcopy.Copy(m.Tasks[i], o.Tasks[i]) + } + } + +} + 
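+// Editor's aside — an illustrative sketch, not part of the generated file. The
+// Copy/CopyFrom pairs above implement swarmkit's deep-copy convention: Copy()
+// allocates a fresh message and CopyFrom() re-clones every nested pointer,
+// slice, and map, so a copy never aliases the original. Field values below are
+// hypothetical:
+//
+//	orig := &ListTasksResponse{Tasks: []*Task{{ID: "task-1"}}}
+//	clone := orig.Copy()          // deep copy: Tasks slice and elements are new
+//	clone.Tasks[0].ID = "task-2"  // mutating the clone...
+//	fmt.Println(orig.Tasks[0].ID) // ...leaves the original at "task-1"
+//
+// This is why the generated CopyFrom bodies re-make each slice and map after
+// the struct assignment: a bare *m = *o alone would share backing storage.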
+func (m *CreateServiceRequest) Copy() *CreateServiceRequest { + if m == nil { + return nil + } + o := &CreateServiceRequest{} + o.CopyFrom(m) + return o +} + +func (m *CreateServiceRequest) CopyFrom(src interface{}) { + + o := src.(*CreateServiceRequest) + *m = *o + if o.Spec != nil { + m.Spec = &ServiceSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *CreateServiceResponse) Copy() *CreateServiceResponse { + if m == nil { + return nil + } + o := &CreateServiceResponse{} + o.CopyFrom(m) + return o +} + +func (m *CreateServiceResponse) CopyFrom(src interface{}) { + + o := src.(*CreateServiceResponse) + *m = *o + if o.Service != nil { + m.Service = &Service{} + deepcopy.Copy(m.Service, o.Service) + } +} + +func (m *GetServiceRequest) Copy() *GetServiceRequest { + if m == nil { + return nil + } + o := &GetServiceRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetServiceRequest) CopyFrom(src interface{}) { + + o := src.(*GetServiceRequest) + *m = *o +} + +func (m *GetServiceResponse) Copy() *GetServiceResponse { + if m == nil { + return nil + } + o := &GetServiceResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetServiceResponse) CopyFrom(src interface{}) { + + o := src.(*GetServiceResponse) + *m = *o + if o.Service != nil { + m.Service = &Service{} + deepcopy.Copy(m.Service, o.Service) + } +} + +func (m *UpdateServiceRequest) Copy() *UpdateServiceRequest { + if m == nil { + return nil + } + o := &UpdateServiceRequest{} + o.CopyFrom(m) + return o +} + +func (m *UpdateServiceRequest) CopyFrom(src interface{}) { + + o := src.(*UpdateServiceRequest) + *m = *o + if o.ServiceVersion != nil { + m.ServiceVersion = &Version{} + deepcopy.Copy(m.ServiceVersion, o.ServiceVersion) + } + if o.Spec != nil { + m.Spec = &ServiceSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *UpdateServiceResponse) Copy() *UpdateServiceResponse { + if m == nil { + return nil + } + o := &UpdateServiceResponse{} + o.CopyFrom(m) + return o +} + +func (m *UpdateServiceResponse) CopyFrom(src interface{}) { + + o := src.(*UpdateServiceResponse) + *m = *o + if o.Service != nil { + m.Service = &Service{} + deepcopy.Copy(m.Service, o.Service) + } +} + +func (m *RemoveServiceRequest) Copy() *RemoveServiceRequest { + if m == nil { + return nil + } + o := &RemoveServiceRequest{} + o.CopyFrom(m) + return o +} + +func (m *RemoveServiceRequest) CopyFrom(src interface{}) { + + o := src.(*RemoveServiceRequest) + *m = *o +} + +func (m *RemoveServiceResponse) Copy() *RemoveServiceResponse { + if m == nil { + return nil + } + o := &RemoveServiceResponse{} + o.CopyFrom(m) + return o +} + +func (m *RemoveServiceResponse) CopyFrom(src interface{}) {} +func (m *ListServicesRequest) Copy() *ListServicesRequest { + if m == nil { + return nil + } + o := &ListServicesRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListServicesRequest) CopyFrom(src interface{}) { + + o := src.(*ListServicesRequest) + *m = *o + if o.Filters != nil { + m.Filters = &ListServicesRequest_Filters{} + deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m *ListServicesRequest_Filters) Copy() *ListServicesRequest_Filters { + if m == nil { + return nil + } + o := &ListServicesRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m *ListServicesRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListServicesRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + copy(m.Names, o.Names) + } + + if o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + copy(m.IDPrefixes, o.IDPrefixes) + } + 
+ if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.NamePrefixes != nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + + if o.Runtimes != nil { + m.Runtimes = make([]string, len(o.Runtimes)) + copy(m.Runtimes, o.Runtimes) + } + +} + +func (m *ListServicesResponse) Copy() *ListServicesResponse { + if m == nil { + return nil + } + o := &ListServicesResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListServicesResponse) CopyFrom(src interface{}) { + + o := src.(*ListServicesResponse) + *m = *o + if o.Services != nil { + m.Services = make([]*Service, len(o.Services)) + for i := range m.Services { + m.Services[i] = &Service{} + deepcopy.Copy(m.Services[i], o.Services[i]) + } + } + +} + +func (m *CreateNetworkRequest) Copy() *CreateNetworkRequest { + if m == nil { + return nil + } + o := &CreateNetworkRequest{} + o.CopyFrom(m) + return o +} + +func (m *CreateNetworkRequest) CopyFrom(src interface{}) { + + o := src.(*CreateNetworkRequest) + *m = *o + if o.Spec != nil { + m.Spec = &NetworkSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *CreateNetworkResponse) Copy() *CreateNetworkResponse { + if m == nil { + return nil + } + o := &CreateNetworkResponse{} + o.CopyFrom(m) + return o +} + +func (m *CreateNetworkResponse) CopyFrom(src interface{}) { + + o := src.(*CreateNetworkResponse) + *m = *o + if o.Network != nil { + m.Network = &Network{} + deepcopy.Copy(m.Network, o.Network) + } +} + +func (m *GetNetworkRequest) Copy() *GetNetworkRequest { + if m == nil { + return nil + } + o := &GetNetworkRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetNetworkRequest) CopyFrom(src interface{}) { + + o := src.(*GetNetworkRequest) + *m = *o +} + +func (m *GetNetworkResponse) Copy() *GetNetworkResponse { + if m == nil { + return nil + } + o := &GetNetworkResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetNetworkResponse) CopyFrom(src interface{}) { + + o := src.(*GetNetworkResponse) + *m = *o + if o.Network != nil { + m.Network = &Network{} + deepcopy.Copy(m.Network, o.Network) + } +} + +func (m *RemoveNetworkRequest) Copy() *RemoveNetworkRequest { + if m == nil { + return nil + } + o := &RemoveNetworkRequest{} + o.CopyFrom(m) + return o +} + +func (m *RemoveNetworkRequest) CopyFrom(src interface{}) { + + o := src.(*RemoveNetworkRequest) + *m = *o +} + +func (m *RemoveNetworkResponse) Copy() *RemoveNetworkResponse { + if m == nil { + return nil + } + o := &RemoveNetworkResponse{} + o.CopyFrom(m) + return o +} + +func (m *RemoveNetworkResponse) CopyFrom(src interface{}) {} +func (m *ListNetworksRequest) Copy() *ListNetworksRequest { + if m == nil { + return nil + } + o := &ListNetworksRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListNetworksRequest) CopyFrom(src interface{}) { + + o := src.(*ListNetworksRequest) + *m = *o + if o.Filters != nil { + m.Filters = &ListNetworksRequest_Filters{} + deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m *ListNetworksRequest_Filters) Copy() *ListNetworksRequest_Filters { + if m == nil { + return nil + } + o := &ListNetworksRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m *ListNetworksRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListNetworksRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + copy(m.Names, o.Names) + } + + if o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + copy(m.IDPrefixes, o.IDPrefixes) + } + + 
if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.NamePrefixes != nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + +} + +func (m *ListNetworksResponse) Copy() *ListNetworksResponse { + if m == nil { + return nil + } + o := &ListNetworksResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListNetworksResponse) CopyFrom(src interface{}) { + + o := src.(*ListNetworksResponse) + *m = *o + if o.Networks != nil { + m.Networks = make([]*Network, len(o.Networks)) + for i := range m.Networks { + m.Networks[i] = &Network{} + deepcopy.Copy(m.Networks[i], o.Networks[i]) + } + } + +} + +func (m *GetClusterRequest) Copy() *GetClusterRequest { + if m == nil { + return nil + } + o := &GetClusterRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetClusterRequest) CopyFrom(src interface{}) { + + o := src.(*GetClusterRequest) + *m = *o +} + +func (m *GetClusterResponse) Copy() *GetClusterResponse { + if m == nil { + return nil + } + o := &GetClusterResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetClusterResponse) CopyFrom(src interface{}) { + + o := src.(*GetClusterResponse) + *m = *o + if o.Cluster != nil { + m.Cluster = &Cluster{} + deepcopy.Copy(m.Cluster, o.Cluster) + } +} + +func (m *ListClustersRequest) Copy() *ListClustersRequest { + if m == nil { + return nil + } + o := &ListClustersRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListClustersRequest) CopyFrom(src interface{}) { + + o := src.(*ListClustersRequest) + *m = *o + if o.Filters != nil { + m.Filters = &ListClustersRequest_Filters{} + deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m *ListClustersRequest_Filters) Copy() *ListClustersRequest_Filters { + if m == nil { + return nil + } + o := &ListClustersRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m *ListClustersRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListClustersRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + copy(m.Names, o.Names) + } + + if o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + copy(m.IDPrefixes, o.IDPrefixes) + } + + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.NamePrefixes != nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + +} + +func (m *ListClustersResponse) Copy() *ListClustersResponse { + if m == nil { + return nil + } + o := &ListClustersResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListClustersResponse) CopyFrom(src interface{}) { + + o := src.(*ListClustersResponse) + *m = *o + if o.Clusters != nil { + m.Clusters = make([]*Cluster, len(o.Clusters)) + for i := range m.Clusters { + m.Clusters[i] = &Cluster{} + deepcopy.Copy(m.Clusters[i], o.Clusters[i]) + } + } + +} + +func (m *KeyRotation) Copy() *KeyRotation { + if m == nil { + return nil + } + o := &KeyRotation{} + o.CopyFrom(m) + return o +} + +func (m *KeyRotation) CopyFrom(src interface{}) { + + o := src.(*KeyRotation) + *m = *o +} + +func (m *UpdateClusterRequest) Copy() *UpdateClusterRequest { + if m == nil { + return nil + } + o := &UpdateClusterRequest{} + o.CopyFrom(m) + return o +} + +func (m *UpdateClusterRequest) CopyFrom(src interface{}) { + + o := src.(*UpdateClusterRequest) + *m = *o + if o.ClusterVersion != nil { + m.ClusterVersion = &Version{} + deepcopy.Copy(m.ClusterVersion, 
o.ClusterVersion) + } + if o.Spec != nil { + m.Spec = &ClusterSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } + deepcopy.Copy(&m.Rotation, &o.Rotation) +} + +func (m *UpdateClusterResponse) Copy() *UpdateClusterResponse { + if m == nil { + return nil + } + o := &UpdateClusterResponse{} + o.CopyFrom(m) + return o +} + +func (m *UpdateClusterResponse) CopyFrom(src interface{}) { + + o := src.(*UpdateClusterResponse) + *m = *o + if o.Cluster != nil { + m.Cluster = &Cluster{} + deepcopy.Copy(m.Cluster, o.Cluster) + } +} + +func (m *GetSecretRequest) Copy() *GetSecretRequest { + if m == nil { + return nil + } + o := &GetSecretRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetSecretRequest) CopyFrom(src interface{}) { + + o := src.(*GetSecretRequest) + *m = *o +} + +func (m *GetSecretResponse) Copy() *GetSecretResponse { + if m == nil { + return nil + } + o := &GetSecretResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetSecretResponse) CopyFrom(src interface{}) { + + o := src.(*GetSecretResponse) + *m = *o + if o.Secret != nil { + m.Secret = &Secret{} + deepcopy.Copy(m.Secret, o.Secret) + } +} + +func (m *UpdateSecretRequest) Copy() *UpdateSecretRequest { + if m == nil { + return nil + } + o := &UpdateSecretRequest{} + o.CopyFrom(m) + return o +} + +func (m *UpdateSecretRequest) CopyFrom(src interface{}) { + + o := src.(*UpdateSecretRequest) + *m = *o + if o.SecretVersion != nil { + m.SecretVersion = &Version{} + deepcopy.Copy(m.SecretVersion, o.SecretVersion) + } + if o.Spec != nil { + m.Spec = &SecretSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *UpdateSecretResponse) Copy() *UpdateSecretResponse { + if m == nil { + return nil + } + o := &UpdateSecretResponse{} + o.CopyFrom(m) + return o +} + +func (m *UpdateSecretResponse) CopyFrom(src interface{}) { + + o := src.(*UpdateSecretResponse) + *m = *o + if o.Secret != nil { + m.Secret = &Secret{} + deepcopy.Copy(m.Secret, o.Secret) + } +} + +func (m *ListSecretsRequest) Copy() *ListSecretsRequest { + if m == nil { + return nil + } + o := &ListSecretsRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListSecretsRequest) CopyFrom(src interface{}) { + + o := src.(*ListSecretsRequest) + *m = *o + if o.Filters != nil { + m.Filters = &ListSecretsRequest_Filters{} + deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m *ListSecretsRequest_Filters) Copy() *ListSecretsRequest_Filters { + if m == nil { + return nil + } + o := &ListSecretsRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m *ListSecretsRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListSecretsRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + copy(m.Names, o.Names) + } + + if o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + copy(m.IDPrefixes, o.IDPrefixes) + } + + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.NamePrefixes != nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + +} + +func (m *ListSecretsResponse) Copy() *ListSecretsResponse { + if m == nil { + return nil + } + o := &ListSecretsResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListSecretsResponse) CopyFrom(src interface{}) { + + o := src.(*ListSecretsResponse) + *m = *o + if o.Secrets != nil { + m.Secrets = make([]*Secret, len(o.Secrets)) + for i := range m.Secrets { + m.Secrets[i] = &Secret{} + deepcopy.Copy(m.Secrets[i], o.Secrets[i]) + } + } + +} + +func (m 
*CreateSecretRequest) Copy() *CreateSecretRequest { + if m == nil { + return nil + } + o := &CreateSecretRequest{} + o.CopyFrom(m) + return o +} + +func (m *CreateSecretRequest) CopyFrom(src interface{}) { + + o := src.(*CreateSecretRequest) + *m = *o + if o.Spec != nil { + m.Spec = &SecretSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *CreateSecretResponse) Copy() *CreateSecretResponse { + if m == nil { + return nil + } + o := &CreateSecretResponse{} + o.CopyFrom(m) + return o +} + +func (m *CreateSecretResponse) CopyFrom(src interface{}) { + + o := src.(*CreateSecretResponse) + *m = *o + if o.Secret != nil { + m.Secret = &Secret{} + deepcopy.Copy(m.Secret, o.Secret) + } +} + +func (m *RemoveSecretRequest) Copy() *RemoveSecretRequest { + if m == nil { + return nil + } + o := &RemoveSecretRequest{} + o.CopyFrom(m) + return o +} + +func (m *RemoveSecretRequest) CopyFrom(src interface{}) { + + o := src.(*RemoveSecretRequest) + *m = *o +} + +func (m *RemoveSecretResponse) Copy() *RemoveSecretResponse { + if m == nil { + return nil + } + o := &RemoveSecretResponse{} + o.CopyFrom(m) + return o +} + +func (m *RemoveSecretResponse) CopyFrom(src interface{}) {} +func (m *GetConfigRequest) Copy() *GetConfigRequest { + if m == nil { + return nil + } + o := &GetConfigRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetConfigRequest) CopyFrom(src interface{}) { + + o := src.(*GetConfigRequest) + *m = *o +} + +func (m *GetConfigResponse) Copy() *GetConfigResponse { + if m == nil { + return nil + } + o := &GetConfigResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetConfigResponse) CopyFrom(src interface{}) { + + o := src.(*GetConfigResponse) + *m = *o + if o.Config != nil { + m.Config = &Config{} + deepcopy.Copy(m.Config, o.Config) + } +} + +func (m *UpdateConfigRequest) Copy() *UpdateConfigRequest { + if m == nil { + return nil + } + o := &UpdateConfigRequest{} + o.CopyFrom(m) + return o +} + +func (m *UpdateConfigRequest) CopyFrom(src interface{}) { + + o := src.(*UpdateConfigRequest) + *m = *o + if o.ConfigVersion != nil { + m.ConfigVersion = &Version{} + deepcopy.Copy(m.ConfigVersion, o.ConfigVersion) + } + if o.Spec != nil { + m.Spec = &ConfigSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *UpdateConfigResponse) Copy() *UpdateConfigResponse { + if m == nil { + return nil + } + o := &UpdateConfigResponse{} + o.CopyFrom(m) + return o +} + +func (m *UpdateConfigResponse) CopyFrom(src interface{}) { + + o := src.(*UpdateConfigResponse) + *m = *o + if o.Config != nil { + m.Config = &Config{} + deepcopy.Copy(m.Config, o.Config) + } +} + +func (m *ListConfigsRequest) Copy() *ListConfigsRequest { + if m == nil { + return nil + } + o := &ListConfigsRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListConfigsRequest) CopyFrom(src interface{}) { + + o := src.(*ListConfigsRequest) + *m = *o + if o.Filters != nil { + m.Filters = &ListConfigsRequest_Filters{} + deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m *ListConfigsRequest_Filters) Copy() *ListConfigsRequest_Filters { + if m == nil { + return nil + } + o := &ListConfigsRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m *ListConfigsRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListConfigsRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + copy(m.Names, o.Names) + } + + if o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + copy(m.IDPrefixes, o.IDPrefixes) + } + + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + 
for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.NamePrefixes != nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + +} + +func (m *ListConfigsResponse) Copy() *ListConfigsResponse { + if m == nil { + return nil + } + o := &ListConfigsResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListConfigsResponse) CopyFrom(src interface{}) { + + o := src.(*ListConfigsResponse) + *m = *o + if o.Configs != nil { + m.Configs = make([]*Config, len(o.Configs)) + for i := range m.Configs { + m.Configs[i] = &Config{} + deepcopy.Copy(m.Configs[i], o.Configs[i]) + } + } + +} + +func (m *CreateConfigRequest) Copy() *CreateConfigRequest { + if m == nil { + return nil + } + o := &CreateConfigRequest{} + o.CopyFrom(m) + return o +} + +func (m *CreateConfigRequest) CopyFrom(src interface{}) { + + o := src.(*CreateConfigRequest) + *m = *o + if o.Spec != nil { + m.Spec = &ConfigSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *CreateConfigResponse) Copy() *CreateConfigResponse { + if m == nil { + return nil + } + o := &CreateConfigResponse{} + o.CopyFrom(m) + return o +} + +func (m *CreateConfigResponse) CopyFrom(src interface{}) { + + o := src.(*CreateConfigResponse) + *m = *o + if o.Config != nil { + m.Config = &Config{} + deepcopy.Copy(m.Config, o.Config) + } +} + +func (m *RemoveConfigRequest) Copy() *RemoveConfigRequest { + if m == nil { + return nil + } + o := &RemoveConfigRequest{} + o.CopyFrom(m) + return o +} + +func (m *RemoveConfigRequest) CopyFrom(src interface{}) { + + o := src.(*RemoveConfigRequest) + *m = *o +} + +func (m *RemoveConfigResponse) Copy() *RemoveConfigResponse { + if m == nil { + return nil + } + o := &RemoveConfigResponse{} + o.CopyFrom(m) + return o +} + +func (m *RemoveConfigResponse) CopyFrom(src interface{}) {} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
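+// Editor's note (assumption about the generator's intent): the blank const
+// below is protoc-gen-go's standard compatibility guard. The constant
+// grpc.SupportPackageIsVersion4 only exists in grpc-go releases that support
+// generated-code API version 4, so compiling this file against an older
+// grpc-go fails at build time instead of misbehaving at runtime.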
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for Control service
+
+type ControlClient interface {
+	GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error)
+	ListNodes(ctx context.Context, in *ListNodesRequest, opts ...grpc.CallOption) (*ListNodesResponse, error)
+	UpdateNode(ctx context.Context, in *UpdateNodeRequest, opts ...grpc.CallOption) (*UpdateNodeResponse, error)
+	RemoveNode(ctx context.Context, in *RemoveNodeRequest, opts ...grpc.CallOption) (*RemoveNodeResponse, error)
+	GetTask(ctx context.Context, in *GetTaskRequest, opts ...grpc.CallOption) (*GetTaskResponse, error)
+	ListTasks(ctx context.Context, in *ListTasksRequest, opts ...grpc.CallOption) (*ListTasksResponse, error)
+	RemoveTask(ctx context.Context, in *RemoveTaskRequest, opts ...grpc.CallOption) (*RemoveTaskResponse, error)
+	GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*GetServiceResponse, error)
+	ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error)
+	CreateService(ctx context.Context, in *CreateServiceRequest, opts ...grpc.CallOption) (*CreateServiceResponse, error)
+	UpdateService(ctx context.Context, in *UpdateServiceRequest, opts ...grpc.CallOption) (*UpdateServiceResponse, error)
+	RemoveService(ctx context.Context, in *RemoveServiceRequest, opts ...grpc.CallOption) (*RemoveServiceResponse, error)
+	GetNetwork(ctx context.Context, in *GetNetworkRequest, opts ...grpc.CallOption) (*GetNetworkResponse, error)
+	ListNetworks(ctx context.Context, in *ListNetworksRequest, opts ...grpc.CallOption) (*ListNetworksResponse, error)
+	CreateNetwork(ctx context.Context, in *CreateNetworkRequest, opts ...grpc.CallOption) (*CreateNetworkResponse, error)
+	RemoveNetwork(ctx context.Context, in *RemoveNetworkRequest, opts ...grpc.CallOption) (*RemoveNetworkResponse, error)
+	GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*GetClusterResponse, error)
+	ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error)
+	UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*UpdateClusterResponse, error)
+	// GetSecret returns a `GetSecretResponse` with a `Secret` with the same
+	// id as `GetSecretRequest.SecretID`.
+	// - Returns `NotFound` if the Secret with the given id is not found.
+	// - Returns `InvalidArgument` if the `GetSecretRequest.SecretID` is empty.
+	// - Returns an error if getting fails.
+	GetSecret(ctx context.Context, in *GetSecretRequest, opts ...grpc.CallOption) (*GetSecretResponse, error)
+	// UpdateSecret returns an `UpdateSecretResponse` with a `Secret` with the same
+	// id as `UpdateSecretRequest.SecretID`.
+	// - Returns `NotFound` if the Secret with the given id is not found.
+	// - Returns `InvalidArgument` if the `UpdateSecretRequest.SecretID` is empty.
+	// - Returns an error if updating fails.
+	UpdateSecret(ctx context.Context, in *UpdateSecretRequest, opts ...grpc.CallOption) (*UpdateSecretResponse, error)
+	// ListSecrets returns a `ListSecretsResponse` with a list of all non-internal `Secret`s being
+	// managed, or all secrets matching any name in `ListSecretsRequest.Names`, any
+	// name prefix in `ListSecretsRequest.NamePrefixes`, any id in
+	// `ListSecretsRequest.SecretIDs`, or any id prefix in `ListSecretsRequest.IDPrefixes`.
+	// - Returns an error if listing fails.
+	ListSecrets(ctx context.Context, in *ListSecretsRequest, opts ...grpc.CallOption) (*ListSecretsResponse, error)
+	// CreateSecret creates and returns a `CreateSecretResponse` with a `Secret` based
+	// on the provided `CreateSecretRequest.SecretSpec`.
+	// - Returns `InvalidArgument` if the `CreateSecretRequest.SecretSpec` is malformed,
+	//   or if the secret data is too long or contains invalid characters.
+	// - Returns an error if the creation fails.
+	CreateSecret(ctx context.Context, in *CreateSecretRequest, opts ...grpc.CallOption) (*CreateSecretResponse, error)
+	// RemoveSecret removes the secret referenced by `RemoveSecretRequest.ID`.
+	// - Returns `InvalidArgument` if `RemoveSecretRequest.ID` is empty.
+	// - Returns `NotFound` if a secret named `RemoveSecretRequest.ID` is not found.
+	// - Returns an error if the deletion fails.
+	RemoveSecret(ctx context.Context, in *RemoveSecretRequest, opts ...grpc.CallOption) (*RemoveSecretResponse, error)
+	// GetConfig returns a `GetConfigResponse` with a `Config` with the same
+	// id as `GetConfigRequest.ConfigID`.
+	// - Returns `NotFound` if the Config with the given id is not found.
+	// - Returns `InvalidArgument` if the `GetConfigRequest.ConfigID` is empty.
+	// - Returns an error if getting fails.
+	GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error)
+	// UpdateConfig returns an `UpdateConfigResponse` with a `Config` with the same
+	// id as `UpdateConfigRequest.ConfigID`.
+	// - Returns `NotFound` if the Config with the given id is not found.
+	// - Returns `InvalidArgument` if the `UpdateConfigRequest.ConfigID` is empty.
+	// - Returns an error if updating fails.
+	UpdateConfig(ctx context.Context, in *UpdateConfigRequest, opts ...grpc.CallOption) (*UpdateConfigResponse, error)
+	// ListConfigs returns a `ListConfigsResponse` with a list of `Config`s being
+	// managed, or all configs matching any name in `ListConfigsRequest.Names`, any
+	// name prefix in `ListConfigsRequest.NamePrefixes`, any id in
+	// `ListConfigsRequest.ConfigIDs`, or any id prefix in `ListConfigsRequest.IDPrefixes`.
+	// - Returns an error if listing fails.
+	ListConfigs(ctx context.Context, in *ListConfigsRequest, opts ...grpc.CallOption) (*ListConfigsResponse, error)
+	// CreateConfig creates and returns a `CreateConfigResponse` with a `Config` based
+	// on the provided `CreateConfigRequest.ConfigSpec`.
+	// - Returns `InvalidArgument` if the `CreateConfigRequest.ConfigSpec` is malformed,
+	//   or if the config data is too long or contains invalid characters.
+	// - Returns an error if the creation fails.
+	CreateConfig(ctx context.Context, in *CreateConfigRequest, opts ...grpc.CallOption) (*CreateConfigResponse, error)
+	// RemoveConfig removes the config referenced by `RemoveConfigRequest.ID`.
+	// - Returns `InvalidArgument` if `RemoveConfigRequest.ID` is empty.
+	// - Returns `NotFound` if a config named `RemoveConfigRequest.ID` is not found.
+	// - Returns an error if the deletion fails.
+	RemoveConfig(ctx context.Context, in *RemoveConfigRequest, opts ...grpc.CallOption) (*RemoveConfigResponse, error)
+}
+
+type controlClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewControlClient(cc *grpc.ClientConn) ControlClient {
+	return &controlClient{cc}
+}
+
+func (c *controlClient) GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) {
+	out := new(GetNodeResponse)
+	err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetNode", in, out, c.cc, opts...)
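+	// Editor's aside — illustrative only, not part of the generated file.
+	// grpc.Invoke performs the blocking unary RPC: it marshals `in`, sends it on
+	// the wrapped *grpc.ClientConn under the fully-qualified method name, and
+	// unmarshals the reply into `out`. (Later grpc-go releases deprecate this
+	// package-level helper in favor of the equivalent c.cc.Invoke method.)
+	// A caller would wire the client up roughly like this; the address and dial
+	// options are hypothetical:
+	//
+	//	conn, err := grpc.Dial("manager.example:4242", grpc.WithInsecure())
+	//	if err != nil { /* handle dial error */ }
+	//	defer conn.Close()
+	//	client := NewControlClient(conn)
+	//	resp, err := client.GetNode(ctx, &GetNodeRequest{NodeID: "node-1"})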
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListNodes(ctx context.Context, in *ListNodesRequest, opts ...grpc.CallOption) (*ListNodesResponse, error) { + out := new(ListNodesResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListNodes", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) UpdateNode(ctx context.Context, in *UpdateNodeRequest, opts ...grpc.CallOption) (*UpdateNodeResponse, error) { + out := new(UpdateNodeResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/UpdateNode", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) RemoveNode(ctx context.Context, in *RemoveNodeRequest, opts ...grpc.CallOption) (*RemoveNodeResponse, error) { + out := new(RemoveNodeResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveNode", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) GetTask(ctx context.Context, in *GetTaskRequest, opts ...grpc.CallOption) (*GetTaskResponse, error) { + out := new(GetTaskResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetTask", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListTasks(ctx context.Context, in *ListTasksRequest, opts ...grpc.CallOption) (*ListTasksResponse, error) { + out := new(ListTasksResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListTasks", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) RemoveTask(ctx context.Context, in *RemoveTaskRequest, opts ...grpc.CallOption) (*RemoveTaskResponse, error) { + out := new(RemoveTaskResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveTask", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*GetServiceResponse, error) { + out := new(GetServiceResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetService", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) { + out := new(ListServicesResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListServices", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) CreateService(ctx context.Context, in *CreateServiceRequest, opts ...grpc.CallOption) (*CreateServiceResponse, error) { + out := new(CreateServiceResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/CreateService", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) UpdateService(ctx context.Context, in *UpdateServiceRequest, opts ...grpc.CallOption) (*UpdateServiceResponse, error) { + out := new(UpdateServiceResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/UpdateService", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) RemoveService(ctx context.Context, in *RemoveServiceRequest, opts ...grpc.CallOption) (*RemoveServiceResponse, error) { + out := new(RemoveServiceResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveService", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) GetNetwork(ctx context.Context, in *GetNetworkRequest, opts ...grpc.CallOption) (*GetNetworkResponse, error) { + out := new(GetNetworkResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetNetwork", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListNetworks(ctx context.Context, in *ListNetworksRequest, opts ...grpc.CallOption) (*ListNetworksResponse, error) { + out := new(ListNetworksResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListNetworks", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) CreateNetwork(ctx context.Context, in *CreateNetworkRequest, opts ...grpc.CallOption) (*CreateNetworkResponse, error) { + out := new(CreateNetworkResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/CreateNetwork", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) RemoveNetwork(ctx context.Context, in *RemoveNetworkRequest, opts ...grpc.CallOption) (*RemoveNetworkResponse, error) { + out := new(RemoveNetworkResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveNetwork", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*GetClusterResponse, error) { + out := new(GetClusterResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) { + out := new(ListClustersResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListClusters", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*UpdateClusterResponse, error) { + out := new(UpdateClusterResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/UpdateCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) GetSecret(ctx context.Context, in *GetSecretRequest, opts ...grpc.CallOption) (*GetSecretResponse, error) { + out := new(GetSecretResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetSecret", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) UpdateSecret(ctx context.Context, in *UpdateSecretRequest, opts ...grpc.CallOption) (*UpdateSecretResponse, error) { + out := new(UpdateSecretResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/UpdateSecret", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListSecrets(ctx context.Context, in *ListSecretsRequest, opts ...grpc.CallOption) (*ListSecretsResponse, error) { + out := new(ListSecretsResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListSecrets", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) CreateSecret(ctx context.Context, in *CreateSecretRequest, opts ...grpc.CallOption) (*CreateSecretResponse, error) { + out := new(CreateSecretResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/CreateSecret", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) RemoveSecret(ctx context.Context, in *RemoveSecretRequest, opts ...grpc.CallOption) (*RemoveSecretResponse, error) { + out := new(RemoveSecretResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveSecret", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) { + out := new(GetConfigResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) UpdateConfig(ctx context.Context, in *UpdateConfigRequest, opts ...grpc.CallOption) (*UpdateConfigResponse, error) { + out := new(UpdateConfigResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/UpdateConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListConfigs(ctx context.Context, in *ListConfigsRequest, opts ...grpc.CallOption) (*ListConfigsResponse, error) { + out := new(ListConfigsResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListConfigs", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) CreateConfig(ctx context.Context, in *CreateConfigRequest, opts ...grpc.CallOption) (*CreateConfigResponse, error) { + out := new(CreateConfigResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/CreateConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) RemoveConfig(ctx context.Context, in *RemoveConfigRequest, opts ...grpc.CallOption) (*RemoveConfigResponse, error) { + out := new(RemoveConfigResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveConfig", in, out, c.cc, opts...) 
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Server API for Control service
+
+type ControlServer interface {
+	GetNode(context.Context, *GetNodeRequest) (*GetNodeResponse, error)
+	ListNodes(context.Context, *ListNodesRequest) (*ListNodesResponse, error)
+	UpdateNode(context.Context, *UpdateNodeRequest) (*UpdateNodeResponse, error)
+	RemoveNode(context.Context, *RemoveNodeRequest) (*RemoveNodeResponse, error)
+	GetTask(context.Context, *GetTaskRequest) (*GetTaskResponse, error)
+	ListTasks(context.Context, *ListTasksRequest) (*ListTasksResponse, error)
+	RemoveTask(context.Context, *RemoveTaskRequest) (*RemoveTaskResponse, error)
+	GetService(context.Context, *GetServiceRequest) (*GetServiceResponse, error)
+	ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error)
+	CreateService(context.Context, *CreateServiceRequest) (*CreateServiceResponse, error)
+	UpdateService(context.Context, *UpdateServiceRequest) (*UpdateServiceResponse, error)
+	RemoveService(context.Context, *RemoveServiceRequest) (*RemoveServiceResponse, error)
+	GetNetwork(context.Context, *GetNetworkRequest) (*GetNetworkResponse, error)
+	ListNetworks(context.Context, *ListNetworksRequest) (*ListNetworksResponse, error)
+	CreateNetwork(context.Context, *CreateNetworkRequest) (*CreateNetworkResponse, error)
+	RemoveNetwork(context.Context, *RemoveNetworkRequest) (*RemoveNetworkResponse, error)
+	GetCluster(context.Context, *GetClusterRequest) (*GetClusterResponse, error)
+	ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error)
+	UpdateCluster(context.Context, *UpdateClusterRequest) (*UpdateClusterResponse, error)
+	// GetSecret returns a `GetSecretResponse` with a `Secret` with the same
+	// id as `GetSecretRequest.SecretID`.
+	// - Returns `NotFound` if the Secret with the given id is not found.
+	// - Returns `InvalidArgument` if the `GetSecretRequest.SecretID` is empty.
+	// - Returns an error if getting fails.
+	GetSecret(context.Context, *GetSecretRequest) (*GetSecretResponse, error)
+	// UpdateSecret returns an `UpdateSecretResponse` with a `Secret` with the same
+	// id as `UpdateSecretRequest.SecretID`.
+	// - Returns `NotFound` if the Secret with the given id is not found.
+	// - Returns `InvalidArgument` if the `UpdateSecretRequest.SecretID` is empty.
+	// - Returns an error if updating fails.
+	UpdateSecret(context.Context, *UpdateSecretRequest) (*UpdateSecretResponse, error)
+	// ListSecrets returns a `ListSecretsResponse` with a list of all non-internal `Secret`s being
+	// managed, or all secrets matching any name in `ListSecretsRequest.Names`, any
+	// name prefix in `ListSecretsRequest.NamePrefixes`, any id in
+	// `ListSecretsRequest.SecretIDs`, or any id prefix in `ListSecretsRequest.IDPrefixes`.
+	// - Returns an error if listing fails.
+	ListSecrets(context.Context, *ListSecretsRequest) (*ListSecretsResponse, error)
+	// CreateSecret creates and returns a `CreateSecretResponse` with a `Secret` based
+	// on the provided `CreateSecretRequest.SecretSpec`.
+	// - Returns `InvalidArgument` if the `CreateSecretRequest.SecretSpec` is malformed,
+	//   or if the secret data is too long or contains invalid characters.
+	// - Returns an error if the creation fails.
+	CreateSecret(context.Context, *CreateSecretRequest) (*CreateSecretResponse, error)
+	// RemoveSecret removes the secret referenced by `RemoveSecretRequest.ID`.
+	// - Returns `InvalidArgument` if `RemoveSecretRequest.ID` is empty.
+ // - Returns `NotFound` if the secret referenced by `RemoveSecretRequest.SecretID` is not found. + // - Returns an error if the deletion fails. + RemoveSecret(context.Context, *RemoveSecretRequest) (*RemoveSecretResponse, error) + // GetConfig returns a `GetConfigResponse` with a `Config` with the same + // id as `GetConfigRequest.ConfigID`. + // - Returns `NotFound` if the Config with the given id is not found. + // - Returns `InvalidArgument` if the `GetConfigRequest.ConfigID` is empty. + // - Returns an error if getting fails. + GetConfig(context.Context, *GetConfigRequest) (*GetConfigResponse, error) + // UpdateConfig returns an `UpdateConfigResponse` with a `Config` with the same + // id as `UpdateConfigRequest.ConfigID`. + // - Returns `NotFound` if the Config with the given id is not found. + // - Returns `InvalidArgument` if the `UpdateConfigRequest.ConfigID` is empty. + // - Returns an error if updating fails. + UpdateConfig(context.Context, *UpdateConfigRequest) (*UpdateConfigResponse, error) + // ListConfigs returns a `ListConfigsResponse` with a list of `Config`s being + // managed, or all configs matching any name in `ListConfigsRequest.Names`, any + // name prefix in `ListConfigsRequest.NamePrefixes`, any id in + // `ListConfigsRequest.ConfigIDs`, or any id prefix in `ListConfigsRequest.IDPrefixes`. + // - Returns an error if listing fails. + ListConfigs(context.Context, *ListConfigsRequest) (*ListConfigsResponse, error) + // CreateConfig creates and returns a `CreateConfigResponse` with a `Config` based + // on the provided `CreateConfigRequest.ConfigSpec`. + // - Returns `InvalidArgument` if the `CreateConfigRequest.ConfigSpec` is malformed, + // or if the config data is too long or contains invalid characters. + // - Returns an error if the creation fails. + CreateConfig(context.Context, *CreateConfigRequest) (*CreateConfigResponse, error) + // RemoveConfig removes the config referenced by `RemoveConfigRequest.ConfigID`. + // - Returns `InvalidArgument` if `RemoveConfigRequest.ConfigID` is empty. + // - Returns `NotFound` if the config referenced by `RemoveConfigRequest.ConfigID` is not found. + // - Returns an error if the deletion fails.
+ RemoveConfig(context.Context, *RemoveConfigRequest) (*RemoveConfigResponse, error) +} + +func RegisterControlServer(s *grpc.Server, srv ControlServer) { + s.RegisterService(&_Control_serviceDesc, srv) +} + +func _Control_GetNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetNode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetNode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetNode(ctx, req.(*GetNodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListNodes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNodesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListNodes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/ListNodes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListNodes(ctx, req.(*ListNodesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_UpdateNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateNodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).UpdateNode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/UpdateNode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).UpdateNode(ctx, req.(*UpdateNodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_RemoveNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveNodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).RemoveNode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/RemoveNode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).RemoveNode(ctx, req.(*RemoveNodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_GetTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetTask(ctx, req.(*GetTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListTasks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(ListTasksRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListTasks(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/ListTasks", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListTasks(ctx, req.(*ListTasksRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_RemoveTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).RemoveTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/RemoveTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).RemoveTask(ctx, req.(*RemoveTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_GetService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetService(ctx, req.(*GetServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListServices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListServicesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListServices(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/ListServices", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListServices(ctx, req.(*ListServicesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_CreateService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).CreateService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/CreateService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).CreateService(ctx, req.(*CreateServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_UpdateService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).UpdateService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/UpdateService", + } + handler := func(ctx 
context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).UpdateService(ctx, req.(*UpdateServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_RemoveService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).RemoveService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/RemoveService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).RemoveService(ctx, req.(*RemoveServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_GetNetwork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNetworkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetNetwork(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetNetwork", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetNetwork(ctx, req.(*GetNetworkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListNetworks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNetworksRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListNetworks(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/ListNetworks", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListNetworks(ctx, req.(*ListNetworksRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_CreateNetwork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateNetworkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).CreateNetwork(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/CreateNetwork", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).CreateNetwork(ctx, req.(*CreateNetworkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_RemoveNetwork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveNetworkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).RemoveNetwork(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/RemoveNetwork", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).RemoveNetwork(ctx, req.(*RemoveNetworkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_GetCluster_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetCluster(ctx, req.(*GetClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListClusters_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListClustersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListClusters(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/ListClusters", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListClusters(ctx, req.(*ListClustersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_UpdateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).UpdateCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/UpdateCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).UpdateCluster(ctx, req.(*UpdateClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_GetSecret_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSecretRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetSecret(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetSecret", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetSecret(ctx, req.(*GetSecretRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_UpdateSecret_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateSecretRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).UpdateSecret(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/UpdateSecret", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).UpdateSecret(ctx, req.(*UpdateSecretRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListSecrets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSecretsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListSecrets(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: 
srv, + FullMethod: "/docker.swarmkit.v1.Control/ListSecrets", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListSecrets(ctx, req.(*ListSecretsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_CreateSecret_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSecretRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).CreateSecret(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/CreateSecret", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).CreateSecret(ctx, req.(*CreateSecretRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_RemoveSecret_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveSecretRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).RemoveSecret(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/RemoveSecret", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).RemoveSecret(ctx, req.(*RemoveSecretRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_GetConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetConfig(ctx, req.(*GetConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_UpdateConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).UpdateConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/UpdateConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).UpdateConfig(ctx, req.(*UpdateConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListConfigsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListConfigs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/ListConfigs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListConfigs(ctx, req.(*ListConfigsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Control_CreateConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).CreateConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/CreateConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).CreateConfig(ctx, req.(*CreateConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_RemoveConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).RemoveConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/RemoveConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).RemoveConfig(ctx, req.(*RemoveConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Control_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.Control", + HandlerType: (*ControlServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetNode", + Handler: _Control_GetNode_Handler, + }, + { + MethodName: "ListNodes", + Handler: _Control_ListNodes_Handler, + }, + { + MethodName: "UpdateNode", + Handler: _Control_UpdateNode_Handler, + }, + { + MethodName: "RemoveNode", + Handler: _Control_RemoveNode_Handler, + }, + { + MethodName: "GetTask", + Handler: _Control_GetTask_Handler, + }, + { + MethodName: "ListTasks", + Handler: _Control_ListTasks_Handler, + }, + { + MethodName: "RemoveTask", + Handler: _Control_RemoveTask_Handler, + }, + { + MethodName: "GetService", + Handler: _Control_GetService_Handler, + }, + { + MethodName: "ListServices", + Handler: _Control_ListServices_Handler, + }, + { + MethodName: "CreateService", + Handler: _Control_CreateService_Handler, + }, + { + MethodName: "UpdateService", + Handler: _Control_UpdateService_Handler, + }, + { + MethodName: "RemoveService", + Handler: _Control_RemoveService_Handler, + }, + { + MethodName: "GetNetwork", + Handler: _Control_GetNetwork_Handler, + }, + { + MethodName: "ListNetworks", + Handler: _Control_ListNetworks_Handler, + }, + { + MethodName: "CreateNetwork", + Handler: _Control_CreateNetwork_Handler, + }, + { + MethodName: "RemoveNetwork", + Handler: _Control_RemoveNetwork_Handler, + }, + { + MethodName: "GetCluster", + Handler: _Control_GetCluster_Handler, + }, + { + MethodName: "ListClusters", + Handler: _Control_ListClusters_Handler, + }, + { + MethodName: "UpdateCluster", + Handler: _Control_UpdateCluster_Handler, + }, + { + MethodName: "GetSecret", + Handler: _Control_GetSecret_Handler, + }, + { + MethodName: "UpdateSecret", + Handler: _Control_UpdateSecret_Handler, + }, + { + MethodName: "ListSecrets", + Handler: _Control_ListSecrets_Handler, + }, + { + MethodName: "CreateSecret", + Handler: _Control_CreateSecret_Handler, + }, + { + MethodName: "RemoveSecret", + Handler: _Control_RemoveSecret_Handler, + }, + { + MethodName: "GetConfig", + Handler: _Control_GetConfig_Handler, + }, + { + MethodName: "UpdateConfig", + Handler: _Control_UpdateConfig_Handler, + }, + { + MethodName: "ListConfigs", + Handler: 
_Control_ListConfigs_Handler, + }, + { + MethodName: "CreateConfig", + Handler: _Control_CreateConfig_Handler, + }, + { + MethodName: "RemoveConfig", + Handler: _Control_RemoveConfig_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/docker/swarmkit/api/control.proto", +} + +func (m *GetNodeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetNodeRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NodeID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + return i, nil +} + +func (m *GetNodeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetNodeResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Node != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Node.Size())) + n1, err := m.Node.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + return i, nil +} + +func (m *ListNodesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListNodesRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n2, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func (m *ListNodesRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListNodesRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.Memberships) > 0 { + for _, num := range m.Memberships { + dAtA[i] = 0x20 + i++ + i = encodeVarintControl(dAtA, i, uint64(num)) + } + } + if len(m.Roles) > 0 { + for _, num := range m.Roles { + dAtA[i] = 0x28 + i++ + i = encodeVarintControl(dAtA, i, uint64(num)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x32 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } 
+ dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListNodesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListNodesResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Nodes) > 0 { + for _, msg := range m.Nodes { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *UpdateNodeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateNodeRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NodeID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + if m.NodeVersion != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.NodeVersion.Size())) + n3, err := m.NodeVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.Spec != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n4, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} + +func (m *UpdateNodeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateNodeResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Node != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Node.Size())) + n5, err := m.Node.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + return i, nil +} + +func (m *RemoveNodeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveNodeRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NodeID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + if m.Force { + dAtA[i] = 0x10 + i++ + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *RemoveNodeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveNodeResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *GetTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetTaskRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.TaskID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.TaskID))) + i += copy(dAtA[i:], m.TaskID) + } + return i, nil +} + +func (m *GetTaskResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + 
dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetTaskResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Task != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Task.Size())) + n6, err := m.Task.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} + +func (m *RemoveTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveTaskRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.TaskID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.TaskID))) + i += copy(dAtA[i:], m.TaskID) + } + return i, nil +} + +func (m *RemoveTaskResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveTaskResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *ListTasksRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListTasksRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n7, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} + +func (m *ListTasksRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListTasksRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.ServiceIDs) > 0 { + for _, s := range m.ServiceIDs { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.NodeIDs) > 0 { + for _, s := range m.NodeIDs { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.DesiredStates) > 0 
{ + for _, num := range m.DesiredStates { + dAtA[i] = 0x30 + i++ + i = encodeVarintControl(dAtA, i, uint64(num)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x3a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.UpToDate { + dAtA[i] = 0x40 + i++ + if m.UpToDate { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Runtimes) > 0 { + for _, s := range m.Runtimes { + dAtA[i] = 0x4a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListTasksResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListTasksResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Tasks) > 0 { + for _, msg := range m.Tasks { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CreateServiceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateServiceRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Spec != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n8, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} + +func (m *CreateServiceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateServiceResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Service != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Service.Size())) + n9, err := m.Service.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} + +func (m *GetServiceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetServiceRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ServiceID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ServiceID))) + i += copy(dAtA[i:], m.ServiceID) + } + if m.InsertDefaults { + dAtA[i] = 0x10 + i++ + if m.InsertDefaults { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *GetServiceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetServiceResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Service != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Service.Size())) + n10, err := m.Service.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + return i, nil +} + +func (m 
*UpdateServiceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateServiceRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ServiceID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ServiceID))) + i += copy(dAtA[i:], m.ServiceID) + } + if m.ServiceVersion != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.ServiceVersion.Size())) + n11, err := m.ServiceVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.Spec != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n12, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + if m.Rollback != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Rollback)) + } + return i, nil +} + +func (m *UpdateServiceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateServiceResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Service != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Service.Size())) + n13, err := m.Service.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + return i, nil +} + +func (m *RemoveServiceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveServiceRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ServiceID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ServiceID))) + i += copy(dAtA[i:], m.ServiceID) + } + return i, nil +} + +func (m *RemoveServiceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveServiceResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *ListServicesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListServicesRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n14, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + return i, nil +} + +func (m *ListServicesRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListServicesRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for 
_, s := range m.IDPrefixes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Runtimes) > 0 { + for _, s := range m.Runtimes { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListServicesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListServicesResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Services) > 0 { + for _, msg := range m.Services { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CreateNetworkRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateNetworkRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Spec != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n15, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + return i, nil +} + +func (m *CreateNetworkResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateNetworkResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Network != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Network.Size())) + n16, err := m.Network.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + return i, nil +} + +func (m *GetNetworkRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetNetworkRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.NetworkID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.NetworkID))) + i += copy(dAtA[i:], m.NetworkID) + } + return i, nil +} + +func (m *GetNetworkResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetNetworkResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Network != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Network.Size())) + n17, err := m.Network.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + return i, nil +} + +func (m *RemoveNetworkRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveNetworkRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.NetworkID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.NetworkID))) + i += copy(dAtA[i:], m.NetworkID) + } + return i, nil +} + +func (m *RemoveNetworkResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveNetworkResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *ListNetworksRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListNetworksRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n18, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + return i, nil +} + +func (m *ListNetworksRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListNetworksRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListNetworksResponse) Marshal() (dAtA []byte, err error) { + 
size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListNetworksResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Networks) > 0 { + for _, msg := range m.Networks { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *GetClusterRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetClusterRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ClusterID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ClusterID))) + i += copy(dAtA[i:], m.ClusterID) + } + return i, nil +} + +func (m *GetClusterResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetClusterResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Cluster != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Cluster.Size())) + n19, err := m.Cluster.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + return i, nil +} + +func (m *ListClustersRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListClustersRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n20, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + } + return i, nil +} + +func (m *ListClustersRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListClustersRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i 
+= copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListClustersResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListClustersResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Clusters) > 0 { + for _, msg := range m.Clusters { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *KeyRotation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyRotation) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.WorkerJoinToken { + dAtA[i] = 0x8 + i++ + if m.WorkerJoinToken { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.ManagerJoinToken { + dAtA[i] = 0x10 + i++ + if m.ManagerJoinToken { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.ManagerUnlockKey { + dAtA[i] = 0x18 + i++ + if m.ManagerUnlockKey { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *UpdateClusterRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateClusterRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ClusterID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ClusterID))) + i += copy(dAtA[i:], m.ClusterID) + } + if m.ClusterVersion != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.ClusterVersion.Size())) + n21, err := m.ClusterVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + } + if m.Spec != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n22, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + dAtA[i] = 0x22 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Rotation.Size())) + n23, err := m.Rotation.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + return i, nil +} + +func (m *UpdateClusterResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateClusterResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Cluster != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Cluster.Size())) + n24, err := m.Cluster.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + } + return i, nil +} + +func (m *GetSecretRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSecretRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SecretID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.SecretID))) + i += copy(dAtA[i:], m.SecretID) + } + return i, nil +} + +func (m *GetSecretResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + 
dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSecretResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Secret != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Secret.Size())) + n25, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + } + return i, nil +} + +func (m *UpdateSecretRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateSecretRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SecretID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.SecretID))) + i += copy(dAtA[i:], m.SecretID) + } + if m.SecretVersion != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.SecretVersion.Size())) + n26, err := m.SecretVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } + if m.Spec != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n27, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + } + return i, nil +} + +func (m *UpdateSecretResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateSecretResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Secret != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Secret.Size())) + n28, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + } + return i, nil +} + +func (m *ListSecretsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListSecretsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n29, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + } + return i, nil +} + +func (m *ListSecretsRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListSecretsRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, 
i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListSecretsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListSecretsResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Secrets) > 0 { + for _, msg := range m.Secrets { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CreateSecretRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateSecretRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Spec != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n30, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + } + return i, nil +} + +func (m *CreateSecretResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateSecretResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Secret != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Secret.Size())) + n31, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + } + return i, nil +} + +func (m *RemoveSecretRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveSecretRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SecretID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.SecretID))) + i += copy(dAtA[i:], m.SecretID) + } + return i, nil +} + +func (m *RemoveSecretResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveSecretResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *GetConfigRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetConfigRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ConfigID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ConfigID))) + i += copy(dAtA[i:], m.ConfigID) + } + return i, nil +} + +func (m *GetConfigResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + 
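The literal bytes 0xa, 0x12, 0x1a, 0x22 written before each field above are protobuf keys, computed as (fieldNumber << 3) | wireType, where wire type 2 marks length-delimited data (strings, embedded messages, map entries). A small sketch:

package main

import "fmt"

// key computes a protobuf field key: field number shifted left three bits,
// ORed with the wire type.
func key(fieldNumber, wireType uint64) uint64 { return fieldNumber<<3 | wireType }

func main() {
	for f := uint64(1); f <= 4; f++ {
		fmt.Printf("field %d, wire type 2 -> %#x\n", f, key(f, 2))
	}
	// field 1 -> 0xa, field 2 -> 0x12, field 3 -> 0x1a, field 4 -> 0x22
}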
return nil, err + } + return dAtA[:n], nil +} + +func (m *GetConfigResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Config != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Config.Size())) + n32, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + } + return i, nil +} + +func (m *UpdateConfigRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateConfigRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ConfigID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ConfigID))) + i += copy(dAtA[i:], m.ConfigID) + } + if m.ConfigVersion != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.ConfigVersion.Size())) + n33, err := m.ConfigVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + } + if m.Spec != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n34, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + } + return i, nil +} + +func (m *UpdateConfigResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateConfigResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Config != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Config.Size())) + n35, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + } + return i, nil +} + +func (m *ListConfigsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListConfigsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n36, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + } + return i, nil +} + +func (m *ListConfigsRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListConfigsRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = 
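The Labels maps above are wire-encoded as a repeated nested entry message, key in field 1 and value in field 2; the mapSize expression in MarshalTo is just the tag-plus-length-prefix cost of those two inner fields. A sketch under that reading, with sov as a local copy of sovControl:

package main

import "fmt"

// sov is a local copy of sovControl: the number of bytes x takes as a varint.
func sov(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

// entrySize is the encoded size of one map entry body: tag + length prefix +
// bytes for the key (field 1), then the same for the value (field 2).
func entrySize(k, v string) int {
	return 1 + len(k) + sov(uint64(len(k))) +
		1 + len(v) + sov(uint64(len(v)))
}

func main() {
	fmt.Println(entrySize("env", "prod")) // Output: 11
}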
encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListConfigsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListConfigsResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Configs) > 0 { + for _, msg := range m.Configs { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CreateConfigRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateConfigRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Spec != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n37, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + } + return i, nil +} + +func (m *CreateConfigResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateConfigResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Config != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Config.Size())) + n38, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + } + return i, nil +} + +func (m *RemoveConfigRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveConfigRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ConfigID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ConfigID))) + i += copy(dAtA[i:], m.ConfigID) + } + return i, nil +} + +func (m *RemoveConfigResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveConfigResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func encodeVarintControl(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyControlServer struct { + local ControlServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyControlServer(local ControlServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) ControlServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, 
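raftProxyControlServer keeps two context-modifier chains, one applied when this node is itself the raft leader and one (with the redirect bookkeeping first) when forwarding to a remote leader. The runCtxMods helper that follows threads a context through such a chain, stopping at the first error; a standalone sketch of that shape:

package raftproxy

import "context"

type ctxMod func(context.Context) (context.Context, error)

// chain applies each modifier to the context in order, aborting on the first
// error, exactly the shape of runCtxMods in the generated proxy.
func chain(ctx context.Context, mods []ctxMod) (context.Context, error) {
	var err error
	for _, mod := range mods {
		if ctx, err = mod(ctx); err != nil {
			return ctx, err
		}
	}
	return ctx, nil
}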
status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyControlServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyControlServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyControlServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyControlServer) GetNode(ctx context.Context, r *GetNodeRequest) (*GetNodeResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetNode(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetNode(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetNode(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetNode(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListNodes(ctx context.Context, r *ListNodesRequest) (*ListNodesResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListNodes(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).ListNodes(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == 
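The redirectChecker above is a loop guard: each forwarding hop records its peer address under a "redirect" metadata key, and a request that already carries the key is rejected with ResourceExhausted rather than being bounced between stale leaders a second time. A self-contained sketch of the same logic (the md.Copy is an addition; the generated code mutates the incoming metadata in place):

package raftproxy

import (
	"context"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/peer"
	"google.golang.org/grpc/status"
)

// checkRedirect rejects a request that has already been forwarded once, then
// records this hop's peer address for the next server to see.
func checkRedirect(ctx context.Context) (context.Context, error) {
	p, ok := peer.FromContext(ctx)
	if !ok {
		return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context")
	}
	md, ok := metadata.FromIncomingContext(ctx)
	if ok && len(md["redirect"]) != 0 {
		return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
	}
	if !ok {
		md = metadata.New(nil)
	}
	md = md.Copy() // unlike the generated code, avoid mutating the incoming metadata
	md["redirect"] = append(md["redirect"], p.Addr.String())
	return metadata.NewOutgoingContext(ctx, md), nil
}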
raftselector.ErrIsLeader { + return p.local.ListNodes(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListNodes(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) UpdateNode(ctx context.Context, r *UpdateNodeRequest) (*UpdateNodeResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.UpdateNode(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).UpdateNode(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.UpdateNode(ctx, r) + } + return nil, err + } + return NewControlClient(conn).UpdateNode(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) RemoveNode(ctx context.Context, r *RemoveNodeRequest) (*RemoveNodeResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.RemoveNode(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).RemoveNode(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.RemoveNode(ctx, r) + } + return nil, err + } + return NewControlClient(conn).RemoveNode(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) GetTask(ctx context.Context, r *GetTaskRequest) (*GetTaskResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetTask(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetTask(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetTask(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetTask(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListTasks(ctx context.Context, r *ListTasksRequest) (*ListTasksResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListTasks(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } 
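When the cached leader connection fails, every method falls back to pollNewLeaderConn, which re-resolves the leader every 500ms and health-checks it until one reports itself serving or the caller's context expires. A hedged sketch of that loop, with leaderConn and isServing as hypothetical stand-ins for raftselector.ConnProvider.LeaderConn and the HealthClient Check call:

package raftproxy

import (
	"context"
	"time"
)

// pollNewLeader retries leader resolution on a fixed tick, returning the
// first leader that passes the health check.
func pollNewLeader(
	ctx context.Context,
	leaderConn func(context.Context) (string, error),
	isServing func(context.Context, string) bool,
) (string, error) {
	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			addr, err := leaderConn(ctx)
			if err != nil {
				return "", err
			}
			if !isServing(ctx, addr) {
				continue // a leader exists but is not serving yet; keep polling
			}
			return addr, nil
		case <-ctx.Done():
			return "", ctx.Err()
		}
	}
}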
+ + resp, err := NewControlClient(conn).ListTasks(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListTasks(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListTasks(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) RemoveTask(ctx context.Context, r *RemoveTaskRequest) (*RemoveTaskResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.RemoveTask(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).RemoveTask(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.RemoveTask(ctx, r) + } + return nil, err + } + return NewControlClient(conn).RemoveTask(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) GetService(ctx context.Context, r *GetServiceRequest) (*GetServiceResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetService(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetService(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetService(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetService(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListServices(ctx context.Context, r *ListServicesRequest) (*ListServicesResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListServices(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).ListServices(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListServices(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListServices(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) CreateService(ctx context.Context, r 
*CreateServiceRequest) (*CreateServiceResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.CreateService(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).CreateService(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.CreateService(ctx, r) + } + return nil, err + } + return NewControlClient(conn).CreateService(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) UpdateService(ctx context.Context, r *UpdateServiceRequest) (*UpdateServiceResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.UpdateService(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).UpdateService(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.UpdateService(ctx, r) + } + return nil, err + } + return NewControlClient(conn).UpdateService(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) RemoveService(ctx context.Context, r *RemoveServiceRequest) (*RemoveServiceResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.RemoveService(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).RemoveService(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.RemoveService(ctx, r) + } + return nil, err + } + return NewControlClient(conn).RemoveService(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) GetNetwork(ctx context.Context, r *GetNetworkRequest) (*GetNetworkResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetNetwork(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetNetwork(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && 
!strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetNetwork(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetNetwork(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListNetworks(ctx context.Context, r *ListNetworksRequest) (*ListNetworksResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListNetworks(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).ListNetworks(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListNetworks(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListNetworks(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) CreateNetwork(ctx context.Context, r *CreateNetworkRequest) (*CreateNetworkResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.CreateNetwork(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).CreateNetwork(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.CreateNetwork(ctx, r) + } + return nil, err + } + return NewControlClient(conn).CreateNetwork(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) RemoveNetwork(ctx context.Context, r *RemoveNetworkRequest) (*RemoveNetworkResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.RemoveNetwork(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).RemoveNetwork(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.RemoveNetwork(ctx, r) + } + return nil, err + } + return NewControlClient(conn).RemoveNetwork(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) GetCluster(ctx context.Context, r *GetClusterRequest) (*GetClusterResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + 
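Each proxy method repeats the same triple substring test to decide whether an error is connection-level (worth re-polling the leader and retrying once) or application-level (returned to the caller as-is). Factored out, the shared predicate is just:

package raftproxy

import "strings"

// isTransportErr mirrors the triple strings.Contains test repeated in every
// generated method: only connection-level failures trigger a leader re-poll.
func isTransportErr(err error) bool {
	if err == nil {
		return false
	}
	msg := err.Error()
	return strings.Contains(msg, "is closing") ||
		strings.Contains(msg, "the connection is unavailable") ||
		strings.Contains(msg, "connection error")
}

Matching on err.Error() text is brittle across gRPC versions; newer code would more likely inspect status.Code(err) (typically codes.Unavailable for transport failures), but these proxies are generated and predate that convention.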
if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetCluster(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetCluster(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetCluster(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetCluster(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListClusters(ctx context.Context, r *ListClustersRequest) (*ListClustersResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListClusters(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).ListClusters(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListClusters(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListClusters(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) UpdateCluster(ctx context.Context, r *UpdateClusterRequest) (*UpdateClusterResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.UpdateCluster(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).UpdateCluster(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.UpdateCluster(ctx, r) + } + return nil, err + } + return NewControlClient(conn).UpdateCluster(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) GetSecret(ctx context.Context, r *GetSecretRequest) (*GetSecretResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetSecret(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetSecret(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + 
conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetSecret(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetSecret(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) UpdateSecret(ctx context.Context, r *UpdateSecretRequest) (*UpdateSecretResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.UpdateSecret(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).UpdateSecret(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.UpdateSecret(ctx, r) + } + return nil, err + } + return NewControlClient(conn).UpdateSecret(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListSecrets(ctx context.Context, r *ListSecretsRequest) (*ListSecretsResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListSecrets(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).ListSecrets(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListSecrets(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListSecrets(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) CreateSecret(ctx context.Context, r *CreateSecretRequest) (*CreateSecretResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.CreateSecret(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).CreateSecret(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.CreateSecret(ctx, r) + } + return nil, err + } + return NewControlClient(conn).CreateSecret(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) RemoveSecret(ctx context.Context, r *RemoveSecretRequest) (*RemoveSecretResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return 
p.local.RemoveSecret(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).RemoveSecret(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.RemoveSecret(ctx, r) + } + return nil, err + } + return NewControlClient(conn).RemoveSecret(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) GetConfig(ctx context.Context, r *GetConfigRequest) (*GetConfigResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetConfig(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetConfig(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetConfig(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetConfig(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) UpdateConfig(ctx context.Context, r *UpdateConfigRequest) (*UpdateConfigResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.UpdateConfig(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).UpdateConfig(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.UpdateConfig(ctx, r) + } + return nil, err + } + return NewControlClient(conn).UpdateConfig(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListConfigs(ctx context.Context, r *ListConfigsRequest) (*ListConfigsResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListConfigs(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).ListConfigs(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListConfigs(ctx, r) + } + return nil, err + } + 
return NewControlClient(conn).ListConfigs(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) CreateConfig(ctx context.Context, r *CreateConfigRequest) (*CreateConfigResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.CreateConfig(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).CreateConfig(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.CreateConfig(ctx, r) + } + return nil, err + } + return NewControlClient(conn).CreateConfig(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) RemoveConfig(ctx context.Context, r *RemoveConfigRequest) (*RemoveConfigResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.RemoveConfig(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).RemoveConfig(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.RemoveConfig(ctx, r) + } + return nil, err + } + return NewControlClient(conn).RemoveConfig(modCtx, r) + } + return resp, err +} + +func (m *GetNodeRequest) Size() (n int) { + var l int + _ = l + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetNodeResponse) Size() (n int) { + var l int + _ = l + if m.Node != nil { + l = m.Node.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListNodesRequest) Size() (n int) { + var l int + _ = l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListNodesRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.Memberships) > 0 { + for _, e := range m.Memberships { + n += 1 + sovControl(uint64(e)) + } + } + if len(m.Roles) > 0 { + for _, e := range m.Roles { + n += 1 + sovControl(uint64(e)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListNodesResponse) Size() (n int) { + var l int + _ = l + if len(m.Nodes) > 0 { + for _, e := 
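Every Size() method that follows applies one rule: a length-delimited field costs one tag byte (for field numbers under 16), a varint length prefix, and the payload itself. Marshal depends on these sizes being exact, since it allocates dAtA once up front and MarshalTo never grows it. In sketch form, with sov copying sovControl:

package raftproxy

// sov is a copy of sovControl: bytes needed to hold x as a varint.
func sov(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

// fieldSize is the wire cost of one length-delimited field carrying l bytes
// of payload: one tag byte, the varint length prefix, then the payload.
func fieldSize(l int) int {
	return 1 + l + sov(uint64(l))
}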
range m.Nodes { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *UpdateNodeRequest) Size() (n int) { + var l int + _ = l + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.NodeVersion != nil { + l = m.NodeVersion.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *UpdateNodeResponse) Size() (n int) { + var l int + _ = l + if m.Node != nil { + l = m.Node.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveNodeRequest) Size() (n int) { + var l int + _ = l + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Force { + n += 2 + } + return n +} + +func (m *RemoveNodeResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *GetTaskRequest) Size() (n int) { + var l int + _ = l + l = len(m.TaskID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetTaskResponse) Size() (n int) { + var l int + _ = l + if m.Task != nil { + l = m.Task.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveTaskRequest) Size() (n int) { + var l int + _ = l + l = len(m.TaskID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveTaskResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *ListTasksRequest) Size() (n int) { + var l int + _ = l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListTasksRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.ServiceIDs) > 0 { + for _, s := range m.ServiceIDs { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.NodeIDs) > 0 { + for _, s := range m.NodeIDs { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.DesiredStates) > 0 { + for _, e := range m.DesiredStates { + n += 1 + sovControl(uint64(e)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if m.UpToDate { + n += 2 + } + if len(m.Runtimes) > 0 { + for _, s := range m.Runtimes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListTasksResponse) Size() (n int) { + var l int + _ = l + if len(m.Tasks) > 0 { + for _, e := range m.Tasks { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *CreateServiceRequest) Size() (n int) { + var l int + _ = l + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *CreateServiceResponse) Size() (n int) { + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetServiceRequest) Size() (n int) { + var l int + _ = l + l = len(m.ServiceID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.InsertDefaults { + n += 2 + } + return n +} + +func (m *GetServiceResponse) Size() (n int) { + var l 
int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *UpdateServiceRequest) Size() (n int) { + var l int + _ = l + l = len(m.ServiceID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.ServiceVersion != nil { + l = m.ServiceVersion.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.Rollback != 0 { + n += 1 + sovControl(uint64(m.Rollback)) + } + return n +} + +func (m *UpdateServiceResponse) Size() (n int) { + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveServiceRequest) Size() (n int) { + var l int + _ = l + l = len(m.ServiceID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveServiceResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *ListServicesRequest) Size() (n int) { + var l int + _ = l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListServicesRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Runtimes) > 0 { + for _, s := range m.Runtimes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListServicesResponse) Size() (n int) { + var l int + _ = l + if len(m.Services) > 0 { + for _, e := range m.Services { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *CreateNetworkRequest) Size() (n int) { + var l int + _ = l + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *CreateNetworkResponse) Size() (n int) { + var l int + _ = l + if m.Network != nil { + l = m.Network.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetNetworkRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.NetworkID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetNetworkResponse) Size() (n int) { + var l int + _ = l + if m.Network != nil { + l = m.Network.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveNetworkRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.NetworkID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveNetworkResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *ListNetworksRequest) Size() (n int) { + var l int + _ = l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListNetworksRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovControl(uint64(l)) 
+ } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListNetworksResponse) Size() (n int) { + var l int + _ = l + if len(m.Networks) > 0 { + for _, e := range m.Networks { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *GetClusterRequest) Size() (n int) { + var l int + _ = l + l = len(m.ClusterID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetClusterResponse) Size() (n int) { + var l int + _ = l + if m.Cluster != nil { + l = m.Cluster.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListClustersRequest) Size() (n int) { + var l int + _ = l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListClustersRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListClustersResponse) Size() (n int) { + var l int + _ = l + if len(m.Clusters) > 0 { + for _, e := range m.Clusters { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *KeyRotation) Size() (n int) { + var l int + _ = l + if m.WorkerJoinToken { + n += 2 + } + if m.ManagerJoinToken { + n += 2 + } + if m.ManagerUnlockKey { + n += 2 + } + return n +} + +func (m *UpdateClusterRequest) Size() (n int) { + var l int + _ = l + l = len(m.ClusterID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.ClusterVersion != nil { + l = m.ClusterVersion.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + l = m.Rotation.Size() + n += 1 + l + sovControl(uint64(l)) + return n +} + +func (m *UpdateClusterResponse) Size() (n int) { + var l int + _ = l + if m.Cluster != nil { + l = m.Cluster.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetSecretRequest) Size() (n int) { + var l int + _ = l + l = len(m.SecretID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetSecretResponse) Size() (n int) { + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *UpdateSecretRequest) Size() (n int) { + var l int + _ = l + l = len(m.SecretID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.SecretVersion != nil { + l = m.SecretVersion.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + 
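The fixed n += 2 for bool fields above is one tag byte plus a one-byte varint (0 or 1); unset flags are skipped entirely. Note also that the non-pointer embedded UpdateClusterRequest.Rotation is sized (and marshaled) unconditionally, since there is no nil to test. For example, a KeyRotation with all three flags set encodes to exactly six bytes:

package raftproxy

// keyRotationSize mirrors KeyRotation.Size() above: each set flag costs a tag
// byte plus a one-byte varint, so all three flags together occupy six bytes.
func keyRotationSize(worker, manager, unlock bool) (n int) {
	for _, b := range []bool{worker, manager, unlock} {
		if b {
			n += 2
		}
	}
	return n
}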
+func (m *UpdateSecretResponse) Size() (n int) { + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListSecretsRequest) Size() (n int) { + var l int + _ = l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListSecretsRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListSecretsResponse) Size() (n int) { + var l int + _ = l + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *CreateSecretRequest) Size() (n int) { + var l int + _ = l + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *CreateSecretResponse) Size() (n int) { + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveSecretRequest) Size() (n int) { + var l int + _ = l + l = len(m.SecretID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveSecretResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *GetConfigRequest) Size() (n int) { + var l int + _ = l + l = len(m.ConfigID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetConfigResponse) Size() (n int) { + var l int + _ = l + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *UpdateConfigRequest) Size() (n int) { + var l int + _ = l + l = len(m.ConfigID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.ConfigVersion != nil { + l = m.ConfigVersion.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *UpdateConfigResponse) Size() (n int) { + var l int + _ = l + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListConfigsRequest) Size() (n int) { + var l int + _ = l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListConfigsRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListConfigsResponse) Size() 
(n int) { + var l int + _ = l + if len(m.Configs) > 0 { + for _, e := range m.Configs { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *CreateConfigRequest) Size() (n int) { + var l int + _ = l + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *CreateConfigResponse) Size() (n int) { + var l int + _ = l + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveConfigRequest) Size() (n int) { + var l int + _ = l + l = len(m.ConfigID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveConfigResponse) Size() (n int) { + var l int + _ = l + return n +} + +func sovControl(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozControl(x uint64) (n int) { + return sovControl(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *GetNodeRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetNodeRequest{`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `}`, + }, "") + return s +} +func (this *GetNodeResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetNodeResponse{`, + `Node:` + strings.Replace(fmt.Sprintf("%v", this.Node), "Node", "Node", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListNodesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListNodesRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListNodesRequest_Filters", "ListNodesRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListNodesRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ListNodesRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Memberships:` + fmt.Sprintf("%v", this.Memberships) + `,`, + `Roles:` + fmt.Sprintf("%v", this.Roles) + `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `}`, + }, "") + return s +} +func (this *ListNodesResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListNodesResponse{`, + `Nodes:` + strings.Replace(fmt.Sprintf("%v", this.Nodes), "Node", "Node", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateNodeRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateNodeRequest{`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `NodeVersion:` + strings.Replace(fmt.Sprintf("%v", this.NodeVersion), "Version", "Version", 1) + `,`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "NodeSpec", "NodeSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateNodeResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateNodeResponse{`, + `Node:` + strings.Replace(fmt.Sprintf("%v", this.Node), "Node", "Node", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveNodeRequest) String() 
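sozControl above sizes signed varints via zigzag encoding, which interleaves negatives with positives (0, -1, 1, -2, 2 map to 0, 1, 2, 3, 4) so that small negative values stay small, and therefore short, on the wire. A sketch:

package main

import "fmt"

// zigzag maps signed values onto unsigned ones, alternating sign as the
// magnitude grows, exactly the (x << 1) ^ (x >> 63) form used by sozControl.
func zigzag(x int64) uint64 { return uint64(x<<1) ^ uint64(x>>63) }

func main() {
	for _, v := range []int64{0, -1, 1, -2, 2} {
		fmt.Printf("%d -> %d\n", v, zigzag(v))
	}
	// Output: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4
}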
string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveNodeRequest{`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `Force:` + fmt.Sprintf("%v", this.Force) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveNodeResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveNodeResponse{`, + `}`, + }, "") + return s +} +func (this *GetTaskRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetTaskRequest{`, + `TaskID:` + fmt.Sprintf("%v", this.TaskID) + `,`, + `}`, + }, "") + return s +} +func (this *GetTaskResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetTaskResponse{`, + `Task:` + strings.Replace(fmt.Sprintf("%v", this.Task), "Task", "Task", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveTaskRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveTaskRequest{`, + `TaskID:` + fmt.Sprintf("%v", this.TaskID) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveTaskResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveTaskResponse{`, + `}`, + }, "") + return s +} +func (this *ListTasksRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListTasksRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListTasksRequest_Filters", "ListTasksRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListTasksRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ListTasksRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels + `,`, + `ServiceIDs:` + fmt.Sprintf("%v", this.ServiceIDs) + `,`, + `NodeIDs:` + fmt.Sprintf("%v", this.NodeIDs) + `,`, + `DesiredStates:` + fmt.Sprintf("%v", this.DesiredStates) + `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `UpToDate:` + fmt.Sprintf("%v", this.UpToDate) + `,`, + `Runtimes:` + fmt.Sprintf("%v", this.Runtimes) + `,`, + `}`, + }, "") + return s +} +func (this *ListTasksResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListTasksResponse{`, + `Tasks:` + strings.Replace(fmt.Sprintf("%v", this.Tasks), "Task", "Task", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateServiceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateServiceRequest{`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "ServiceSpec", "ServiceSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateServiceResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateServiceResponse{`, + `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "Service", "Service", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GetServiceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetServiceRequest{`, + 
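The String() methods that render label maps all take the same detour seen above: Go randomizes map iteration order, so the generated code collects and sorts the keys (via gogo's sortkeys package) before concatenating, which makes the output deterministic. A self-contained sketch of the same idea using the stdlib's sort.Strings in place of sortkeys.Strings, with hypothetical labels:

    package main

    import (
        "fmt"
        "sort"
    )

    func main() {
        labels := map[string]string{"b": "2", "a": "1"}

        // Collect and sort the keys so repeated calls print identically.
        keys := make([]string, 0, len(labels))
        for k := range labels {
            keys = append(keys, k)
        }
        sort.Strings(keys)

        out := "map[string]string{"
        for _, k := range keys {
            out += fmt.Sprintf("%v: %v,", k, labels[k])
        }
        out += "}"
        fmt.Println(out) // map[string]string{a: 1,b: 2,} — deterministic
    }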
`ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`, + `InsertDefaults:` + fmt.Sprintf("%v", this.InsertDefaults) + `,`, + `}`, + }, "") + return s +} +func (this *GetServiceResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetServiceResponse{`, + `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "Service", "Service", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateServiceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateServiceRequest{`, + `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`, + `ServiceVersion:` + strings.Replace(fmt.Sprintf("%v", this.ServiceVersion), "Version", "Version", 1) + `,`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "ServiceSpec", "ServiceSpec", 1) + `,`, + `Rollback:` + fmt.Sprintf("%v", this.Rollback) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateServiceResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateServiceResponse{`, + `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "Service", "Service", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveServiceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveServiceRequest{`, + `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveServiceResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveServiceResponse{`, + `}`, + }, "") + return s +} +func (this *ListServicesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListServicesRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListServicesRequest_Filters", "ListServicesRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListServicesRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ListServicesRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels + `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `Runtimes:` + fmt.Sprintf("%v", this.Runtimes) + `,`, + `}`, + }, "") + return s +} +func (this *ListServicesResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListServicesResponse{`, + `Services:` + strings.Replace(fmt.Sprintf("%v", this.Services), "Service", "Service", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateNetworkRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateNetworkRequest{`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "NetworkSpec", "NetworkSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateNetworkResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateNetworkResponse{`, + `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`, + `}`, + }, "") + return s +} +func 
(this *GetNetworkRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetNetworkRequest{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `NetworkID:` + fmt.Sprintf("%v", this.NetworkID) + `,`, + `}`, + }, "") + return s +} +func (this *GetNetworkResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetNetworkResponse{`, + `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveNetworkRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveNetworkRequest{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `NetworkID:` + fmt.Sprintf("%v", this.NetworkID) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveNetworkResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveNetworkResponse{`, + `}`, + }, "") + return s +} +func (this *ListNetworksRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListNetworksRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListNetworksRequest_Filters", "ListNetworksRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListNetworksRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ListNetworksRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels + `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `}`, + }, "") + return s +} +func (this *ListNetworksResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListNetworksResponse{`, + `Networks:` + strings.Replace(fmt.Sprintf("%v", this.Networks), "Network", "Network", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GetClusterRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetClusterRequest{`, + `ClusterID:` + fmt.Sprintf("%v", this.ClusterID) + `,`, + `}`, + }, "") + return s +} +func (this *GetClusterResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetClusterResponse{`, + `Cluster:` + strings.Replace(fmt.Sprintf("%v", this.Cluster), "Cluster", "Cluster", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListClustersRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListClustersRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListClustersRequest_Filters", "ListClustersRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListClustersRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, 
this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ListClustersRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels + `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `}`, + }, "") + return s +} +func (this *ListClustersResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListClustersResponse{`, + `Clusters:` + strings.Replace(fmt.Sprintf("%v", this.Clusters), "Cluster", "Cluster", 1) + `,`, + `}`, + }, "") + return s +} +func (this *KeyRotation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KeyRotation{`, + `WorkerJoinToken:` + fmt.Sprintf("%v", this.WorkerJoinToken) + `,`, + `ManagerJoinToken:` + fmt.Sprintf("%v", this.ManagerJoinToken) + `,`, + `ManagerUnlockKey:` + fmt.Sprintf("%v", this.ManagerUnlockKey) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateClusterRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateClusterRequest{`, + `ClusterID:` + fmt.Sprintf("%v", this.ClusterID) + `,`, + `ClusterVersion:` + strings.Replace(fmt.Sprintf("%v", this.ClusterVersion), "Version", "Version", 1) + `,`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "ClusterSpec", "ClusterSpec", 1) + `,`, + `Rotation:` + strings.Replace(strings.Replace(this.Rotation.String(), "KeyRotation", "KeyRotation", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateClusterResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateClusterResponse{`, + `Cluster:` + strings.Replace(fmt.Sprintf("%v", this.Cluster), "Cluster", "Cluster", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GetSecretRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetSecretRequest{`, + `SecretID:` + fmt.Sprintf("%v", this.SecretID) + `,`, + `}`, + }, "") + return s +} +func (this *GetSecretResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetSecretResponse{`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateSecretRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateSecretRequest{`, + `SecretID:` + fmt.Sprintf("%v", this.SecretID) + `,`, + `SecretVersion:` + strings.Replace(fmt.Sprintf("%v", this.SecretVersion), "Version", "Version", 1) + `,`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "SecretSpec", "SecretSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateSecretResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateSecretResponse{`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListSecretsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListSecretsRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListSecretsRequest_Filters", "ListSecretsRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListSecretsRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = 
append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ListSecretsRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels + `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `}`, + }, "") + return s +} +func (this *ListSecretsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListSecretsResponse{`, + `Secrets:` + strings.Replace(fmt.Sprintf("%v", this.Secrets), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateSecretRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateSecretRequest{`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "SecretSpec", "SecretSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateSecretResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateSecretResponse{`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveSecretRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveSecretRequest{`, + `SecretID:` + fmt.Sprintf("%v", this.SecretID) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveSecretResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveSecretResponse{`, + `}`, + }, "") + return s +} +func (this *GetConfigRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetConfigRequest{`, + `ConfigID:` + fmt.Sprintf("%v", this.ConfigID) + `,`, + `}`, + }, "") + return s +} +func (this *GetConfigResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetConfigResponse{`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateConfigRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateConfigRequest{`, + `ConfigID:` + fmt.Sprintf("%v", this.ConfigID) + `,`, + `ConfigVersion:` + strings.Replace(fmt.Sprintf("%v", this.ConfigVersion), "Version", "Version", 1) + `,`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "ConfigSpec", "ConfigSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateConfigResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateConfigResponse{`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListConfigsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListConfigsRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListConfigsRequest_Filters", "ListConfigsRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListConfigsRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := 
"map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ListConfigsRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels + `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `}`, + }, "") + return s +} +func (this *ListConfigsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListConfigsResponse{`, + `Configs:` + strings.Replace(fmt.Sprintf("%v", this.Configs), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateConfigRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateConfigRequest{`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "ConfigSpec", "ConfigSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateConfigResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateConfigResponse{`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveConfigRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveConfigRequest{`, + `ConfigID:` + fmt.Sprintf("%v", this.ConfigID) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveConfigResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveConfigResponse{`, + `}`, + }, "") + return s +} +func valueToStringControl(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *GetNodeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetNodeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetNodeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetNodeResponse) Unmarshal(dAtA []byte) error 
{ + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetNodeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetNodeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Node == nil { + m.Node = &Node{} + } + if err := m.Node.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListNodesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListNodesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListNodesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &ListNodesRequest_Filters{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListNodesRequest_Filters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + 
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IDPrefixes = append(m.IDPrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType == 0 { + var v NodeSpec_Membership + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (NodeSpec_Membership(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Memberships = append(m.Memberships, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v NodeSpec_Membership + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (NodeSpec_Membership(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Memberships = append(m.Memberships, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Memberships", wireType) + } + case 5: + if wireType == 0 { + var v NodeRole + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (NodeRole(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Roles = append(m.Roles, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v NodeRole + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (NodeRole(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Roles = append(m.Roles, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamePrefixes = append(m.NamePrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListNodesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListNodesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListNodesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Nodes = append(m.Nodes, &Node{}) + if err := m.Nodes[len(m.Nodes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateNodeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateNodeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateNodeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeVersion == nil { + m.NodeVersion = &Version{} + } + if err := m.NodeVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &NodeSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateNodeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateNodeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateNodeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Node == nil { + m.Node = &Node{} + } + if err := m.Node.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveNodeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveNodeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveNodeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Force = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveNodeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveNodeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveNodeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetTaskRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } 
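The default: arm of every field switch hands unrecognized tags to skipControl, which is what lets an older decoder step over fields added by a newer schema instead of failing; messages like RemoveNodeResponse and RemoveTaskResponse consist of nothing else. A minimal sketch of skipping by wire type — the real skipControl, presumably defined further down this generated file, also covers the group wire types and the negative-length checks:

    package main

    import (
        "errors"
        "fmt"
    )

    // skip consumes one field of the given wire type so decoding can
    // continue past an unknown tag.
    func skip(b []byte, i, wireType int) (int, error) {
        switch wireType {
        case 0: // varint: advance until a byte without the high bit
            for ; i < len(b); i++ {
                if b[i] < 0x80 {
                    return i + 1, nil
                }
            }
        case 1: // fixed 64-bit
            return i + 8, nil
        case 2: // length-delimited: read the varint length, then jump
            var l uint64
            for shift := uint(0); i < len(b); shift += 7 {
                c := b[i]
                i++
                l |= uint64(c&0x7F) << shift
                if c < 0x80 {
                    return i + int(l), nil
                }
            }
        case 5: // fixed 32-bit
            return i + 4, nil
        }
        return 0, errors.New("truncated or unsupported field")
    }

    func main() {
        // Hypothetical bytes: an unknown varint field (0x78 = field 15,
        // wire type 0) followed by a known length-delimited field.
        data := []byte{0x78, 0x05, 0x0A, 0x02, 'h', 'i'}
        i, _ := skip(data, 1, 0) // step over the unknown field's value
        fmt.Println(i, data[i])  // 2 10
    }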
+ switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetTaskResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetTaskResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Task == nil { + m.Task = &Task{} + } + if err := m.Task.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveTaskRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveTaskResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveTaskResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListTasksRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListTasksRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListTasksRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &ListTasksRequest_Filters{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListTasksRequest_Filters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { 
+ preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IDPrefixes = append(m.IDPrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + 
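The Labels handling in these Filters decoders unpacks each map entry as a nested message, key as field 1 and value as field 2, before storing m.Labels[mapkey] = mapvalue. A short sketch of the byte layout being parsed, which also matches the mapEntrySize arithmetic in the Size() methods earlier, with a hypothetical entry:

    package main

    import "fmt"

    func main() {
        // One Labels entry, {"env": "prod"}:
        entry := []byte{
            0x0A, 3, 'e', 'n', 'v', // key:   tag (1<<3 | 2), len, bytes
            0x12, 4, 'p', 'r', 'o', 'd', // value: tag (2<<3 | 2), len, bytes
        }
        // The entry is then framed like any nested message under the
        // Labels field number (3 in these Filters messages):
        field := append([]byte{0x1A, byte(len(entry))}, entry...)
        fmt.Println(len(entry), len(field)) // 11 13
    }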
if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceIDs = append(m.ServiceIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeIDs = append(m.NodeIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType == 0 { + var v TaskState + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (TaskState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DesiredStates = append(m.DesiredStates, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v TaskState + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (TaskState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DesiredStates = append(m.DesiredStates, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredStates", wireType) + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamePrefixes = append(m.NamePrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpToDate", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.UpToDate = bool(v != 0) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Runtimes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Runtimes = append(m.Runtimes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListTasksResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListTasksResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListTasksResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tasks = append(m.Tasks, &Task{}) + if err := m.Tasks[len(m.Tasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
+func (m *CreateServiceRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowControl
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CreateServiceRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CreateServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowControl
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthControl
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Spec == nil {
+				m.Spec = &ServiceSpec{}
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipControl(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthControl
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
[generated, same loop shape, condensed: (*CreateServiceResponse).Unmarshal reads Service (field 1, message); (*GetServiceRequest).Unmarshal reads ServiceID (field 1, string) and InsertDefaults (field 2, varint bool); (*GetServiceResponse).Unmarshal reads Service (field 1, message).]
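Each field begins with one such varint key, which the generated code splits into a field number and a wire type with two bit operations, as in the fieldNum/wireType assignments above. A tiny sketch; the key value in main is illustrative.

```go
package main

import "fmt"

// splitTag separates a decoded key varint into its parts: the low three
// bits are the wire type, everything above is the field number.
func splitTag(wire uint64) (fieldNum int32, wireType int) {
	return int32(wire >> 3), int(wire & 0x7)
}

func main() {
	// 0x1A decodes to field 3, wire type 2 (length-delimited), the shape of
	// the Labels map field in the Filters messages.
	f, w := splitTag(0x1A)
	fmt.Println(f, w) // 3 2
}
```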
[generated, condensed: (*UpdateServiceRequest).Unmarshal reads ServiceID (field 1, string), ServiceVersion (field 2, *Version), Spec (field 3, *ServiceSpec) and Rollback (field 4, enum varint); (*UpdateServiceResponse).Unmarshal reads Service (field 1, message); (*RemoveServiceRequest).Unmarshal reads ServiceID (field 1, string).]
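Strings and embedded messages both arrive as wire type 2: a length varint followed by that many bytes. Before slicing, the generated code checks that the length is non-negative and that the slice end stays inside the buffer; the following is a hedged sketch of those checks, using the stdlib varint reader instead of the inlined loop, with illustrative error strings.

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// readLengthDelimited applies the same guards the generated code inlines
// for wire type 2 fields: decode the length, reject a negative value, and
// refuse to read past the end of the buffer.
func readLengthDelimited(dAtA []byte, iNdEx int) (body []byte, next int, err error) {
	msglen, n := binary.Uvarint(dAtA[iNdEx:])
	if n <= 0 {
		return nil, 0, errors.New("bad length varint")
	}
	iNdEx += n
	if int(msglen) < 0 { // ErrInvalidLengthControl stand-in
		return nil, 0, errors.New("invalid length")
	}
	postIndex := iNdEx + int(msglen)
	if postIndex > len(dAtA) { // io.ErrUnexpectedEOF stand-in
		return nil, 0, errors.New("unexpected EOF")
	}
	return dAtA[iNdEx:postIndex], postIndex, nil
}

func main() {
	// A three-byte body: length 3, then "abc".
	body, next, err := readLengthDelimited([]byte{0x03, 'a', 'b', 'c'}, 0)
	fmt.Println(string(body), next, err) // abc 4 <nil>
}
```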
[generated, condensed: (*RemoveServiceResponse).Unmarshal carries no fields and only skips unknowns; (*ListServicesRequest).Unmarshal reads Filters (field 1, message); (*ListServicesRequest_Filters).Unmarshal reads Names (field 1), IDPrefixes (field 2), Labels (field 3, map<string,string> entries), NamePrefixes (field 4) and Runtimes (field 5); (*ListServicesResponse).Unmarshal appends one Service per element of field 1.]
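The Labels fields are proto maps, and on the wire a map<string,string> is a repeated entry message with the key in field 1 and the value in field 2, which is exactly what the mapkey/mapvalue loops above walk. A simplified sketch of decoding one entry body; unlike the generated code, which skips unknown entry fields, this version rejects them.

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// decodeMapEntry parses one map<string,string> entry body: both key and
// value are length-delimited strings tagged as fields 1 and 2.
func decodeMapEntry(entry []byte) (key, value string, err error) {
	i := 0
	for i < len(entry) {
		wire, n := binary.Uvarint(entry[i:])
		if n <= 0 {
			return "", "", errors.New("bad tag")
		}
		i += n
		if wire&0x7 != 2 {
			return "", "", errors.New("unexpected wire type in map entry")
		}
		strLen, n := binary.Uvarint(entry[i:])
		if n <= 0 || i+n+int(strLen) > len(entry) {
			return "", "", errors.New("truncated string")
		}
		i += n
		s := string(entry[i : i+int(strLen)])
		i += int(strLen)
		switch wire >> 3 {
		case 1:
			key = s
		case 2:
			value = s
		}
	}
	return key, value, nil
}

func main() {
	// Field 1 holds "env", field 2 holds "prod".
	k, v, err := decodeMapEntry([]byte{0x0A, 3, 'e', 'n', 'v', 0x12, 4, 'p', 'r', 'o', 'd'})
	fmt.Println(k, v, err) // env prod <nil>
}
```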
[generated, condensed: (*CreateNetworkRequest).Unmarshal reads Spec (field 1, *NetworkSpec); (*CreateNetworkResponse).Unmarshal and (*GetNetworkResponse).Unmarshal read Network (field 1, message); (*GetNetworkRequest).Unmarshal and (*RemoveNetworkRequest).Unmarshal read Name (field 1) and NetworkID (field 2) as strings; (*RemoveNetworkResponse).Unmarshal carries no fields.]
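Every default: branch above rewinds to preIndex and calls skipControl to step over a field the decoder does not recognize. A rough stand-in showing how such a skipper advances per wire type; the group wire types 3 and 4, which the generated skipControl also handles, are omitted here.

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// skipField consumes one field body of the given wire type and returns how
// many bytes were skipped, the role skipControl plays in the generated code.
func skipField(buf []byte, wireType int) (int, error) {
	switch wireType {
	case 0: // varint: consume bytes until the continuation bit clears
		_, n := binary.Uvarint(buf)
		if n <= 0 {
			return 0, errors.New("bad varint")
		}
		return n, nil
	case 1: // fixed 64-bit
		if len(buf) < 8 {
			return 0, errors.New("truncated fixed64")
		}
		return 8, nil
	case 2: // length-delimited: length varint plus that many bytes
		l, n := binary.Uvarint(buf)
		if n <= 0 || n+int(l) > len(buf) {
			return 0, errors.New("truncated bytes")
		}
		return n + int(l), nil
	case 5: // fixed 32-bit
		if len(buf) < 4 {
			return 0, errors.New("truncated fixed32")
		}
		return 4, nil
	default: // group wire types 3/4 omitted in this sketch
		return 0, fmt.Errorf("unsupported wire type %d", wireType)
	}
}

func main() {
	// Skip a varint field body (300 = 0xAC 0x02).
	n, err := skipField([]byte{0xAC, 0x02}, 0)
	fmt.Println(n, err) // 2 <nil>
}
```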
[generated, condensed: (*ListNetworksRequest).Unmarshal reads Filters (field 1, message); (*ListNetworksRequest_Filters).Unmarshal reads Names (field 1), IDPrefixes (field 2), Labels (field 3, map<string,string> entries) and NamePrefixes (field 4); (*ListNetworksResponse).Unmarshal appends one Network per element of field 1; (*GetClusterRequest).Unmarshal reads ClusterID (field 1, string).]
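Optional nested messages stay nil until their field actually appears on the wire; the decoders allocate on first sight and then unmarshal the sub-slice in place, as in the "if m.Network == nil" blocks above. An illustrative reduction of that pattern; the types here are stubs, not the real generated structs.

```go
package main

import "fmt"

// Network is a stub standing in for the generated message type.
type Network struct{ raw []byte }

func (m *Network) Unmarshal(b []byte) error { m.raw = b; return nil }

// GetNetworkResponse mirrors the generated shape: an optional nested
// message is a pointer field that stays nil when absent.
type GetNetworkResponse struct{ Network *Network }

func (m *GetNetworkResponse) setNetwork(body []byte) error {
	if m.Network == nil { // same nil-check-then-allocate step as the diff
		m.Network = &Network{}
	}
	return m.Network.Unmarshal(body)
}

func main() {
	var resp GetNetworkResponse
	fmt.Println(resp.Network == nil) // true: absent field stays nil
	_ = resp.setNetwork([]byte{})
	fmt.Println(resp.Network == nil) // false once the field is seen
}
```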
[generated, condensed: (*GetClusterResponse).Unmarshal reads Cluster (field 1, message); (*ListClustersRequest).Unmarshal reads Filters (field 1, message); (*ListClustersRequest_Filters).Unmarshal reads Names (field 1), IDPrefixes (field 2), Labels (field 3, map<string,string> entries) and NamePrefixes (field 4); (*ListClustersResponse).Unmarshal appends one Cluster per element of field 1.]
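Repeated message fields such as ListClustersResponse.Clusters grow by appending a zero value and unmarshaling into the final slice element, so each occurrence of the field on the wire becomes one entry. A stubbed sketch of that append-then-decode step; Cluster here is a placeholder, not the real generated type.

```go
package main

import "fmt"

// Cluster is a stub for the generated message type.
type Cluster struct{ body []byte }

func (c *Cluster) Unmarshal(b []byte) error { c.body = b; return nil }

// ListClustersResponse mirrors the generated shape of a repeated message field.
type ListClustersResponse struct{ Clusters []*Cluster }

func (m *ListClustersResponse) appendCluster(body []byte) error {
	// Append a fresh element, then unmarshal into it, as the diff does with
	// m.Clusters[len(m.Clusters)-1].Unmarshal(...).
	m.Clusters = append(m.Clusters, &Cluster{})
	return m.Clusters[len(m.Clusters)-1].Unmarshal(body)
}

func main() {
	var resp ListClustersResponse
	_ = resp.appendCluster([]byte{1, 2})
	_ = resp.appendCluster([]byte{3})
	fmt.Println(len(resp.Clusters)) // 2: one slice entry per wire occurrence
}
```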
[generated, condensed: (*KeyRotation).Unmarshal reads WorkerJoinToken (field 1), ManagerJoinToken (field 2) and ManagerUnlockKey (field 3) as varint bools; (*UpdateClusterRequest).Unmarshal reads ClusterID (field 1, string), ClusterVersion (field 2, *Version), Spec (field 3, *ClusterSpec) and Rotation (field 4, an embedded KeyRotation decoded in place via m.Rotation.Unmarshal); (*UpdateClusterResponse).Unmarshal reads Cluster (field 1, message).]
io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSecretRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSecretRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSecretRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSecretResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSecretResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSecretResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &Secret{} + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateSecretRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateSecretRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateSecretRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretVersion == nil { + m.SecretVersion = &Version{} + } + if err := m.SecretVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &SecretSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateSecretResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateSecretResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateSecretResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 
2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &Secret{} + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListSecretsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListSecretsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListSecretsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &ListSecretsRequest_Filters{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListSecretsRequest_Filters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IDPrefixes = append(m.IDPrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamePrefixes = append(m.NamePrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListSecretsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListSecretsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListSecretsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secrets = append(m.Secrets, &Secret{}) + if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateSecretRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateSecretRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateSecretRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &SecretSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateSecretResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateSecretResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateSecretResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &Secret{} + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveSecretRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveSecretRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveSecretRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { 
+ return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveSecretResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveSecretResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveSecretResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetConfigRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetConfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConfigID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetConfigResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetConfigResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &Config{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateConfigRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateConfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConfigID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigVersion == nil { + m.ConfigVersion = &Version{} + } + if err := m.ConfigVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType 
!= 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &ConfigSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateConfigResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateConfigResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &Config{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListConfigsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListConfigsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListConfigsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &ListConfigsRequest_Filters{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListConfigsRequest_Filters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IDPrefixes = append(m.IDPrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamePrefixes = append(m.NamePrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListConfigsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListConfigsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListConfigsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = 
%d for field Configs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Configs = append(m.Configs, &Config{}) + if err := m.Configs[len(m.Configs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateConfigRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateConfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &ConfigSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateConfigResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateConfigResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &Config{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveConfigRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveConfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConfigID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveConfigResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveConfigResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipControl(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowControl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowControl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowControl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthControl + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowControl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipControl(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthControl = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowControl = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/control.proto", fileDescriptorControl) } + +var fileDescriptorControl = []byte{ + // 2137 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0x4f, 0x73, 0x1b, 0x49, + 0x15, 0xb7, 0xfe, 0xd8, 0x92, 0x9f, 0x6c, 0xd9, 0xee, 0x38, 0xa0, 0x52, 0x82, 0x9d, 0x9a, 0x90, + 0x44, 0xd9, 0x32, 0x12, 0xab, 0xb0, 0x6c, 0x58, 0x8a, 0x3f, 0x6b, 0x3b, 0x9b, 0xd5, 0x7a, 0xe3, + 0xa4, 0xc6, 0xc9, 0x16, 0x37, 0x95, 0x2c, 0xb5, 0xbd, 0x13, 0xc9, 0x1a, 0x31, 0x33, 0xf2, 0xae, + 0x8b, 0x0b, 0x50, 0xcb, 0x81, 0x0f, 0x40, 0x15, 0x57, 0xae, 0x1c, 0x38, 0x70, 0xe2, 0xc0, 0x07, + 0x48, 0x71, 0xe2, 0xc8, 0xc9, 0xb0, 0xaa, 0x82, 0xe2, 0xc4, 0x67, 0xa0, 0xba, 0xfb, 0xf5, 0xfc, + 0x53, 0xcf, 0x8c, 0x24, 0xab, 0xca, 0x39, 0x59, 0xd3, 0xf3, 0x7b, 0xfd, 0x5e, 0xf7, 0xfb, 0xf5, + 0x6f, 0xba, 0x5f, 0x1b, 0x76, 0x4e, 0x0d, 0xe7, 0xf3, 0xe1, 0x71, 0xb5, 0x6d, 0x9e, 0xd5, 0x3a, + 0x66, 0xbb, 0x4b, 0xad, 0x9a, 0xfd, 0x45, 0xcb, 0x3a, 0xeb, 0x1a, 0x4e, 0xad, 0x35, 0x30, 0x6a, + 0x6d, 0xb3, 0xef, 0x58, 0x66, 0xaf, 0x3a, 0xb0, 0x4c, 0xc7, 0x24, 0x44, 0x40, 0xaa, 0x12, 0x52, + 0x3d, 0x7f, 0xb7, 0xfc, 0x4e, 0x42, 0x0f, 0xf6, 0x80, 0xb6, 0x6d, 0x61, 0x5f, 0x4e, 0xf2, 0x66, + 0x1e, 0xbf, 0xa6, 0x6d, 0x47, 0xa2, 0x93, 0x7a, 0x76, 0x2e, 0x06, 0x54, 0x62, 0x37, 0x4f, 0xcd, + 0x53, 0x93, 0xff, 0xac, 0xb1, 0x5f, 0xd8, 0xfa, 0x7e, 0x4c, 0x0f, 0x1c, 0x71, 0x3c, 0x3c, 0xa9, + 0x0d, 0x7a, 0xc3, 0x53, 0xa3, 0x8f, 0x7f, 0x84, 0xa1, 0xf6, 0x1e, 0x14, 0x9f, 0x52, 0xe7, 0xd0, + 0xec, 0x50, 0x9d, 0xfe, 0x7c, 0x48, 0x6d, 0x87, 0xdc, 0x85, 0x5c, 0xdf, 0xec, 0xd0, 0xa6, 0xd1, + 0x29, 0xa5, 0xee, 0xa4, 0x2a, 0xcb, 0xbb, 0x30, 0xba, 0xdc, 0x5e, 0x62, 0x88, 
0xc6, 0xbe, 0xbe, + 0xc4, 0x5e, 0x35, 0x3a, 0xda, 0x4f, 0x60, 0xcd, 0x35, 0xb3, 0x07, 0x66, 0xdf, 0xa6, 0x64, 0x07, + 0xb2, 0xec, 0x25, 0x37, 0x2a, 0xd4, 0x4b, 0xd5, 0xf1, 0x19, 0xac, 0x72, 0x3c, 0x47, 0x69, 0xff, + 0xc9, 0xc0, 0xfa, 0xa7, 0x86, 0xcd, 0xbb, 0xb0, 0xa5, 0xeb, 0x8f, 0x20, 0x77, 0x62, 0xf4, 0x1c, + 0x6a, 0xd9, 0xd8, 0xcb, 0x8e, 0xaa, 0x97, 0xb0, 0x59, 0xf5, 0x23, 0x61, 0xa3, 0x4b, 0xe3, 0xf2, + 0x6f, 0x33, 0x90, 0xc3, 0x46, 0xb2, 0x09, 0x8b, 0xfd, 0xd6, 0x19, 0x65, 0x3d, 0x66, 0x2a, 0xcb, + 0xba, 0x78, 0x20, 0x35, 0x28, 0x18, 0x9d, 0xe6, 0xc0, 0xa2, 0x27, 0xc6, 0x97, 0xd4, 0x2e, 0xa5, + 0xd9, 0xbb, 0xdd, 0xe2, 0xe8, 0x72, 0x1b, 0x1a, 0xfb, 0x2f, 0xb0, 0x55, 0x07, 0xa3, 0x23, 0x7f, + 0x93, 0x17, 0xb0, 0xd4, 0x6b, 0x1d, 0xd3, 0x9e, 0x5d, 0xca, 0xdc, 0xc9, 0x54, 0x0a, 0xf5, 0xc7, + 0xd3, 0x44, 0x56, 0xfd, 0x94, 0x9b, 0x3e, 0xe9, 0x3b, 0xd6, 0x85, 0x8e, 0xfd, 0x90, 0x67, 0x50, + 0x38, 0xa3, 0x67, 0xc7, 0xd4, 0xb2, 0x3f, 0x37, 0x06, 0x76, 0x29, 0x7b, 0x27, 0x53, 0x29, 0xd6, + 0x1f, 0x44, 0x4d, 0xdb, 0xd1, 0x80, 0xb6, 0xab, 0xcf, 0x5c, 0xfc, 0x6e, 0x7a, 0x7d, 0x41, 0xf7, + 0xdb, 0x93, 0xef, 0xc3, 0xa2, 0x65, 0xf6, 0xa8, 0x5d, 0x5a, 0xe4, 0x1d, 0xdd, 0x8e, 0x9c, 0x7f, + 0xb3, 0x47, 0xb9, 0xb5, 0x80, 0x93, 0xbb, 0xb0, 0xca, 0xa6, 0xc4, 0x9b, 0x8b, 0x25, 0x3e, 0x4f, + 0x2b, 0xac, 0x51, 0x8e, 0xbe, 0xfc, 0x03, 0x28, 0xf8, 0x86, 0x40, 0xd6, 0x21, 0xd3, 0xa5, 0x17, + 0x82, 0x1e, 0x3a, 0xfb, 0xc9, 0x66, 0xf9, 0xbc, 0xd5, 0x1b, 0xd2, 0x52, 0x9a, 0xb7, 0x89, 0x87, + 0x0f, 0xd2, 0x8f, 0x53, 0xda, 0x1e, 0x6c, 0xf8, 0xa6, 0x05, 0xb9, 0x52, 0x85, 0x45, 0xc6, 0x02, + 0x91, 0x94, 0x38, 0xb2, 0x08, 0x98, 0xf6, 0xc7, 0x14, 0x6c, 0xbc, 0x1a, 0x74, 0x5a, 0x0e, 0x9d, + 0x96, 0xa9, 0xe4, 0xc7, 0xb0, 0xc2, 0x41, 0xe7, 0xd4, 0xb2, 0x0d, 0xb3, 0xcf, 0x03, 0x2c, 0xd4, + 0x6f, 0xa9, 0x3c, 0x7e, 0x26, 0x20, 0x7a, 0x81, 0x19, 0xe0, 0x03, 0xf9, 0x2e, 0x64, 0xd9, 0xc2, + 0x2e, 0x65, 0xb8, 0xdd, 0xed, 0xb8, 0xfc, 0xe8, 0x1c, 0xa9, 0xed, 0x02, 0xf1, 0xc7, 0x3a, 0xd3, + 0xf2, 0x38, 0x84, 0x0d, 0x9d, 0x9e, 0x99, 0xe7, 0xd3, 0x8f, 0x77, 0x13, 0x16, 0x4f, 0x4c, 0xab, + 0x2d, 0x32, 0x91, 0xd7, 0xc5, 0x83, 0xb6, 0x09, 0xc4, 0xdf, 0x9f, 0x88, 0x09, 0x17, 0xff, 0xcb, + 0x96, 0xdd, 0xf5, 0xb9, 0x70, 0x5a, 0x76, 0x37, 0xe4, 0x82, 0x21, 0x98, 0x0b, 0xf6, 0xca, 0x5d, + 0xfc, 0xc2, 0xcc, 0x1b, 0x1d, 0x7b, 0x19, 0x37, 0x3a, 0x8e, 0xe7, 0x28, 0xed, 0xb1, 0x1c, 0xdd, + 0xd4, 0xae, 0xdd, 0x71, 0xf8, 0xbd, 0x6b, 0x7f, 0xcd, 0x0a, 0x31, 0x61, 0x8d, 0x33, 0x88, 0x89, + 0xdf, 0x6c, 0x5c, 0x4c, 0xfe, 0x79, 0x8d, 0x62, 0xa2, 0x8a, 0x4c, 0x29, 0x26, 0x35, 0x28, 0xd8, + 0xd4, 0x3a, 0x37, 0xda, 0x8c, 0x1d, 0x42, 0x4c, 0x30, 0x84, 0x23, 0xd1, 0xdc, 0xd8, 0xb7, 0x75, + 0x40, 0x48, 0xa3, 0x63, 0x93, 0xfb, 0x90, 0x47, 0x2e, 0x09, 0xc5, 0x58, 0xde, 0x2d, 0x8c, 0x2e, + 0xb7, 0x73, 0x82, 0x4c, 0xb6, 0x9e, 0x13, 0x6c, 0xb2, 0xc9, 0xc7, 0x50, 0xec, 0x50, 0xdb, 0xb0, + 0x68, 0xa7, 0x69, 0x3b, 0x2d, 0x07, 0xf5, 0xa1, 0x58, 0xff, 0x56, 0x54, 0x8a, 0x8f, 0x18, 0x8a, + 0x0b, 0xcc, 0x2a, 0x1a, 0xf2, 0x16, 0x85, 0xd0, 0xe4, 0xc6, 0x85, 0x86, 0xdc, 0x06, 0x18, 0x0e, + 0x9a, 0x8e, 0xd9, 0x64, 0xeb, 0xa7, 0x94, 0xe7, 0x14, 0xce, 0x0f, 0x07, 0x2f, 0xcd, 0xfd, 0x96, + 0x43, 0x49, 0x19, 0xf2, 0xd6, 0xb0, 0xef, 0x18, 0x2c, 0x03, 0xcb, 0xdc, 0xda, 0x7d, 0x9e, 0x83, + 0x44, 0xe1, 0x64, 0x7b, 0x12, 0xc5, 0x38, 0x17, 0x2b, 0x51, 0x9c, 0x84, 0x02, 0xa6, 0x1d, 0xc0, + 0xe6, 0x9e, 0x45, 0x5b, 0x0e, 0xc5, 0x09, 0x97, 0x34, 0x7c, 0x84, 0xfa, 0x21, 0x38, 0xb8, 0xad, + 0xea, 0x06, 0x2d, 0x7c, 0x12, 0x72, 0x08, 0x37, 0x43, 0x9d, 0x61, 0x54, 0xef, 0x41, 0x0e, 0x93, + 0x88, 
0x1d, 0xde, 0x8a, 0xe9, 0x50, 0x97, 0x58, 0xed, 0x35, 0x6c, 0x3c, 0xa5, 0x4e, 0x28, 0xb2, + 0x1d, 0x00, 0x8f, 0x33, 0xb8, 0xe6, 0x56, 0x47, 0x97, 0xdb, 0xcb, 0x2e, 0x65, 0xf4, 0x65, 0x97, + 0x31, 0xe4, 0x01, 0xac, 0x19, 0x7d, 0x9b, 0x5a, 0x4e, 0xb3, 0x43, 0x4f, 0x5a, 0xc3, 0x9e, 0x63, + 0xa3, 0xc2, 0x14, 0x45, 0xf3, 0x3e, 0xb6, 0x6a, 0x07, 0x40, 0xfc, 0xbe, 0xae, 0x16, 0xf8, 0x9f, + 0xd3, 0xb0, 0x29, 0xc4, 0xf4, 0x4a, 0xc1, 0xef, 0xc3, 0x9a, 0x44, 0x4f, 0xf1, 0x1d, 0x28, 0xa2, + 0x8d, 0xfc, 0x14, 0x3c, 0x0a, 0x7c, 0x0a, 0x26, 0x4b, 0x25, 0x79, 0x06, 0x79, 0xcb, 0xec, 0xf5, + 0x8e, 0x5b, 0xed, 0x6e, 0x29, 0x7b, 0x27, 0x55, 0x29, 0xd6, 0xdf, 0x55, 0x19, 0xaa, 0x06, 0x59, + 0xd5, 0xd1, 0x50, 0x77, 0xbb, 0xd0, 0x34, 0xc8, 0xcb, 0x56, 0x92, 0x87, 0xec, 0xe1, 0xf3, 0xc3, + 0x27, 0xeb, 0x0b, 0x64, 0x05, 0xf2, 0x2f, 0xf4, 0x27, 0x9f, 0x35, 0x9e, 0xbf, 0x3a, 0x5a, 0x4f, + 0x31, 0xf6, 0x84, 0xba, 0xbb, 0x5a, 0x12, 0xf6, 0x61, 0x53, 0x88, 0xee, 0x55, 0x72, 0xa0, 0x7d, + 0x13, 0x6e, 0x86, 0x7a, 0x41, 0xf5, 0xfe, 0x2a, 0x03, 0x37, 0xd8, 0xfa, 0xc3, 0x76, 0x57, 0xc0, + 0x1b, 0x61, 0x01, 0xaf, 0x45, 0xc9, 0x64, 0xc8, 0x72, 0x5c, 0xc3, 0xff, 0x90, 0x9e, 0xbb, 0x86, + 0x1f, 0x85, 0x34, 0xfc, 0x87, 0x53, 0x06, 0xa7, 0x94, 0xf1, 0x31, 0x8d, 0xcc, 0x2a, 0x34, 0xd2, + 0xaf, 0x82, 0x8b, 0xf3, 0x53, 0xc1, 0xe7, 0xb0, 0x19, 0x0c, 0x17, 0x49, 0xf3, 0x3e, 0xe4, 0x31, + 0x89, 0x52, 0x0b, 0x63, 0x59, 0xe3, 0x82, 0x3d, 0x45, 0x3c, 0xa4, 0xce, 0x17, 0xa6, 0xd5, 0x9d, + 0x42, 0x11, 0xd1, 0x42, 0xa5, 0x88, 0x6e, 0x67, 0x1e, 0xa7, 0xfb, 0xa2, 0x29, 0x8e, 0xd3, 0xd2, + 0x4a, 0x62, 0xb5, 0x57, 0x5c, 0x11, 0x43, 0x91, 0x11, 0xc8, 0xb2, 0x99, 0xc6, 0xf9, 0xe2, 0xbf, + 0x19, 0xc9, 0xd1, 0x86, 0x91, 0x3c, 0xed, 0x91, 0x1c, 0x6d, 0x19, 0xc9, 0x11, 0xd0, 0xe8, 0xa0, + 0xf8, 0xcd, 0x29, 0xc6, 0x9f, 0xc9, 0x75, 0x37, 0xf7, 0x30, 0xdd, 0xb5, 0x18, 0x8a, 0x54, 0xfb, + 0x6f, 0x5a, 0xac, 0x45, 0x6c, 0x9f, 0x61, 0x2d, 0x86, 0x2c, 0xc7, 0xd7, 0xe2, 0x6f, 0xae, 0x71, + 0x2d, 0x46, 0x04, 0x37, 0xf3, 0x5a, 0x9c, 0xc3, 0x7a, 0xf3, 0x42, 0xf2, 0xd6, 0x1b, 0x26, 0x2a, + 0x76, 0xbd, 0xc9, 0xcc, 0xb9, 0x60, 0xed, 0x43, 0x4e, 0xe9, 0xbd, 0xde, 0xd0, 0x76, 0xa8, 0xe5, + 0xd3, 0xe8, 0xb6, 0x68, 0x09, 0x69, 0x34, 0xe2, 0x18, 0x2f, 0x10, 0xe0, 0xd2, 0xd7, 0xed, 0xc2, + 0xa3, 0x2f, 0x42, 0xe2, 0xe8, 0x2b, 0xad, 0x24, 0xd6, 0xe5, 0x12, 0xbe, 0x98, 0x81, 0x4b, 0x21, + 0xcb, 0xb7, 0x8b, 0x4b, 0x11, 0xc1, 0x5d, 0x27, 0x97, 0xbc, 0x90, 0x3c, 0x2e, 0x61, 0x36, 0x62, + 0xb9, 0x24, 0x53, 0xe7, 0x82, 0xb5, 0xdf, 0xa5, 0xa0, 0x70, 0x40, 0x2f, 0x74, 0xd3, 0x69, 0x39, + 0x6c, 0xeb, 0xf3, 0x0e, 0x6c, 0x30, 0x92, 0x51, 0xab, 0xf9, 0xda, 0x34, 0xfa, 0x4d, 0xc7, 0xec, + 0xd2, 0x3e, 0x0f, 0x2d, 0xaf, 0xaf, 0x89, 0x17, 0x9f, 0x98, 0x46, 0xff, 0x25, 0x6b, 0x26, 0x3b, + 0x40, 0xce, 0x5a, 0xfd, 0xd6, 0x69, 0x10, 0x2c, 0x36, 0x8b, 0xeb, 0xf8, 0x46, 0x89, 0x1e, 0xf6, + 0x7b, 0x66, 0xbb, 0xdb, 0x64, 0xa3, 0xce, 0x04, 0xd0, 0xaf, 0xf8, 0x8b, 0x03, 0x7a, 0xa1, 0xfd, + 0xda, 0xdd, 0x0f, 0x5e, 0x85, 0xe7, 0x6c, 0x3f, 0x28, 0xd1, 0xd3, 0xec, 0x07, 0xd1, 0x66, 0x8a, + 0xfd, 0x20, 0x7a, 0xf7, 0xed, 0x07, 0x3f, 0x64, 0xfb, 0x41, 0x31, 0xab, 0x7c, 0x3f, 0x18, 0x61, + 0xe8, 0x9b, 0xfc, 0xdd, 0xec, 0x9b, 0xcb, 0xed, 0x05, 0xdd, 0x35, 0xf3, 0xf6, 0x77, 0x73, 0x5a, + 0xa8, 0x3f, 0x82, 0x75, 0xbe, 0x63, 0x6f, 0x5b, 0xd4, 0x91, 0xf3, 0xf9, 0x10, 0x96, 0x6d, 0xde, + 0xe0, 0x4d, 0xe7, 0xca, 0xe8, 0x72, 0x3b, 0x2f, 0x50, 0x8d, 0x7d, 0xf6, 0x9d, 0xe7, 0xbf, 0x3a, + 0xda, 0x53, 0x3c, 0x5c, 0x08, 0x73, 0x0c, 0xa5, 0x0e, 0x4b, 0x02, 0x80, 0x91, 0x94, 0xd5, 0x7b, + 0x06, 0x6e, 0x83, 0x48, 0xed, 
0x2f, 0x29, 0xb8, 0x21, 0x37, 0xae, 0xb3, 0xc5, 0x42, 0x76, 0xa1, + 0x88, 0xd0, 0x29, 0xf2, 0xba, 0x2a, 0x4c, 0x64, 0x5a, 0xeb, 0x81, 0xb4, 0x6e, 0x45, 0x07, 0xee, + 0xdb, 0x9e, 0x7c, 0xe2, 0x1d, 0x53, 0xae, 0x3c, 0x0d, 0xff, 0x4e, 0x03, 0x11, 0x3b, 0x31, 0xf6, + 0xe8, 0xca, 0xe6, 0xc7, 0x61, 0xd9, 0xac, 0x46, 0xef, 0x38, 0xfd, 0x86, 0xe3, 0xaa, 0xf9, 0xd5, + 0xfc, 0x55, 0x53, 0x0f, 0xa9, 0xe6, 0x07, 0xd3, 0xc5, 0x76, 0x2d, 0xa2, 0x79, 0x20, 0x8f, 0x1d, + 0x18, 0x11, 0xa6, 0xec, 0x7b, 0xec, 0x90, 0xc4, 0x9b, 0x50, 0x32, 0xe3, 0x72, 0x26, 0xa1, 0x5a, + 0x03, 0x6e, 0xc8, 0x13, 0xbb, 0x9f, 0xba, 0xf5, 0xc0, 0x5e, 0x77, 0x62, 0x2e, 0x05, 0xbb, 0xba, + 0x02, 0x97, 0x7e, 0x0a, 0x37, 0xe4, 0xa1, 0x6b, 0xc6, 0xd5, 0xfd, 0x0d, 0xef, 0xf0, 0xe7, 0x8f, + 0x06, 0x45, 0x63, 0xcf, 0xec, 0x9f, 0x18, 0xa7, 0xbe, 0x6e, 0xdb, 0xbc, 0x21, 0xd4, 0xad, 0x40, + 0xb1, 0x6e, 0xc5, 0x6b, 0x57, 0x34, 0xa4, 0xb9, 0x37, 0x42, 0x01, 0x88, 0x1b, 0x21, 0xda, 0x20, + 0xd2, 0x27, 0x1a, 0xb3, 0xc6, 0xc2, 0x44, 0x03, 0xa1, 0xd3, 0x88, 0x86, 0x30, 0x99, 0x42, 0x34, + 0x84, 0x67, 0x95, 0x68, 0xcc, 0x61, 0x1a, 0xa4, 0x68, 0x88, 0xe6, 0x19, 0x44, 0x23, 0x68, 0xf8, + 0x76, 0x89, 0x86, 0x3a, 0xb6, 0xeb, 0x14, 0x0d, 0x37, 0x22, 0x4f, 0x34, 0x44, 0x22, 0x62, 0x45, + 0x03, 0x73, 0x26, 0xa1, 0x9e, 0x68, 0x04, 0xa9, 0x3b, 0x81, 0x68, 0xa8, 0xb8, 0x14, 0xec, 0xea, + 0x0a, 0x5c, 0x72, 0x45, 0x63, 0xe6, 0xd5, 0xed, 0x8a, 0x46, 0x30, 0x9a, 0xfa, 0xaf, 0x6e, 0x41, + 0x6e, 0x4f, 0x5c, 0xb4, 0x12, 0x03, 0x72, 0x78, 0x85, 0x48, 0x34, 0x55, 0x50, 0xc1, 0x6b, 0xc9, + 0xf2, 0xdd, 0x58, 0x0c, 0x8a, 0xd2, 0xcd, 0xbf, 0xfd, 0xe9, 0x7f, 0xbf, 0x4f, 0xaf, 0xc1, 0x2a, + 0x07, 0x7d, 0x07, 0xb7, 0x8f, 0xc4, 0x84, 0x65, 0xf7, 0x0e, 0x8a, 0x7c, 0x7b, 0x92, 0x9b, 0xbb, + 0xf2, 0xbd, 0x04, 0x54, 0xbc, 0x43, 0x0b, 0xc0, 0xbb, 0x02, 0x22, 0xf7, 0xa2, 0x0b, 0x7e, 0xfe, + 0x11, 0xde, 0x4f, 0x82, 0x25, 0xfa, 0xf4, 0xae, 0x78, 0xd4, 0x3e, 0xc7, 0xae, 0x94, 0xd4, 0x3e, + 0x15, 0x37, 0x45, 0x11, 0x3e, 0x45, 0x0e, 0x5f, 0xb6, 0xec, 0x6e, 0x64, 0x0e, 0x7d, 0x57, 0x3c, + 0x91, 0x39, 0x0c, 0x5c, 0xe6, 0xc4, 0xe7, 0x90, 0x17, 0xe9, 0xa3, 0x73, 0xe8, 0xbf, 0x30, 0x89, + 0xce, 0x61, 0xa0, 0xd2, 0x9f, 0x38, 0x9f, 0x7c, 0x78, 0x31, 0xf3, 0xe9, 0x1f, 0xe1, 0xfd, 0x24, + 0x58, 0xa2, 0x4f, 0xaf, 0x76, 0xae, 0xf6, 0x39, 0x56, 0xc7, 0x57, 0xfb, 0x1c, 0x2f, 0xc1, 0x47, + 0xf9, 0xfc, 0x12, 0x56, 0xfc, 0x75, 0x3f, 0xf2, 0x60, 0xc2, 0x42, 0x66, 0xb9, 0x92, 0x0c, 0x8c, + 0xf7, 0xfc, 0x0b, 0x58, 0x0d, 0xdc, 0x72, 0x10, 0x65, 0x8f, 0xaa, 0x5b, 0x95, 0xf2, 0xc3, 0x09, + 0x90, 0x89, 0xce, 0x03, 0x45, 0x72, 0xb5, 0x73, 0x55, 0x59, 0x5e, 0xed, 0x5c, 0x59, 0x71, 0x8f, + 0x71, 0x1e, 0xa8, 0x85, 0xab, 0x9d, 0xab, 0x8a, 0xee, 0x6a, 0xe7, 0xea, 0xc2, 0x7a, 0x2c, 0xc9, + 0xb0, 0x7e, 0x14, 0x49, 0xb2, 0x60, 0xcd, 0x31, 0x92, 0x64, 0xe1, 0x02, 0x62, 0x3c, 0xc9, 0x64, + 0xb1, 0x2b, 0x9a, 0x64, 0xa1, 0x0a, 0x5d, 0x34, 0xc9, 0xc2, 0x75, 0xb3, 0x44, 0x92, 0xc9, 0x01, + 0xc7, 0x90, 0x2c, 0x34, 0xe6, 0x87, 0x13, 0x20, 0x27, 0xcc, 0x73, 0xac, 0x73, 0x55, 0x91, 0x37, + 0x2e, 0xcf, 0x13, 0x3a, 0x17, 0x79, 0xc6, 0xd3, 0x7e, 0x64, 0x9e, 0x83, 0x75, 0x94, 0xc8, 0x3c, + 0x87, 0x4a, 0x0d, 0x09, 0x79, 0x96, 0x85, 0xa8, 0xe8, 0x3c, 0x87, 0xaa, 0x67, 0xd1, 0x79, 0x0e, + 0xd7, 0xb4, 0x12, 0xd7, 0xb3, 0x1c, 0x70, 0xcc, 0x7a, 0x0e, 0x8d, 0xf9, 0xe1, 0x04, 0xc8, 0xc4, + 0x8f, 0x93, 0x5b, 0x02, 0x51, 0x7f, 0x9c, 0xc2, 0x05, 0x96, 0xf2, 0xbd, 0x04, 0x54, 0xe2, 0x3c, + 0xfb, 0xeb, 0x0d, 0xea, 0x79, 0x56, 0xd4, 0x52, 0xca, 0x95, 0x64, 0x60, 0xbc, 0xe7, 0x21, 0x14, + 0x7c, 0xa7, 0x66, 0x72, 0x7f, 0xb2, 0x83, 0x7e, 0xf9, 
0x41, 0x22, 0x2e, 0x71, 0xc0, 0xfe, 0x43, + 0xb1, 0x7a, 0xc0, 0x8a, 0x13, 0x78, 0xb9, 0x92, 0x0c, 0x4c, 0xf4, 0xec, 0x3f, 0x00, 0xab, 0x3d, + 0x2b, 0x0e, 0xd9, 0xe5, 0x4a, 0x32, 0x70, 0x12, 0x56, 0x89, 0x2d, 0x74, 0x24, 0xab, 0x02, 0x7b, + 0xf4, 0x48, 0x56, 0x05, 0xf7, 0xe1, 0x89, 0xac, 0x42, 0x9f, 0x31, 0xac, 0x0a, 0xba, 0xad, 0x24, + 0x03, 0x27, 0x62, 0x15, 0x1e, 0xab, 0xa2, 0x59, 0x15, 0x3c, 0x09, 0x46, 0xb3, 0x2a, 0x74, 0x3e, + 0x4b, 0x64, 0x55, 0xdc, 0x80, 0x15, 0x47, 0xb4, 0x38, 0x56, 0x4d, 0x3c, 0xd5, 0xfe, 0x13, 0x52, + 0x1c, 0xab, 0x26, 0xf0, 0xac, 0x3a, 0x6c, 0x45, 0x78, 0xde, 0x2d, 0xbd, 0xf9, 0x7a, 0x6b, 0xe1, + 0x1f, 0x5f, 0x6f, 0x2d, 0xfc, 0x72, 0xb4, 0x95, 0x7a, 0x33, 0xda, 0x4a, 0xfd, 0x7d, 0xb4, 0x95, + 0xfa, 0xd7, 0x68, 0x2b, 0x75, 0xbc, 0xc4, 0xff, 0x25, 0xf4, 0xd1, 0xff, 0x03, 0x00, 0x00, 0xff, + 0xff, 0x47, 0x18, 0x50, 0x6c, 0x2b, 0x2b, 0x00, 0x00, +} diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/control.proto b/extender/code/vendor/github.com/docker/swarmkit/api/control.proto new file mode 100644 index 000000000..0ca59703c --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/api/control.proto @@ -0,0 +1,554 @@ +syntax = "proto3"; + +package docker.swarmkit.v1; + +import "github.com/docker/swarmkit/api/specs.proto"; +import "github.com/docker/swarmkit/api/objects.proto"; +import "github.com/docker/swarmkit/api/types.proto"; +import "gogoproto/gogo.proto"; +import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto"; + +// Control defines the RPC methods for controlling a cluster. +service Control { + rpc GetNode(GetNodeRequest) returns (GetNodeResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc ListNodes(ListNodesRequest) returns (ListNodesResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc UpdateNode(UpdateNodeRequest) returns (UpdateNodeResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc RemoveNode(RemoveNodeRequest) returns (RemoveNodeResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + + rpc GetTask(GetTaskRequest) returns (GetTaskResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc ListTasks(ListTasksRequest) returns (ListTasksResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc RemoveTask(RemoveTaskRequest) returns (RemoveTaskResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + + rpc GetService(GetServiceRequest) returns (GetServiceResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc ListServices(ListServicesRequest) returns (ListServicesResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc CreateService(CreateServiceRequest) returns (CreateServiceResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc UpdateService(UpdateServiceRequest) returns (UpdateServiceResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc RemoveService(RemoveServiceRequest) returns (RemoveServiceResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + + rpc GetNetwork(GetNetworkRequest) returns (GetNetworkResponse) { + option 
(docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc ListNetworks(ListNetworksRequest) returns (ListNetworksResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc CreateNetwork(CreateNetworkRequest) returns (CreateNetworkResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc RemoveNetwork(RemoveNetworkRequest) returns (RemoveNetworkResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + + rpc GetCluster(GetClusterRequest) returns (GetClusterResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc UpdateCluster(UpdateClusterRequest) returns (UpdateClusterResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + + // --- secret APIs --- + + // GetSecret returns a `GetSecretResponse` with a `Secret` with the same + // id as `GetSecretRequest.SecretID` + // - Returns `NotFound` if the Secret with the given id is not found. + // - Returns `InvalidArgument` if the `GetSecretRequest.SecretID` is empty. + // - Returns an error if getting fails. + rpc GetSecret(GetSecretRequest) returns (GetSecretResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + } + + // UpdateSecret returns a `UpdateSecretResponse` with a `Secret` with the same + // id as `GetSecretRequest.SecretID` + // - Returns `NotFound` if the Secret with the given id is not found. + // - Returns `InvalidArgument` if the `GetSecretRequest.SecretID` is empty. + // - Returns an error if updating fails. + rpc UpdateSecret(UpdateSecretRequest) returns (UpdateSecretResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + + // ListSecrets returns a `ListSecretResponse` with a list of all non-internal `Secret`s being + // managed, or all secrets matching any name in `ListSecretsRequest.Names`, any + // name prefix in `ListSecretsRequest.NamePrefixes`, any id in + // `ListSecretsRequest.SecretIDs`, or any id prefix in `ListSecretsRequest.IDPrefixes`. + // - Returns an error if listing fails. + rpc ListSecrets(ListSecretsRequest) returns (ListSecretsResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + } + // CreateSecret creates and return a `CreateSecretResponse` with a `Secret` based + // on the provided `CreateSecretRequest.SecretSpec`. + // - Returns `InvalidArgument` if the `CreateSecretRequest.SecretSpec` is malformed, + // or if the secret data is too long or contains invalid characters. + // - Returns an error if the creation fails. + rpc CreateSecret(CreateSecretRequest) returns (CreateSecretResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + } + + // RemoveSecret removes the secret referenced by `RemoveSecretRequest.ID`. + // - Returns `InvalidArgument` if `RemoveSecretRequest.ID` is empty. + // - Returns `NotFound` if the a secret named `RemoveSecretRequest.ID` is not found. + // - Returns an error if the deletion fails. 
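+// Usage sketch (assumes an authorized manager connection `conn`; names come
+// from the Go client generated for this service):
+//
+//     client := api.NewControlClient(conn)
+//     resp, err := client.GetSecret(ctx, &api.GetSecretRequest{SecretID: id})
+//     // resp.Secret.Spec.Data is nil by design; see GetSecretResponse below.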
+ rpc RemoveSecret(RemoveSecretRequest) returns (RemoveSecretResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + } + + // --- config APIs --- + + // GetConfig returns a `GetConfigResponse` with a `Config` with the same + // id as `GetConfigRequest.ConfigID` + // - Returns `NotFound` if the Config with the given id is not found. + // - Returns `InvalidArgument` if the `GetConfigRequest.ConfigID` is empty. + // - Returns an error if getting fails. + rpc GetConfig(GetConfigRequest) returns (GetConfigResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + } + + // UpdateConfig returns a `UpdateConfigResponse` with a `Config` with the same + // id as `GetConfigRequest.ConfigID` + // - Returns `NotFound` if the Config with the given id is not found. + // - Returns `InvalidArgument` if the `GetConfigRequest.ConfigID` is empty. + // - Returns an error if updating fails. + rpc UpdateConfig(UpdateConfigRequest) returns (UpdateConfigResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + + // ListConfigs returns a `ListConfigResponse` with a list of `Config`s being + // managed, or all configs matching any name in `ListConfigsRequest.Names`, any + // name prefix in `ListConfigsRequest.NamePrefixes`, any id in + // `ListConfigsRequest.ConfigIDs`, or any id prefix in `ListConfigsRequest.IDPrefixes`. + // - Returns an error if listing fails. + rpc ListConfigs(ListConfigsRequest) returns (ListConfigsResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + } + // CreateConfig creates and return a `CreateConfigResponse` with a `Config` based + // on the provided `CreateConfigRequest.ConfigSpec`. + // - Returns `InvalidArgument` if the `CreateConfigRequest.ConfigSpec` is malformed, + // or if the config data is too long or contains invalid characters. + // - Returns an error if the creation fails. + rpc CreateConfig(CreateConfigRequest) returns (CreateConfigResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + } + + // RemoveConfig removes the config referenced by `RemoveConfigRequest.ID`. + // - Returns `InvalidArgument` if `RemoveConfigRequest.ID` is empty. + // - Returns `NotFound` if the a config named `RemoveConfigRequest.ID` is not found. + // - Returns an error if the deletion fails. + rpc RemoveConfig(RemoveConfigRequest) returns (RemoveConfigResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + } +} + +message GetNodeRequest { + string node_id = 1; +} + +message GetNodeResponse { + Node node = 1; +} + +message ListNodesRequest { + message Filters { + repeated string names = 1; + repeated string id_prefixes = 2; + map labels = 3; + repeated NodeSpec.Membership memberships = 4 [packed=false]; + repeated NodeRole roles = 5 [packed=false]; + // NamePrefixes matches all objects with the given prefixes + repeated string name_prefixes = 6; + } + + Filters filters = 1; +} + +message ListNodesResponse { + repeated Node nodes = 1; +} + +// UpdateNodeRequest requests an update to the specified node. This may be used +// to request a new availability for a node, such as PAUSE. Invalid updates +// will be denied and cause an error. +message UpdateNodeRequest { + string node_id = 1; + Version node_version = 2; + NodeSpec spec = 3; +} + +message UpdateNodeResponse { + Node node = 1; +} + +// RemoveNodeRequest requests to delete the specified node from store. 
+message RemoveNodeRequest { + string node_id = 1; + bool force = 2; +} + +message RemoveNodeResponse { +} + +message GetTaskRequest { + string task_id = 1; +} + +message GetTaskResponse { + Task task = 1; +} + +message RemoveTaskRequest { + string task_id = 1; +} + +message RemoveTaskResponse { +} + +message ListTasksRequest { + message Filters { + repeated string names = 1; + repeated string id_prefixes = 2; + map labels = 3; + repeated string service_ids = 4; + repeated string node_ids = 5; + repeated docker.swarmkit.v1.TaskState desired_states = 6 [packed=false]; + // NamePrefixes matches all objects with the given prefixes + repeated string name_prefixes = 7; + repeated string runtimes = 9; + + // UpToDate matches tasks that are consistent with the current + // service definition. + // Note: this is intended for internal status reporting rather + // than being exposed to users. It may be removed in the future. + bool up_to_date = 8; + } + + Filters filters = 1; +} + +message ListTasksResponse { + repeated Task tasks = 1; +} + +message CreateServiceRequest { + ServiceSpec spec = 1; +} + +message CreateServiceResponse { + Service service = 1; +} + +message GetServiceRequest { + string service_id = 1; + bool insert_defaults = 2; +} + +message GetServiceResponse { + Service service = 1; +} + +message UpdateServiceRequest { + string service_id = 1; + Version service_version = 2; + ServiceSpec spec = 3; + + enum Rollback { + // This is not a rollback. The spec field of the request will + // be honored. + NONE = 0; + + // Roll back the service - get spec from the service's + // previous_spec. + PREVIOUS = 1; + } + + // Rollback may be set to PREVIOUS to request a rollback (the service's + // spec will be set to the value of its previous_spec field). In this + // case, the spec field of this request is ignored. 
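+// For example (sketch, using the generated Go types and a `client` as in the
+// control.proto sketch above), a rollback request sets only the flag and
+// omits the spec:
+//
+//     _, err := client.UpdateService(ctx, &api.UpdateServiceRequest{
+//         ServiceID:      id,
+//         ServiceVersion: &version,
+//         Rollback:       api.UpdateServiceRequest_PREVIOUS,
+//     })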
+ Rollback rollback = 4; +} + +message UpdateServiceResponse { + Service service = 1; +} + +message RemoveServiceRequest { + string service_id = 1; +} + +message RemoveServiceResponse { +} + +message ListServicesRequest { + message Filters { + repeated string names = 1; + repeated string id_prefixes = 2; + map labels = 3; + // NamePrefixes matches all objects with the given prefixes + repeated string name_prefixes = 4; + repeated string runtimes = 5; + } + + Filters filters = 1; +} + +message ListServicesResponse { + repeated Service services = 1; +} + +message CreateNetworkRequest { + NetworkSpec spec = 1; +} + +message CreateNetworkResponse { + Network network = 1; +} + +message GetNetworkRequest { + string name = 1; + string network_id = 2; +} + +message GetNetworkResponse { + Network network = 1; +} + +message RemoveNetworkRequest { + string name = 1; + string network_id = 2; +} + +message RemoveNetworkResponse {} + +message ListNetworksRequest { + message Filters { + repeated string names = 1; + repeated string id_prefixes = 2; + map labels = 3; + // NamePrefixes matches all objects with the given prefixes + repeated string name_prefixes = 4; + } + + Filters filters = 1; +} + +message ListNetworksResponse { + repeated Network networks = 1; +} + +message GetClusterRequest { + string cluster_id = 1; +} + +message GetClusterResponse { + Cluster cluster = 1; +} + +message ListClustersRequest { + message Filters { + repeated string names = 1; + repeated string id_prefixes = 2; + map labels = 3; + // NamePrefixes matches all objects with the given prefixes + repeated string name_prefixes = 4; + } + + Filters filters = 1; +} + +message ListClustersResponse { + repeated Cluster clusters = 1; +} + +// KeyRotation tells UpdateCluster what items to rotate +message KeyRotation { + // WorkerJoinToken tells UpdateCluster to rotate the worker secret token. + bool worker_join_token = 1; + + // ManagerJoinToken tells UpdateCluster to rotate the manager secret token. + bool manager_join_token = 2; + + // ManagerUnlockKey tells UpdateCluster to rotate the manager unlock key + bool manager_unlock_key = 3; + +} + +message UpdateClusterRequest { + // ClusterID is the cluster ID to update. + string cluster_id = 1; + + // ClusterVersion is the version of the cluster being updated. + Version cluster_version = 2; + + // Spec is the new spec to apply to the cluster. + ClusterSpec spec = 3; + + // Rotation contains flags for join token and unlock key rotation + KeyRotation rotation = 4 [(gogoproto.nullable) = false]; +} + +message UpdateClusterResponse { + Cluster cluster = 1; +} + +// GetSecretRequest is the request to get a `Secret` object given a secret id. +message GetSecretRequest { + string secret_id = 1; +} + +// GetSecretResponse contains the Secret corresponding to the id in +// `GetSecretRequest`, but the `Secret.Spec.Data` field in each `Secret` +// object should be nil instead of actually containing the secret bytes. +message GetSecretResponse { + Secret secret = 1; +} + +message UpdateSecretRequest { + // SecretID is the secret ID to update. + string secret_id = 1; + + // SecretVersion is the version of the secret being updated. + Version secret_version = 2; + + // Spec is the new spec to apply to the Secret + // Only some fields are allowed to be updated. 
+ SecretSpec spec = 3; +} + +message UpdateSecretResponse { + Secret secret = 1; +} + +// ListSecretRequest is the request to list all non-internal secrets in the secret store, +// or all secrets filtered by (name or name prefix or id prefix) and labels. +message ListSecretsRequest { + message Filters { + repeated string names = 1; + repeated string id_prefixes = 2; + map labels = 3; + repeated string name_prefixes = 4; + } + + Filters filters = 1; +} + +// ListSecretResponse contains a list of all the secrets that match the name or +// name prefix filters provided in `ListSecretRequest`. The `Secret.Spec.Data` +// field in each `Secret` object should be nil instead of actually containing +// the secret bytes. +message ListSecretsResponse { + repeated Secret secrets = 1; +} + +// CreateSecretRequest specifies a new secret (it will not update an existing +// secret) to create. +message CreateSecretRequest { + SecretSpec spec = 1; +} + +// CreateSecretResponse contains the newly created `Secret` corresponding to the +// name in `CreateSecretRequest`. The `Secret.Spec.Data` field should be nil instead +// of actually containing the secret bytes. +message CreateSecretResponse { + Secret secret = 1; +} + +// RemoveSecretRequest contains the ID of the secret that should be removed. This +// removes all versions of the secret. +message RemoveSecretRequest { + string secret_id = 1; +} + +// RemoveSecretResponse is an empty object indicating the successful removal of +// a secret. +message RemoveSecretResponse {} + +// GetConfigRequest is the request to get a `Config` object given a config id. +message GetConfigRequest { + string config_id = 1; +} + +// GetConfigResponse contains the Config corresponding to the id in +// `GetConfigRequest`. +message GetConfigResponse { + Config config = 1; +} + +message UpdateConfigRequest { + // ConfigID is the config ID to update. + string config_id = 1; + + // ConfigVersion is the version of the config being updated. + Version config_version = 2; + + // Spec is the new spec to apply to the Config + // Only some fields are allowed to be updated. + ConfigSpec spec = 3; +} + +message UpdateConfigResponse { + Config config = 1; +} + +// ListConfigRequest is the request to list all configs in the config store, +// or all configs filtered by (name or name prefix or id prefix) and labels. +message ListConfigsRequest { + message Filters { + repeated string names = 1; + repeated string id_prefixes = 2; + map labels = 3; + repeated string name_prefixes = 4; + } + + Filters filters = 1; +} + +// ListConfigResponse contains a list of all the configs that match the name or +// name prefix filters provided in `ListConfigRequest`. +message ListConfigsResponse { + repeated Config configs = 1; +} + +// CreateConfigRequest specifies a new config (it will not update an existing +// config) to create. +message CreateConfigRequest { + ConfigSpec spec = 1; +} + +// CreateConfigResponse contains the newly created `Config` corresponding to the +// name in `CreateConfigRequest`. +message CreateConfigResponse { + Config config = 1; +} + +// RemoveConfigRequest contains the ID of the config that should be removed. This +// removes all versions of the config. +message RemoveConfigRequest { + string config_id = 1; +} + +// RemoveConfigResponse is an empty object indicating the successful removal of +// a config. 
+message RemoveConfigResponse {} diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/deepcopy/copy.go b/extender/code/vendor/github.com/docker/swarmkit/api/deepcopy/copy.go new file mode 100644 index 000000000..8880bd606 --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/api/deepcopy/copy.go @@ -0,0 +1,56 @@ +package deepcopy + +import ( + "fmt" + "time" + + "github.com/gogo/protobuf/types" +) + +// CopierFrom can be implemented if an object knows how to copy another into itself. +type CopierFrom interface { + // Copy takes the fields from src and copies them into the target object. + // + // Calling this method with a nil receiver or a nil src may panic. + CopyFrom(src interface{}) +} + +// Copy copies src into dst. dst and src must have the same type. +// +// If the type has a copy function defined, it will be used. +// +// Default implementations for builtin types and well known protobuf types may +// be provided. +// +// If the copy cannot be performed, this function will panic. Make sure to test +// types that use this function. +func Copy(dst, src interface{}) { + switch dst := dst.(type) { + case *types.Any: + src := src.(*types.Any) + dst.TypeUrl = src.TypeUrl + if src.Value != nil { + dst.Value = make([]byte, len(src.Value)) + copy(dst.Value, src.Value) + } else { + dst.Value = nil + } + case *types.Duration: + src := src.(*types.Duration) + *dst = *src + case *time.Duration: + src := src.(*time.Duration) + *dst = *src + case *types.Timestamp: + src := src.(*types.Timestamp) + *dst = *src + case *types.BoolValue: + src := src.(*types.BoolValue) + *dst = *src + case CopierFrom: + dst.CopyFrom(src) + default: + panic(fmt.Sprintf("Copy for %T not implemented", dst)) + } + +} diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/dispatcher.pb.go b/extender/code/vendor/github.com/docker/swarmkit/api/dispatcher.pb.go new file mode 100644 index 000000000..cc443848d --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/api/dispatcher.pb.go @@ -0,0 +1,3830 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/docker/swarmkit/api/dispatcher.proto + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/docker/swarmkit/protobuf/plugin" +import _ "github.com/gogo/protobuf/types" + +import time "time" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" + +import types "github.com/gogo/protobuf/types" + +import raftselector "github.com/docker/swarmkit/manager/raftselector" +import codes "google.golang.org/grpc/codes" +import status "google.golang.org/grpc/status" +import metadata "google.golang.org/grpc/metadata" +import peer "google.golang.org/grpc/peer" +import rafttime "time" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
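+// The deepcopy package vendored above backs the generated CopyFrom methods in
+// this file. Called directly, both arguments must share one concrete type or
+// Copy panics. A minimal sketch:
+//
+//     dst := &types.Any{}
+//     deepcopy.Copy(dst, &types.Any{TypeUrl: "example", Value: []byte{1}})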
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +type AssignmentChange_AssignmentAction int32 + +const ( + AssignmentChange_AssignmentActionUpdate AssignmentChange_AssignmentAction = 0 + AssignmentChange_AssignmentActionRemove AssignmentChange_AssignmentAction = 1 +) + +var AssignmentChange_AssignmentAction_name = map[int32]string{ + 0: "UPDATE", + 1: "REMOVE", +} +var AssignmentChange_AssignmentAction_value = map[string]int32{ + "UPDATE": 0, + "REMOVE": 1, +} + +func (x AssignmentChange_AssignmentAction) String() string { + return proto.EnumName(AssignmentChange_AssignmentAction_name, int32(x)) +} +func (AssignmentChange_AssignmentAction) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDispatcher, []int{10, 0} +} + +// AssignmentType specifies whether this assignment message carries +// the full state, or is an update to an existing state. +type AssignmentsMessage_Type int32 + +const ( + AssignmentsMessage_COMPLETE AssignmentsMessage_Type = 0 + AssignmentsMessage_INCREMENTAL AssignmentsMessage_Type = 1 +) + +var AssignmentsMessage_Type_name = map[int32]string{ + 0: "COMPLETE", + 1: "INCREMENTAL", +} +var AssignmentsMessage_Type_value = map[string]int32{ + "COMPLETE": 0, + "INCREMENTAL": 1, +} + +func (x AssignmentsMessage_Type) String() string { + return proto.EnumName(AssignmentsMessage_Type_name, int32(x)) +} +func (AssignmentsMessage_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDispatcher, []int{11, 0} +} + +// SessionRequest starts a session. +type SessionRequest struct { + Description *NodeDescription `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + // SessionID can be provided to attempt resuming an existing session. If the + // SessionID is empty or invalid, a new SessionID will be assigned. + // + // See SessionMessage.SessionID for details. + SessionID string `protobuf:"bytes,2,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` +} + +func (m *SessionRequest) Reset() { *m = SessionRequest{} } +func (*SessionRequest) ProtoMessage() {} +func (*SessionRequest) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{0} } + +// SessionMessage instructs an agent on various actions as part of the current +// session. An agent should act immediately on the contents. +type SessionMessage struct { + // SessionID is allocated after a successful registration. It should be + // used on all RPC calls after registration. A dispatcher may choose to + // change the SessionID, at which time an agent must re-register and obtain + // a new one. + // + // All Dispatcher calls after register should include the SessionID. If the + // Dispatcher so chooses, it may reject the call with an InvalidArgument + // error code, at which time the agent should call Register to start a new + // session. + // + // As a rule, once an agent has a SessionID, it should never save it to + // disk or try to otherwise reuse. If the agent loses its SessionID, it + // must start a new session through a call to Register. A Dispatcher may + // choose to reuse the SessionID, if it sees fit, but it is not advised. + // + // The actual implementation of the SessionID is Dispatcher specific and + // should be treated as opaque by agents. + // + // From a Dispatcher perspective, there are many ways to use the SessionID + // to ensure uniqueness of a set of client RPC calls. One method is to keep + // the SessionID unique to every call to Register in a single Dispatcher + // instance. 
This ensures that the SessionID represents the unique + // session from a single Agent to Manager. If the Agent restarts, we + // allocate a new session, since the restarted Agent is not aware of the + // new SessionID. + // + // The most compelling use case is to support duplicate node detection. If + // one clones a virtual machine, including certificate material, two nodes + // may end up with the same identity. This can also happen if two identical + // agent processes are coming from the same node. If the SessionID is + // replicated through the cluster, we can immediately detect the condition + // and address it. + // + // Extending from the case above, we can actually detect a compromised + // identity. Coupled with provisions to rebuild node identity, we can ban + // the compromised node identity and have the nodes re-authenticate and + // build a new identity. At this time, an administrator can then + // re-authorize the compromised nodes, if it was a mistake or ensure that a + // misbehaved node can no longer connect to the cluster. + // + // We considered placing this field in a GRPC header. Because this is a + // critical feature of the protocol, we thought it should be represented + // directly in the RPC message set. + SessionID string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` + // Node identifies the registering node. + Node *Node `protobuf:"bytes,2,opt,name=node" json:"node,omitempty"` + // Managers provides a weight list of alternative dispatchers + Managers []*WeightedPeer `protobuf:"bytes,3,rep,name=managers" json:"managers,omitempty"` + // Symmetric encryption key distributed by the lead manager. Used by agents + // for securing network bootstrapping and communication. + NetworkBootstrapKeys []*EncryptionKey `protobuf:"bytes,4,rep,name=network_bootstrap_keys,json=networkBootstrapKeys" json:"network_bootstrap_keys,omitempty"` + // Which root certificates to trust + RootCA []byte `protobuf:"bytes,5,opt,name=RootCA,proto3" json:"RootCA,omitempty"` +} + +func (m *SessionMessage) Reset() { *m = SessionMessage{} } +func (*SessionMessage) ProtoMessage() {} +func (*SessionMessage) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{1} } + +// HeartbeatRequest provides identifying properties for a single heartbeat. +type HeartbeatRequest struct { + SessionID string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` +} + +func (m *HeartbeatRequest) Reset() { *m = HeartbeatRequest{} } +func (*HeartbeatRequest) ProtoMessage() {} +func (*HeartbeatRequest) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{2} } + +type HeartbeatResponse struct { + // Period is the duration to wait before sending the next heartbeat. + // Well-behaved agents should update this on every heartbeat round trip. + Period time.Duration `protobuf:"bytes,1,opt,name=period,stdduration" json:"period"` +} + +func (m *HeartbeatResponse) Reset() { *m = HeartbeatResponse{} } +func (*HeartbeatResponse) ProtoMessage() {} +func (*HeartbeatResponse) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{3} } + +type UpdateTaskStatusRequest struct { + // Tasks should contain all statuses for running tasks. Only the status + // field must be set. The spec is not required. 
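+// A minimal single-task update (sketch; field names from this struct and the
+// TaskStatusUpdate type below, TaskStateRunning assumed from the vendored
+// types in this package):
+//
+//     req := &api.UpdateTaskStatusRequest{
+//         SessionID: sessionID,
+//         Updates: []*api.UpdateTaskStatusRequest_TaskStatusUpdate{
+//             {TaskID: taskID, Status: &api.TaskStatus{State: api.TaskStateRunning}},
+//         },
+//     }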
+ SessionID string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` + Updates []*UpdateTaskStatusRequest_TaskStatusUpdate `protobuf:"bytes,3,rep,name=updates" json:"updates,omitempty"` +} + +func (m *UpdateTaskStatusRequest) Reset() { *m = UpdateTaskStatusRequest{} } +func (*UpdateTaskStatusRequest) ProtoMessage() {} +func (*UpdateTaskStatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptorDispatcher, []int{4} +} + +type UpdateTaskStatusRequest_TaskStatusUpdate struct { + TaskID string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + Status *TaskStatus `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"` +} + +func (m *UpdateTaskStatusRequest_TaskStatusUpdate) Reset() { + *m = UpdateTaskStatusRequest_TaskStatusUpdate{} +} +func (*UpdateTaskStatusRequest_TaskStatusUpdate) ProtoMessage() {} +func (*UpdateTaskStatusRequest_TaskStatusUpdate) Descriptor() ([]byte, []int) { + return fileDescriptorDispatcher, []int{4, 0} +} + +type UpdateTaskStatusResponse struct { +} + +func (m *UpdateTaskStatusResponse) Reset() { *m = UpdateTaskStatusResponse{} } +func (*UpdateTaskStatusResponse) ProtoMessage() {} +func (*UpdateTaskStatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptorDispatcher, []int{5} +} + +type TasksRequest struct { + SessionID string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` +} + +func (m *TasksRequest) Reset() { *m = TasksRequest{} } +func (*TasksRequest) ProtoMessage() {} +func (*TasksRequest) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{6} } + +type TasksMessage struct { + // Tasks is the set of tasks that should be running on the node. + // Tasks outside of this set running on the node should be terminated. 
+ Tasks []*Task `protobuf:"bytes,1,rep,name=tasks" json:"tasks,omitempty"` +} + +func (m *TasksMessage) Reset() { *m = TasksMessage{} } +func (*TasksMessage) ProtoMessage() {} +func (*TasksMessage) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{7} } + +type AssignmentsRequest struct { + SessionID string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` +} + +func (m *AssignmentsRequest) Reset() { *m = AssignmentsRequest{} } +func (*AssignmentsRequest) ProtoMessage() {} +func (*AssignmentsRequest) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{8} } + +type Assignment struct { + // Types that are valid to be assigned to Item: + // *Assignment_Task + // *Assignment_Secret + // *Assignment_Config + Item isAssignment_Item `protobuf_oneof:"item"` +} + +func (m *Assignment) Reset() { *m = Assignment{} } +func (*Assignment) ProtoMessage() {} +func (*Assignment) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{9} } + +type isAssignment_Item interface { + isAssignment_Item() + MarshalTo([]byte) (int, error) + Size() int +} + +type Assignment_Task struct { + Task *Task `protobuf:"bytes,1,opt,name=task,oneof"` +} +type Assignment_Secret struct { + Secret *Secret `protobuf:"bytes,2,opt,name=secret,oneof"` +} +type Assignment_Config struct { + Config *Config `protobuf:"bytes,3,opt,name=config,oneof"` +} + +func (*Assignment_Task) isAssignment_Item() {} +func (*Assignment_Secret) isAssignment_Item() {} +func (*Assignment_Config) isAssignment_Item() {} + +func (m *Assignment) GetItem() isAssignment_Item { + if m != nil { + return m.Item + } + return nil +} + +func (m *Assignment) GetTask() *Task { + if x, ok := m.GetItem().(*Assignment_Task); ok { + return x.Task + } + return nil +} + +func (m *Assignment) GetSecret() *Secret { + if x, ok := m.GetItem().(*Assignment_Secret); ok { + return x.Secret + } + return nil +} + +func (m *Assignment) GetConfig() *Config { + if x, ok := m.GetItem().(*Assignment_Config); ok { + return x.Config + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
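+// Consumers usually branch on the populated oneof member instead of probing
+// each getter (sketch; the handle* functions are placeholders):
+//
+//     switch item := m.Item.(type) {
+//     case *Assignment_Task:
+//         handleTask(item.Task)
+//     case *Assignment_Secret:
+//         handleSecret(item.Secret)
+//     case *Assignment_Config:
+//         handleConfig(item.Config)
+//     }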
+func (*Assignment) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Assignment_OneofMarshaler, _Assignment_OneofUnmarshaler, _Assignment_OneofSizer, []interface{}{ + (*Assignment_Task)(nil), + (*Assignment_Secret)(nil), + (*Assignment_Config)(nil), + } +} + +func _Assignment_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Assignment) + // item + switch x := m.Item.(type) { + case *Assignment_Task: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Task); err != nil { + return err + } + case *Assignment_Secret: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Secret); err != nil { + return err + } + case *Assignment_Config: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Config); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Assignment.Item has unexpected type %T", x) + } + return nil +} + +func _Assignment_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Assignment) + switch tag { + case 1: // item.task + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Task) + err := b.DecodeMessage(msg) + m.Item = &Assignment_Task{msg} + return true, err + case 2: // item.secret + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Secret) + err := b.DecodeMessage(msg) + m.Item = &Assignment_Secret{msg} + return true, err + case 3: // item.config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Config) + err := b.DecodeMessage(msg) + m.Item = &Assignment_Config{msg} + return true, err + default: + return false, nil + } +} + +func _Assignment_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Assignment) + // item + switch x := m.Item.(type) { + case *Assignment_Task: + s := proto.Size(x.Task) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Assignment_Secret: + s := proto.Size(x.Secret) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Assignment_Config: + s := proto.Size(x.Config) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type AssignmentChange struct { + Assignment *Assignment `protobuf:"bytes,1,opt,name=assignment" json:"assignment,omitempty"` + Action AssignmentChange_AssignmentAction `protobuf:"varint,2,opt,name=action,proto3,enum=docker.swarmkit.v1.AssignmentChange_AssignmentAction" json:"action,omitempty"` +} + +func (m *AssignmentChange) Reset() { *m = AssignmentChange{} } +func (*AssignmentChange) ProtoMessage() {} +func (*AssignmentChange) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{10} } + +type AssignmentsMessage struct { + Type AssignmentsMessage_Type `protobuf:"varint,1,opt,name=type,proto3,enum=docker.swarmkit.v1.AssignmentsMessage_Type" json:"type,omitempty"` + // AppliesTo references the previous ResultsIn value, to chain + // incremental updates together. For the first update in a stream, + // AppliesTo is empty. If AppliesTo does not match the previously + // received ResultsIn, the consumer of the stream should start a new + // Assignments stream to re-sync. 
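+// In code, the chaining contract above reduces to one consumer-side check
+// (sketch):
+//
+//     if msg.AppliesTo != lastResultsIn {
+//         // a message was missed; restart the Assignments stream to re-sync
+//     }
+//     lastResultsIn = msg.ResultsIn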
+ AppliesTo string `protobuf:"bytes,2,opt,name=applies_to,json=appliesTo,proto3" json:"applies_to,omitempty"` + // ResultsIn identifies the result of this assignments message, to + // match against the next message's AppliesTo value and protect + // against missed messages. + ResultsIn string `protobuf:"bytes,3,opt,name=results_in,json=resultsIn,proto3" json:"results_in,omitempty"` + // AssignmentChange is a set of changes to apply on this node. + Changes []*AssignmentChange `protobuf:"bytes,4,rep,name=changes" json:"changes,omitempty"` +} + +func (m *AssignmentsMessage) Reset() { *m = AssignmentsMessage{} } +func (*AssignmentsMessage) ProtoMessage() {} +func (*AssignmentsMessage) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{11} } + +func init() { + proto.RegisterType((*SessionRequest)(nil), "docker.swarmkit.v1.SessionRequest") + proto.RegisterType((*SessionMessage)(nil), "docker.swarmkit.v1.SessionMessage") + proto.RegisterType((*HeartbeatRequest)(nil), "docker.swarmkit.v1.HeartbeatRequest") + proto.RegisterType((*HeartbeatResponse)(nil), "docker.swarmkit.v1.HeartbeatResponse") + proto.RegisterType((*UpdateTaskStatusRequest)(nil), "docker.swarmkit.v1.UpdateTaskStatusRequest") + proto.RegisterType((*UpdateTaskStatusRequest_TaskStatusUpdate)(nil), "docker.swarmkit.v1.UpdateTaskStatusRequest.TaskStatusUpdate") + proto.RegisterType((*UpdateTaskStatusResponse)(nil), "docker.swarmkit.v1.UpdateTaskStatusResponse") + proto.RegisterType((*TasksRequest)(nil), "docker.swarmkit.v1.TasksRequest") + proto.RegisterType((*TasksMessage)(nil), "docker.swarmkit.v1.TasksMessage") + proto.RegisterType((*AssignmentsRequest)(nil), "docker.swarmkit.v1.AssignmentsRequest") + proto.RegisterType((*Assignment)(nil), "docker.swarmkit.v1.Assignment") + proto.RegisterType((*AssignmentChange)(nil), "docker.swarmkit.v1.AssignmentChange") + proto.RegisterType((*AssignmentsMessage)(nil), "docker.swarmkit.v1.AssignmentsMessage") + proto.RegisterEnum("docker.swarmkit.v1.AssignmentChange_AssignmentAction", AssignmentChange_AssignmentAction_name, AssignmentChange_AssignmentAction_value) + proto.RegisterEnum("docker.swarmkit.v1.AssignmentsMessage_Type", AssignmentsMessage_Type_name, AssignmentsMessage_Type_value) +} + +type authenticatedWrapperDispatcherServer struct { + local DispatcherServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperDispatcherServer(local DispatcherServer, authorize func(context.Context, []string) error) DispatcherServer { + return &authenticatedWrapperDispatcherServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperDispatcherServer) Session(r *SessionRequest, stream Dispatcher_SessionServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-worker", "swarm-manager"}); err != nil { + return err + } + return p.local.Session(r, stream) +} + +func (p *authenticatedWrapperDispatcherServer) Heartbeat(ctx context.Context, r *HeartbeatRequest) (*HeartbeatResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-worker", "swarm-manager"}); err != nil { + return nil, err + } + return p.local.Heartbeat(ctx, r) +} + +func (p *authenticatedWrapperDispatcherServer) UpdateTaskStatus(ctx context.Context, r *UpdateTaskStatusRequest) (*UpdateTaskStatusResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-worker", "swarm-manager"}); err != nil { + return nil, err + } + return p.local.UpdateTaskStatus(ctx, r) +} + +func (p *authenticatedWrapperDispatcherServer) Tasks(r *TasksRequest, stream 
Dispatcher_TasksServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-worker", "swarm-manager"}); err != nil { + return err + } + return p.local.Tasks(r, stream) +} + +func (p *authenticatedWrapperDispatcherServer) Assignments(r *AssignmentsRequest, stream Dispatcher_AssignmentsServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-worker", "swarm-manager"}); err != nil { + return err + } + return p.local.Assignments(r, stream) +} + +func (m *SessionRequest) Copy() *SessionRequest { + if m == nil { + return nil + } + o := &SessionRequest{} + o.CopyFrom(m) + return o +} + +func (m *SessionRequest) CopyFrom(src interface{}) { + + o := src.(*SessionRequest) + *m = *o + if o.Description != nil { + m.Description = &NodeDescription{} + deepcopy.Copy(m.Description, o.Description) + } +} + +func (m *SessionMessage) Copy() *SessionMessage { + if m == nil { + return nil + } + o := &SessionMessage{} + o.CopyFrom(m) + return o +} + +func (m *SessionMessage) CopyFrom(src interface{}) { + + o := src.(*SessionMessage) + *m = *o + if o.Node != nil { + m.Node = &Node{} + deepcopy.Copy(m.Node, o.Node) + } + if o.Managers != nil { + m.Managers = make([]*WeightedPeer, len(o.Managers)) + for i := range m.Managers { + m.Managers[i] = &WeightedPeer{} + deepcopy.Copy(m.Managers[i], o.Managers[i]) + } + } + + if o.NetworkBootstrapKeys != nil { + m.NetworkBootstrapKeys = make([]*EncryptionKey, len(o.NetworkBootstrapKeys)) + for i := range m.NetworkBootstrapKeys { + m.NetworkBootstrapKeys[i] = &EncryptionKey{} + deepcopy.Copy(m.NetworkBootstrapKeys[i], o.NetworkBootstrapKeys[i]) + } + } + + if o.RootCA != nil { + m.RootCA = make([]byte, len(o.RootCA)) + copy(m.RootCA, o.RootCA) + } +} + +func (m *HeartbeatRequest) Copy() *HeartbeatRequest { + if m == nil { + return nil + } + o := &HeartbeatRequest{} + o.CopyFrom(m) + return o +} + +func (m *HeartbeatRequest) CopyFrom(src interface{}) { + + o := src.(*HeartbeatRequest) + *m = *o +} + +func (m *HeartbeatResponse) Copy() *HeartbeatResponse { + if m == nil { + return nil + } + o := &HeartbeatResponse{} + o.CopyFrom(m) + return o +} + +func (m *HeartbeatResponse) CopyFrom(src interface{}) { + + o := src.(*HeartbeatResponse) + *m = *o + deepcopy.Copy(&m.Period, &o.Period) +} + +func (m *UpdateTaskStatusRequest) Copy() *UpdateTaskStatusRequest { + if m == nil { + return nil + } + o := &UpdateTaskStatusRequest{} + o.CopyFrom(m) + return o +} + +func (m *UpdateTaskStatusRequest) CopyFrom(src interface{}) { + + o := src.(*UpdateTaskStatusRequest) + *m = *o + if o.Updates != nil { + m.Updates = make([]*UpdateTaskStatusRequest_TaskStatusUpdate, len(o.Updates)) + for i := range m.Updates { + m.Updates[i] = &UpdateTaskStatusRequest_TaskStatusUpdate{} + deepcopy.Copy(m.Updates[i], o.Updates[i]) + } + } + +} + +func (m *UpdateTaskStatusRequest_TaskStatusUpdate) Copy() *UpdateTaskStatusRequest_TaskStatusUpdate { + if m == nil { + return nil + } + o := &UpdateTaskStatusRequest_TaskStatusUpdate{} + o.CopyFrom(m) + return o +} + +func (m *UpdateTaskStatusRequest_TaskStatusUpdate) CopyFrom(src interface{}) { + + o := src.(*UpdateTaskStatusRequest_TaskStatusUpdate) + *m = *o + if o.Status != nil { + m.Status = &TaskStatus{} + deepcopy.Copy(m.Status, o.Status) + } +} + +func (m *UpdateTaskStatusResponse) Copy() *UpdateTaskStatusResponse { + if m == nil { + return nil + } + o := &UpdateTaskStatusResponse{} + o.CopyFrom(m) + return o +} + +func (m *UpdateTaskStatusResponse) CopyFrom(src interface{}) {} +func (m *TasksRequest) Copy() *TasksRequest { 
+ if m == nil { + return nil + } + o := &TasksRequest{} + o.CopyFrom(m) + return o +} + +func (m *TasksRequest) CopyFrom(src interface{}) { + + o := src.(*TasksRequest) + *m = *o +} + +func (m *TasksMessage) Copy() *TasksMessage { + if m == nil { + return nil + } + o := &TasksMessage{} + o.CopyFrom(m) + return o +} + +func (m *TasksMessage) CopyFrom(src interface{}) { + + o := src.(*TasksMessage) + *m = *o + if o.Tasks != nil { + m.Tasks = make([]*Task, len(o.Tasks)) + for i := range m.Tasks { + m.Tasks[i] = &Task{} + deepcopy.Copy(m.Tasks[i], o.Tasks[i]) + } + } + +} + +func (m *AssignmentsRequest) Copy() *AssignmentsRequest { + if m == nil { + return nil + } + o := &AssignmentsRequest{} + o.CopyFrom(m) + return o +} + +func (m *AssignmentsRequest) CopyFrom(src interface{}) { + + o := src.(*AssignmentsRequest) + *m = *o +} + +func (m *Assignment) Copy() *Assignment { + if m == nil { + return nil + } + o := &Assignment{} + o.CopyFrom(m) + return o +} + +func (m *Assignment) CopyFrom(src interface{}) { + + o := src.(*Assignment) + *m = *o + if o.Item != nil { + switch o.Item.(type) { + case *Assignment_Task: + v := Assignment_Task{ + Task: &Task{}, + } + deepcopy.Copy(v.Task, o.GetTask()) + m.Item = &v + case *Assignment_Secret: + v := Assignment_Secret{ + Secret: &Secret{}, + } + deepcopy.Copy(v.Secret, o.GetSecret()) + m.Item = &v + case *Assignment_Config: + v := Assignment_Config{ + Config: &Config{}, + } + deepcopy.Copy(v.Config, o.GetConfig()) + m.Item = &v + } + } + +} + +func (m *AssignmentChange) Copy() *AssignmentChange { + if m == nil { + return nil + } + o := &AssignmentChange{} + o.CopyFrom(m) + return o +} + +func (m *AssignmentChange) CopyFrom(src interface{}) { + + o := src.(*AssignmentChange) + *m = *o + if o.Assignment != nil { + m.Assignment = &Assignment{} + deepcopy.Copy(m.Assignment, o.Assignment) + } +} + +func (m *AssignmentsMessage) Copy() *AssignmentsMessage { + if m == nil { + return nil + } + o := &AssignmentsMessage{} + o.CopyFrom(m) + return o +} + +func (m *AssignmentsMessage) CopyFrom(src interface{}) { + + o := src.(*AssignmentsMessage) + *m = *o + if o.Changes != nil { + m.Changes = make([]*AssignmentChange, len(o.Changes)) + for i := range m.Changes { + m.Changes[i] = &AssignmentChange{} + deepcopy.Copy(m.Changes[i], o.Changes[i]) + } + } + +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Dispatcher service + +type DispatcherClient interface { + // Session starts an agent session with the dispatcher. The session is + // started after the first SessionMessage is received. + // + // Once started, the agent is controlled with a stream of SessionMessage. + // Agents should list on the stream at all times for instructions. + Session(ctx context.Context, in *SessionRequest, opts ...grpc.CallOption) (Dispatcher_SessionClient, error) + // Heartbeat is heartbeat method for nodes. It returns new TTL in response. + // Node should send new heartbeat earlier than now + TTL, otherwise it will + // be deregistered from dispatcher and its status will be updated to NodeStatus_DOWN + Heartbeat(ctx context.Context, in *HeartbeatRequest, opts ...grpc.CallOption) (*HeartbeatResponse, error) + // UpdateTaskStatus updates status of task. 
Node should send such updates + // on every status change of its tasks. + // + // Whether receiving batch updates or single status updates, this method + // should be accepting. Errors should only be returned if the entire update + // should be retried, due to data loss or other problems. + // + // If a task is unknown the dispatcher, the status update should be + // accepted regardless. + UpdateTaskStatus(ctx context.Context, in *UpdateTaskStatusRequest, opts ...grpc.CallOption) (*UpdateTaskStatusResponse, error) + // Tasks is a stream of tasks state for node. Each message contains full list + // of tasks which should be run on node, if task is not present in that list, + // it should be terminated. + Tasks(ctx context.Context, in *TasksRequest, opts ...grpc.CallOption) (Dispatcher_TasksClient, error) + // Assignments is a stream of assignments such as tasks and secrets for node. + // The first message in the stream contains all of the tasks and secrets + // that are relevant to the node. Future messages in the stream are updates to + // the set of assignments. + Assignments(ctx context.Context, in *AssignmentsRequest, opts ...grpc.CallOption) (Dispatcher_AssignmentsClient, error) +} + +type dispatcherClient struct { + cc *grpc.ClientConn +} + +func NewDispatcherClient(cc *grpc.ClientConn) DispatcherClient { + return &dispatcherClient{cc} +} + +func (c *dispatcherClient) Session(ctx context.Context, in *SessionRequest, opts ...grpc.CallOption) (Dispatcher_SessionClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Dispatcher_serviceDesc.Streams[0], c.cc, "/docker.swarmkit.v1.Dispatcher/Session", opts...) + if err != nil { + return nil, err + } + x := &dispatcherSessionClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Dispatcher_SessionClient interface { + Recv() (*SessionMessage, error) + grpc.ClientStream +} + +type dispatcherSessionClient struct { + grpc.ClientStream +} + +func (x *dispatcherSessionClient) Recv() (*SessionMessage, error) { + m := new(SessionMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *dispatcherClient) Heartbeat(ctx context.Context, in *HeartbeatRequest, opts ...grpc.CallOption) (*HeartbeatResponse, error) { + out := new(HeartbeatResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Dispatcher/Heartbeat", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dispatcherClient) UpdateTaskStatus(ctx context.Context, in *UpdateTaskStatusRequest, opts ...grpc.CallOption) (*UpdateTaskStatusResponse, error) { + out := new(UpdateTaskStatusResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Dispatcher/UpdateTaskStatus", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dispatcherClient) Tasks(ctx context.Context, in *TasksRequest, opts ...grpc.CallOption) (Dispatcher_TasksClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Dispatcher_serviceDesc.Streams[1], c.cc, "/docker.swarmkit.v1.Dispatcher/Tasks", opts...) 
+ if err != nil { + return nil, err + } + x := &dispatcherTasksClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Dispatcher_TasksClient interface { + Recv() (*TasksMessage, error) + grpc.ClientStream +} + +type dispatcherTasksClient struct { + grpc.ClientStream +} + +func (x *dispatcherTasksClient) Recv() (*TasksMessage, error) { + m := new(TasksMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *dispatcherClient) Assignments(ctx context.Context, in *AssignmentsRequest, opts ...grpc.CallOption) (Dispatcher_AssignmentsClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Dispatcher_serviceDesc.Streams[2], c.cc, "/docker.swarmkit.v1.Dispatcher/Assignments", opts...) + if err != nil { + return nil, err + } + x := &dispatcherAssignmentsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Dispatcher_AssignmentsClient interface { + Recv() (*AssignmentsMessage, error) + grpc.ClientStream +} + +type dispatcherAssignmentsClient struct { + grpc.ClientStream +} + +func (x *dispatcherAssignmentsClient) Recv() (*AssignmentsMessage, error) { + m := new(AssignmentsMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for Dispatcher service + +type DispatcherServer interface { + // Session starts an agent session with the dispatcher. The session is + // started after the first SessionMessage is received. + // + // Once started, the agent is controlled with a stream of SessionMessage. + // Agents should list on the stream at all times for instructions. + Session(*SessionRequest, Dispatcher_SessionServer) error + // Heartbeat is heartbeat method for nodes. It returns new TTL in response. + // Node should send new heartbeat earlier than now + TTL, otherwise it will + // be deregistered from dispatcher and its status will be updated to NodeStatus_DOWN + Heartbeat(context.Context, *HeartbeatRequest) (*HeartbeatResponse, error) + // UpdateTaskStatus updates status of task. Node should send such updates + // on every status change of its tasks. + // + // Whether receiving batch updates or single status updates, this method + // should be accepting. Errors should only be returned if the entire update + // should be retried, due to data loss or other problems. + // + // If a task is unknown the dispatcher, the status update should be + // accepted regardless. + UpdateTaskStatus(context.Context, *UpdateTaskStatusRequest) (*UpdateTaskStatusResponse, error) + // Tasks is a stream of tasks state for node. Each message contains full list + // of tasks which should be run on node, if task is not present in that list, + // it should be terminated. + Tasks(*TasksRequest, Dispatcher_TasksServer) error + // Assignments is a stream of assignments such as tasks and secrets for node. + // The first message in the stream contains all of the tasks and secrets + // that are relevant to the node. Future messages in the stream are updates to + // the set of assignments. 
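+// Client-side consumption of the stream (sketch; `apply` is a placeholder):
+//
+//     stream, err := client.Assignments(ctx, &api.AssignmentsRequest{SessionID: sessionID})
+//     for err == nil {
+//         var msg *api.AssignmentsMessage
+//         if msg, err = stream.Recv(); err == nil {
+//             apply(msg.Changes)
+//         }
+//     }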
+ Assignments(*AssignmentsRequest, Dispatcher_AssignmentsServer) error +} + +func RegisterDispatcherServer(s *grpc.Server, srv DispatcherServer) { + s.RegisterService(&_Dispatcher_serviceDesc, srv) +} + +func _Dispatcher_Session_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SessionRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DispatcherServer).Session(m, &dispatcherSessionServer{stream}) +} + +type Dispatcher_SessionServer interface { + Send(*SessionMessage) error + grpc.ServerStream +} + +type dispatcherSessionServer struct { + grpc.ServerStream +} + +func (x *dispatcherSessionServer) Send(m *SessionMessage) error { + return x.ServerStream.SendMsg(m) +} + +func _Dispatcher_Heartbeat_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HeartbeatRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DispatcherServer).Heartbeat(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Dispatcher/Heartbeat", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DispatcherServer).Heartbeat(ctx, req.(*HeartbeatRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Dispatcher_UpdateTaskStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateTaskStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DispatcherServer).UpdateTaskStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Dispatcher/UpdateTaskStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DispatcherServer).UpdateTaskStatus(ctx, req.(*UpdateTaskStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Dispatcher_Tasks_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(TasksRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DispatcherServer).Tasks(m, &dispatcherTasksServer{stream}) +} + +type Dispatcher_TasksServer interface { + Send(*TasksMessage) error + grpc.ServerStream +} + +type dispatcherTasksServer struct { + grpc.ServerStream +} + +func (x *dispatcherTasksServer) Send(m *TasksMessage) error { + return x.ServerStream.SendMsg(m) +} + +func _Dispatcher_Assignments_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(AssignmentsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DispatcherServer).Assignments(m, &dispatcherAssignmentsServer{stream}) +} + +type Dispatcher_AssignmentsServer interface { + Send(*AssignmentsMessage) error + grpc.ServerStream +} + +type dispatcherAssignmentsServer struct { + grpc.ServerStream +} + +func (x *dispatcherAssignmentsServer) Send(m *AssignmentsMessage) error { + return x.ServerStream.SendMsg(m) +} + +var _Dispatcher_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.Dispatcher", + HandlerType: (*DispatcherServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Heartbeat", + Handler: _Dispatcher_Heartbeat_Handler, + }, + { + MethodName: "UpdateTaskStatus", + Handler: _Dispatcher_UpdateTaskStatus_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Session", + Handler: 
_Dispatcher_Session_Handler, + ServerStreams: true, + }, + { + StreamName: "Tasks", + Handler: _Dispatcher_Tasks_Handler, + ServerStreams: true, + }, + { + StreamName: "Assignments", + Handler: _Dispatcher_Assignments_Handler, + ServerStreams: true, + }, + }, + Metadata: "github.com/docker/swarmkit/api/dispatcher.proto", +} + +func (m *SessionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SessionRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Description != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Description.Size())) + n1, err := m.Description.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if len(m.SessionID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.SessionID))) + i += copy(dAtA[i:], m.SessionID) + } + return i, nil +} + +func (m *SessionMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SessionMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SessionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.SessionID))) + i += copy(dAtA[i:], m.SessionID) + } + if m.Node != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Node.Size())) + n2, err := m.Node.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if len(m.Managers) > 0 { + for _, msg := range m.Managers { + dAtA[i] = 0x1a + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.NetworkBootstrapKeys) > 0 { + for _, msg := range m.NetworkBootstrapKeys { + dAtA[i] = 0x22 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.RootCA) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.RootCA))) + i += copy(dAtA[i:], m.RootCA) + } + return i, nil +} + +func (m *HeartbeatRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeartbeatRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SessionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.SessionID))) + i += copy(dAtA[i:], m.SessionID) + } + return i, nil +} + +func (m *HeartbeatResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeartbeatResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(types.SizeOfStdDuration(m.Period))) + n3, err := types.StdDurationMarshalTo(m.Period, dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +}
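The hand-rolled MarshalTo bodies above open-code each field's key byte: in the protobuf wire format the key is (field_number << 3) | wire_type, so field 1 of a length-delimited message is written as 0xa and field 2 as 0x12. A hypothetical spelling-out of the constants (not part of this generated file):

// Hypothetical illustration: the tag bytes hard-coded by the MarshalTo
// methods above are just (field<<3)|wiretype.
const (
	wireLengthDelimited = 2
	tagDescription      = 1<<3 | wireLengthDelimited // 0x0a: SessionRequest.Description
	tagSessionID        = 2<<3 | wireLengthDelimited // 0x12: SessionRequest.SessionID
)

+ +func (m *UpdateTaskStatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != 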
nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateTaskStatusRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SessionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.SessionID))) + i += copy(dAtA[i:], m.SessionID) + } + if len(m.Updates) > 0 { + for _, msg := range m.Updates { + dAtA[i] = 0x1a + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *UpdateTaskStatusRequest_TaskStatusUpdate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateTaskStatusRequest_TaskStatusUpdate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.TaskID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.TaskID))) + i += copy(dAtA[i:], m.TaskID) + } + if m.Status != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Status.Size())) + n4, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} + +func (m *UpdateTaskStatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateTaskStatusResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *TasksRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TasksRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SessionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.SessionID))) + i += copy(dAtA[i:], m.SessionID) + } + return i, nil +} + +func (m *TasksMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TasksMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Tasks) > 0 { + for _, msg := range m.Tasks { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *AssignmentsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AssignmentsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SessionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.SessionID))) + i += copy(dAtA[i:], m.SessionID) + } + return i, nil +} + +func (m *Assignment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Assignment) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Item != nil { + nn5, err := 
m.Item.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn5 + } + return i, nil +} + +func (m *Assignment_Task) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Task != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Task.Size())) + n6, err := m.Task.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} +func (m *Assignment_Secret) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Secret != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Secret.Size())) + n7, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} +func (m *Assignment_Config) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Config != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Config.Size())) + n8, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} +func (m *AssignmentChange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AssignmentChange) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Assignment != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Assignment.Size())) + n9, err := m.Assignment.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if m.Action != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Action)) + } + return i, nil +} + +func (m *AssignmentsMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AssignmentsMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Type)) + } + if len(m.AppliesTo) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.AppliesTo))) + i += copy(dAtA[i:], m.AppliesTo) + } + if len(m.ResultsIn) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.ResultsIn))) + i += copy(dAtA[i:], m.ResultsIn) + } + if len(m.Changes) > 0 { + for _, msg := range m.Changes { + dAtA[i] = 0x22 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeVarintDispatcher(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyDispatcherServer struct { + local DispatcherServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyDispatcherServer(local DispatcherServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) DispatcherServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, 
status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyDispatcherServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyDispatcherServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyDispatcherServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +type Dispatcher_SessionServerWrapper struct { + Dispatcher_SessionServer + ctx context.Context +} + +func (s Dispatcher_SessionServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyDispatcherServer) Session(r *SessionRequest, stream Dispatcher_SessionServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := Dispatcher_SessionServerWrapper{ + Dispatcher_SessionServer: stream, + ctx: ctx, + } + return p.local.Session(r, streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewDispatcherClient(conn).Session(ctx, r) + + if err != nil { + return err + } + + for { + msg, err := clientStream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + } + return nil +} + +func (p *raftProxyDispatcherServer) Heartbeat(ctx context.Context, r *HeartbeatRequest) (*HeartbeatResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.Heartbeat(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewDispatcherClient(conn).Heartbeat(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.Heartbeat(ctx, r) + } + return nil, err + } + return 
NewDispatcherClient(conn).Heartbeat(modCtx, r) + } + return resp, err +} + +func (p *raftProxyDispatcherServer) UpdateTaskStatus(ctx context.Context, r *UpdateTaskStatusRequest) (*UpdateTaskStatusResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.UpdateTaskStatus(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewDispatcherClient(conn).UpdateTaskStatus(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.UpdateTaskStatus(ctx, r) + } + return nil, err + } + return NewDispatcherClient(conn).UpdateTaskStatus(modCtx, r) + } + return resp, err +} + +type Dispatcher_TasksServerWrapper struct { + Dispatcher_TasksServer + ctx context.Context +} + +func (s Dispatcher_TasksServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyDispatcherServer) Tasks(r *TasksRequest, stream Dispatcher_TasksServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := Dispatcher_TasksServerWrapper{ + Dispatcher_TasksServer: stream, + ctx: ctx, + } + return p.local.Tasks(r, streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewDispatcherClient(conn).Tasks(ctx, r) + + if err != nil { + return err + } + + for { + msg, err := clientStream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + } + return nil +} + +type Dispatcher_AssignmentsServerWrapper struct { + Dispatcher_AssignmentsServer + ctx context.Context +} + +func (s Dispatcher_AssignmentsServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyDispatcherServer) Assignments(r *AssignmentsRequest, stream Dispatcher_AssignmentsServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := Dispatcher_AssignmentsServerWrapper{ + Dispatcher_AssignmentsServer: stream, + ctx: ctx, + } + return p.local.Assignments(r, streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewDispatcherClient(conn).Assignments(ctx, r) + + if err != nil { + return err + } + + for { + msg, err := clientStream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + } + return nil +} + +func (m *SessionRequest) Size() (n int) { + var l int + _ = l + if m.Description != nil { + l = m.Description.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} + +func (m *SessionMessage) Size() (n int) { + var l int + _ = 
l + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + if m.Node != nil { + l = m.Node.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + if len(m.Managers) > 0 { + for _, e := range m.Managers { + l = e.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + } + if len(m.NetworkBootstrapKeys) > 0 { + for _, e := range m.NetworkBootstrapKeys { + l = e.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + } + l = len(m.RootCA) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} + +func (m *HeartbeatRequest) Size() (n int) { + var l int + _ = l + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} + +func (m *HeartbeatResponse) Size() (n int) { + var l int + _ = l + l = types.SizeOfStdDuration(m.Period) + n += 1 + l + sovDispatcher(uint64(l)) + return n +} + +func (m *UpdateTaskStatusRequest) Size() (n int) { + var l int + _ = l + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + if len(m.Updates) > 0 { + for _, e := range m.Updates { + l = e.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + } + return n +} + +func (m *UpdateTaskStatusRequest_TaskStatusUpdate) Size() (n int) { + var l int + _ = l + l = len(m.TaskID) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} + +func (m *UpdateTaskStatusResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *TasksRequest) Size() (n int) { + var l int + _ = l + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} + +func (m *TasksMessage) Size() (n int) { + var l int + _ = l + if len(m.Tasks) > 0 { + for _, e := range m.Tasks { + l = e.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + } + return n +} + +func (m *AssignmentsRequest) Size() (n int) { + var l int + _ = l + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} + +func (m *Assignment) Size() (n int) { + var l int + _ = l + if m.Item != nil { + n += m.Item.Size() + } + return n +} + +func (m *Assignment_Task) Size() (n int) { + var l int + _ = l + if m.Task != nil { + l = m.Task.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} +func (m *Assignment_Secret) Size() (n int) { + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} +func (m *Assignment_Config) Size() (n int) { + var l int + _ = l + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} +func (m *AssignmentChange) Size() (n int) { + var l int + _ = l + if m.Assignment != nil { + l = m.Assignment.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + if m.Action != 0 { + n += 1 + sovDispatcher(uint64(m.Action)) + } + return n +} + +func (m *AssignmentsMessage) Size() (n int) { + var l int + _ = l + if m.Type != 0 { + n += 1 + sovDispatcher(uint64(m.Type)) + } + l = len(m.AppliesTo) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + l = len(m.ResultsIn) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + if len(m.Changes) > 0 { + for _, e := range m.Changes { + l = e.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + } + return n +} + +func sovDispatcher(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozDispatcher(x uint64) (n int) { + return sovDispatcher(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +}
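sovDispatcher above counts how many bytes a value occupies as a varint (one byte per 7 bits), which is what every Size method uses for its length prefixes, and sozDispatcher zig-zags signed values first so small negatives stay short. A hypothetical worked example, assuming it sits alongside these helpers in the same package:

// Hypothetical illustration of the varint helpers (not part of this file).
func varintExamples() {
	n := sovDispatcher(300) // 300 needs two 7-bit groups, so n == 2
	buf := make([]byte, n)
	encodeVarintDispatcher(buf, 0, 300) // buf == []byte{0xac, 0x02}
	_ = sozDispatcher(uint64(int64(-1))) // zigzag maps -1 to 1: one byte
}

+func (this 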
*SessionRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SessionRequest{`, + `Description:` + strings.Replace(fmt.Sprintf("%v", this.Description), "NodeDescription", "NodeDescription", 1) + `,`, + `SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`, + `}`, + }, "") + return s +} +func (this *SessionMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SessionMessage{`, + `SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`, + `Node:` + strings.Replace(fmt.Sprintf("%v", this.Node), "Node", "Node", 1) + `,`, + `Managers:` + strings.Replace(fmt.Sprintf("%v", this.Managers), "WeightedPeer", "WeightedPeer", 1) + `,`, + `NetworkBootstrapKeys:` + strings.Replace(fmt.Sprintf("%v", this.NetworkBootstrapKeys), "EncryptionKey", "EncryptionKey", 1) + `,`, + `RootCA:` + fmt.Sprintf("%v", this.RootCA) + `,`, + `}`, + }, "") + return s +} +func (this *HeartbeatRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HeartbeatRequest{`, + `SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`, + `}`, + }, "") + return s +} +func (this *HeartbeatResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HeartbeatResponse{`, + `Period:` + strings.Replace(strings.Replace(this.Period.String(), "Duration", "google_protobuf1.Duration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateTaskStatusRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateTaskStatusRequest{`, + `SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`, + `Updates:` + strings.Replace(fmt.Sprintf("%v", this.Updates), "UpdateTaskStatusRequest_TaskStatusUpdate", "UpdateTaskStatusRequest_TaskStatusUpdate", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateTaskStatusRequest_TaskStatusUpdate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateTaskStatusRequest_TaskStatusUpdate{`, + `TaskID:` + fmt.Sprintf("%v", this.TaskID) + `,`, + `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "TaskStatus", "TaskStatus", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateTaskStatusResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateTaskStatusResponse{`, + `}`, + }, "") + return s +} +func (this *TasksRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TasksRequest{`, + `SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`, + `}`, + }, "") + return s +} +func (this *TasksMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TasksMessage{`, + `Tasks:` + strings.Replace(fmt.Sprintf("%v", this.Tasks), "Task", "Task", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AssignmentsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AssignmentsRequest{`, + `SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`, + `}`, + }, "") + return s +} +func (this *Assignment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Assignment{`, + `Item:` + fmt.Sprintf("%v", this.Item) + `,`, + `}`, + }, "") + return s +} +func (this *Assignment_Task) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Assignment_Task{`, + `Task:` + strings.Replace(fmt.Sprintf("%v", this.Task), "Task", "Task", 1) + `,`, + `}`, + }, "") + return 
s +} +func (this *Assignment_Secret) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Assignment_Secret{`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Assignment_Config) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Assignment_Config{`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AssignmentChange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AssignmentChange{`, + `Assignment:` + strings.Replace(fmt.Sprintf("%v", this.Assignment), "Assignment", "Assignment", 1) + `,`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `}`, + }, "") + return s +} +func (this *AssignmentsMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AssignmentsMessage{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `AppliesTo:` + fmt.Sprintf("%v", this.AppliesTo) + `,`, + `ResultsIn:` + fmt.Sprintf("%v", this.ResultsIn) + `,`, + `Changes:` + strings.Replace(fmt.Sprintf("%v", this.Changes), "AssignmentChange", "AssignmentChange", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringDispatcher(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *SessionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SessionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SessionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Description == nil { + m.Description = &NodeDescription{} + } + if err := m.Description.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = 
preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SessionMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SessionMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SessionMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Node == nil { + m.Node = &Node{} + } + if err := m.Node.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Managers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Managers = append(m.Managers, &WeightedPeer{}) + if err := m.Managers[len(m.Managers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkBootstrapKeys", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NetworkBootstrapKeys = 
append(m.NetworkBootstrapKeys, &EncryptionKey{}) + if err := m.NetworkBootstrapKeys[len(m.NetworkBootstrapKeys)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RootCA", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RootCA = append(m.RootCA[:0], dAtA[iNdEx:postIndex]...) + if m.RootCA == nil { + m.RootCA = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HeartbeatRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HeartbeatRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HeartbeatRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HeartbeatResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HeartbeatResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HeartbeatResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Period", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := types.StdDurationUnmarshal(&m.Period, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateTaskStatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateTaskStatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateTaskStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Updates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Updates = append(m.Updates, &UpdateTaskStatusRequest_TaskStatusUpdate{}) + if err := m.Updates[len(m.Updates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateTaskStatusRequest_TaskStatusUpdate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskStatusUpdate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskStatusUpdate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &TaskStatus{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateTaskStatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateTaskStatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateTaskStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TasksRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + 
if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TasksRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TasksRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TasksMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TasksMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TasksMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tasks = append(m.Tasks, &Task{}) + if err := m.Tasks[len(m.Tasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AssignmentsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AssignmentsRequest: wiretype end group for non-group") + } + if fieldNum <= 
0 { + return fmt.Errorf("proto: AssignmentsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Assignment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Assignment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Assignment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Task{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Item = &Assignment_Task{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Secret{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Item = &Assignment_Secret{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + 
if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Config{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Item = &Assignment_Config{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AssignmentChange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AssignmentChange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AssignmentChange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Assignment", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Assignment == nil { + m.Assignment = &Assignment{} + } + if err := m.Assignment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= (AssignmentChange_AssignmentAction(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AssignmentsMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AssignmentsMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AssignmentsMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (AssignmentsMessage_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppliesTo", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppliesTo = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResultsIn", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResultsIn = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Changes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Changes = append(m.Changes, &AssignmentChange{}) + if err := m.Changes[len(m.Changes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDispatcher(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDispatcher + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDispatcher + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDispatcher + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, 
ErrInvalidLengthDispatcher + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDispatcher + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipDispatcher(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthDispatcher = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDispatcher = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/docker/swarmkit/api/dispatcher.proto", fileDescriptorDispatcher) +} + +var fileDescriptorDispatcher = []byte{ + // 1007 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0x4f, 0x6f, 0xe3, 0x44, + 0x1c, 0xcd, 0xa4, 0xa9, 0xdb, 0xfc, 0xd2, 0x2d, 0x61, 0xb4, 0x2a, 0xc6, 0xd2, 0xa6, 0xc1, 0x65, + 0xab, 0x8a, 0x2d, 0xce, 0x12, 0xfe, 0x1d, 0xa8, 0x0a, 0x4d, 0x13, 0xa9, 0xd1, 0x6e, 0xbb, 0xd5, + 0xb4, 0xbb, 0x7b, 0xac, 0x1c, 0x7b, 0xd6, 0x35, 0x69, 0x3c, 0xc6, 0x33, 0xd9, 0x25, 0x07, 0x24, + 0x0e, 0xac, 0x84, 0x38, 0x21, 0x4e, 0x95, 0x10, 0x5f, 0x01, 0xf1, 0x31, 0x2a, 0x4e, 0x1c, 0x39, + 0x15, 0x36, 0x1f, 0x80, 0x0f, 0xc0, 0x09, 0x79, 0x3c, 0x4e, 0x42, 0x37, 0x69, 0xd3, 0x9e, 0x12, + 0xcf, 0xbc, 0xf7, 0xe6, 0xf9, 0xfd, 0x7e, 0xfe, 0x0d, 0x54, 0x3c, 0x5f, 0x1c, 0x77, 0x5b, 0x96, + 0xc3, 0x3a, 0x15, 0x97, 0x39, 0x6d, 0x1a, 0x55, 0xf8, 0x0b, 0x3b, 0xea, 0xb4, 0x7d, 0x51, 0xb1, + 0x43, 0xbf, 0xe2, 0xfa, 0x3c, 0xb4, 0x85, 0x73, 0x4c, 0x23, 0x2b, 0x8c, 0x98, 0x60, 0x18, 0x27, + 0x28, 0x2b, 0x45, 0x59, 0xcf, 0x3f, 0x30, 0xde, 0xbb, 0x42, 0x44, 0xf4, 0x42, 0xca, 0x13, 0xbe, + 0xb1, 0x7e, 0x05, 0x96, 0xb5, 0xbe, 0xa4, 0x8e, 0x48, 0xd1, 0xb7, 0x3d, 0xe6, 0x31, 0xf9, 0xb7, + 0x12, 0xff, 0x53, 0xab, 0x9f, 0x5e, 0xa2, 0x21, 0x11, 0xad, 0xee, 0xb3, 0x4a, 0x78, 0xd2, 0xf5, + 0xfc, 0x40, 0xfd, 0x28, 0x62, 0xc9, 0x63, 0xcc, 0x3b, 0xa1, 0x43, 0x90, 0xdb, 0x8d, 0x6c, 0xe1, + 0x33, 0xb5, 0x6f, 0xbe, 0x44, 0xb0, 0x78, 0x40, 0x39, 0xf7, 0x59, 0x40, 0xe8, 0x57, 0x5d, 0xca, + 0x05, 0x6e, 0x40, 0xc1, 0xa5, 0xdc, 0x89, 0xfc, 0x30, 0xc6, 0xe9, 0xa8, 0x8c, 0xd6, 0x0a, 0xd5, + 0x15, 0xeb, 0xf5, 0x14, 0xac, 0x3d, 0xe6, 0xd2, 0xfa, 0x10, 0x4a, 0x46, 0x79, 0x78, 0x1d, 0x80, + 0x27, 0xc2, 0x47, 0xbe, 0xab, 0x67, 0xcb, 0x68, 0x2d, 0x5f, 0xbb, 0xd5, 0x3f, 0x5f, 0xce, 0xab, + 0xe3, 0x9a, 0x75, 0x92, 0x57, 0x80, 0xa6, 0x6b, 0xfe, 0x9c, 0x1d, 0xf8, 0xd8, 0xa5, 0x9c, 0xdb, + 0x1e, 0xbd, 0x20, 0x80, 0x2e, 0x17, 0xc0, 0xeb, 0x90, 0x0b, 0x98, 0x4b, 0xe5, 0x41, 0x85, 0xaa, + 0x3e, 0xc9, 0x2e, 0x91, 0x28, 0xbc, 0x01, 0xf3, 0x1d, 0x3b, 0xb0, 0x3d, 0x1a, 0x71, 0x7d, 0xa6, + 0x3c, 0xb3, 0x56, 0xa8, 0x96, 0xc7, 0x31, 0x9e, 0x52, 0xdf, 0x3b, 0x16, 0xd4, 0xdd, 0xa7, 0x34, + 0x22, 0x03, 0x06, 0x7e, 0x0a, 0x4b, 0x01, 0x15, 0x2f, 0x58, 0xd4, 0x3e, 0x6a, 0x31, 0x26, 0xb8, + 0x88, 0xec, 0xf0, 0xa8, 0x4d, 0x7b, 0x5c, 0xcf, 0x49, 0xad, 0x77, 0xc6, 0x69, 0x35, 0x02, 0x27, + 0xea, 0xc9, 0x68, 0x1e, 0xd0, 0x1e, 0xb9, 0xad, 0x04, 0x6a, 0x29, 0xff, 0x01, 0xed, 0x71, 0xbc, + 0x04, 0x1a, 0x61, 0x4c, 0x6c, 0x6f, 0xe9, 0xb3, 0x65, 
0xb4, 0xb6, 0x40, 0xd4, 0x93, 0xf9, 0x05, + 0x14, 0x77, 0xa8, 0x1d, 0x89, 0x16, 0xb5, 0x45, 0x5a, 0xa6, 0x6b, 0xc5, 0x63, 0xee, 0xc3, 0x9b, + 0x23, 0x0a, 0x3c, 0x64, 0x01, 0xa7, 0xf8, 0x33, 0xd0, 0x42, 0x1a, 0xf9, 0xcc, 0x55, 0x45, 0x7e, + 0xdb, 0x4a, 0xba, 0xc5, 0x4a, 0xbb, 0xc5, 0xaa, 0xab, 0x6e, 0xa9, 0xcd, 0x9f, 0x9d, 0x2f, 0x67, + 0x4e, 0xff, 0x5a, 0x46, 0x44, 0x51, 0xcc, 0x1f, 0xb3, 0xf0, 0xd6, 0xe3, 0xd0, 0xb5, 0x05, 0x3d, + 0xb4, 0x79, 0xfb, 0x40, 0xd8, 0xa2, 0xcb, 0x6f, 0xe4, 0x0d, 0x3f, 0x81, 0xb9, 0xae, 0x14, 0x4a, + 0x6b, 0xb1, 0x31, 0x2e, 0xbf, 0x09, 0x67, 0x59, 0xc3, 0x95, 0x04, 0x41, 0x52, 0x31, 0x83, 0x41, + 0xf1, 0xe2, 0x26, 0x5e, 0x81, 0x39, 0x61, 0xf3, 0xf6, 0xd0, 0x16, 0xf4, 0xcf, 0x97, 0xb5, 0x18, + 0xd6, 0xac, 0x13, 0x2d, 0xde, 0x6a, 0xba, 0xf8, 0x13, 0xd0, 0xb8, 0x24, 0xa9, 0x6e, 0x2a, 0x8d, + 0xf3, 0x33, 0xe2, 0x44, 0xa1, 0x4d, 0x03, 0xf4, 0xd7, 0x5d, 0x26, 0x59, 0x9b, 0x1b, 0xb0, 0x10, + 0xaf, 0xde, 0x2c, 0x22, 0x73, 0x53, 0xb1, 0xd3, 0x6f, 0xc3, 0x82, 0xd9, 0xd8, 0x2b, 0xd7, 0x91, + 0x0c, 0x4c, 0x9f, 0x64, 0x90, 0x24, 0x30, 0xb3, 0x06, 0x78, 0x8b, 0x73, 0xdf, 0x0b, 0x3a, 0x34, + 0x10, 0x37, 0xf4, 0xf0, 0x1b, 0x02, 0x18, 0x8a, 0x60, 0x0b, 0x72, 0xb1, 0xb6, 0x6a, 0x9d, 0x89, + 0x0e, 0x76, 0x32, 0x44, 0xe2, 0xf0, 0x47, 0xa0, 0x71, 0xea, 0x44, 0x54, 0xa8, 0x50, 0x8d, 0x71, + 0x8c, 0x03, 0x89, 0xd8, 0xc9, 0x10, 0x85, 0x8d, 0x59, 0x0e, 0x0b, 0x9e, 0xf9, 0x9e, 0x3e, 0x33, + 0x99, 0xb5, 0x2d, 0x11, 0x31, 0x2b, 0xc1, 0xd6, 0x34, 0xc8, 0xf9, 0x82, 0x76, 0xcc, 0x97, 0x59, + 0x28, 0x0e, 0x2d, 0x6f, 0x1f, 0xdb, 0x81, 0x47, 0xf1, 0x26, 0x80, 0x3d, 0x58, 0x53, 0xf6, 0xc7, + 0x56, 0x78, 0xc8, 0x24, 0x23, 0x0c, 0xbc, 0x0b, 0x9a, 0xed, 0xc8, 0xd1, 0x18, 0xbf, 0xc8, 0x62, + 0xf5, 0xe3, 0xcb, 0xb9, 0xc9, 0xa9, 0x23, 0x0b, 0x5b, 0x92, 0x4c, 0x94, 0x88, 0xd9, 0x1a, 0xb5, + 0x98, 0xec, 0xe1, 0x55, 0xd0, 0x1e, 0xef, 0xd7, 0xb7, 0x0e, 0x1b, 0xc5, 0x8c, 0x61, 0xfc, 0xf0, + 0x4b, 0x79, 0xe9, 0x22, 0x42, 0x75, 0xf3, 0x2a, 0x68, 0xa4, 0xb1, 0xfb, 0xe8, 0x49, 0xa3, 0x88, + 0xc6, 0xe3, 0x08, 0xed, 0xb0, 0xe7, 0xd4, 0xfc, 0x17, 0xfd, 0xaf, 0xfe, 0x69, 0x17, 0x7d, 0x0e, + 0xb9, 0xf8, 0xa2, 0x92, 0x19, 0x2c, 0x56, 0xef, 0x5d, 0xfe, 0x1e, 0x29, 0xcb, 0x3a, 0xec, 0x85, + 0x94, 0x48, 0x22, 0xbe, 0x03, 0x60, 0x87, 0xe1, 0x89, 0x4f, 0xf9, 0x91, 0x60, 0xc9, 0x8c, 0x27, + 0x79, 0xb5, 0x72, 0xc8, 0xe2, 0xed, 0x88, 0xf2, 0xee, 0x89, 0xe0, 0x47, 0x7e, 0x20, 0x0b, 0x98, + 0x27, 0x79, 0xb5, 0xd2, 0x0c, 0xf0, 0x26, 0xcc, 0x39, 0x32, 0x9c, 0x74, 0x6e, 0xbe, 0x3b, 0x4d, + 0x92, 0x24, 0x25, 0x99, 0x77, 0x21, 0x17, 0x7b, 0xc1, 0x0b, 0x30, 0xbf, 0xfd, 0x68, 0x77, 0xff, + 0x61, 0x23, 0xce, 0x0b, 0xbf, 0x01, 0x85, 0xe6, 0xde, 0x36, 0x69, 0xec, 0x36, 0xf6, 0x0e, 0xb7, + 0x1e, 0x16, 0x51, 0xf5, 0x74, 0x16, 0xa0, 0x3e, 0xb8, 0xd4, 0xf1, 0xd7, 0x30, 0xa7, 0xda, 0x1b, + 0x9b, 0xe3, 0x5b, 0x70, 0xf4, 0x36, 0x34, 0x2e, 0xc3, 0xa8, 0x44, 0xcc, 0x95, 0xdf, 0x7f, 0xfd, + 0xe7, 0x34, 0x7b, 0x07, 0x16, 0x24, 0xe6, 0xfd, 0x78, 0xae, 0xd3, 0x08, 0x6e, 0x25, 0x4f, 0xea, + 0xd6, 0xb8, 0x8f, 0xf0, 0x37, 0x90, 0x1f, 0xcc, 0x60, 0x3c, 0xf6, 0x5d, 0x2f, 0x0e, 0x79, 0xe3, + 0xee, 0x15, 0x28, 0x35, 0x5c, 0xa6, 0x31, 0x80, 0x7f, 0x42, 0x50, 0xbc, 0x38, 0x9e, 0xf0, 0xbd, + 0x6b, 0x8c, 0x5a, 0x63, 0x7d, 0x3a, 0xf0, 0x75, 0x4c, 0x75, 0x61, 0x56, 0x0e, 0x36, 0x5c, 0x9e, + 0x34, 0x40, 0x06, 0xa7, 0x4f, 0x46, 0xa4, 0x75, 0x58, 0x9d, 0xe2, 0xc4, 0xef, 0xb3, 0xe8, 0x3e, + 0xc2, 0xdf, 0x21, 0x28, 0x8c, 0xb4, 0x36, 0x5e, 0xbd, 0xa2, 0xf7, 0x53, 0x0f, 0xab, 0xd3, 0x7d, + 0x23, 0x53, 0x76, 0x44, 0x4d, 0x3f, 0x7b, 0x55, 0xca, 0xfc, 0xf9, 0xaa, 0x94, 
0xf9, 0xb6, 0x5f,
+	0x42, 0x67, 0xfd, 0x12, 0xfa, 0xa3, 0x5f, 0x42, 0x7f, 0xf7, 0x4b, 0xa8, 0xa5, 0xc9, 0x2b, 0xf8,
+	0xc3, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xe0, 0xf0, 0x6a, 0xcb, 0xae, 0x0a, 0x00, 0x00,
+}
diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/dispatcher.proto b/extender/code/vendor/github.com/docker/swarmkit/api/dispatcher.proto
new file mode 100644
index 000000000..232580ec4
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/swarmkit/api/dispatcher.proto
@@ -0,0 +1,218 @@
+syntax = "proto3";
+
+package docker.swarmkit.v1;
+
+import "github.com/docker/swarmkit/api/types.proto";
+import "github.com/docker/swarmkit/api/objects.proto";
+import "gogoproto/gogo.proto";
+import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto";
+import "google/protobuf/duration.proto";
+
+// Dispatcher is the API provided by a manager group for agents to connect to. Agents
+// connect to this service to receive task assignments and report status.
+//
+// API methods on this service are used only by agent nodes.
+service Dispatcher {
+	// Session starts an agent session with the dispatcher. The session is
+	// started after the first SessionMessage is received.
+	//
+	// Once started, the agent is controlled with a stream of SessionMessage.
+	// Agents should listen on the stream at all times for instructions.
+	rpc Session(SessionRequest) returns (stream SessionMessage) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
+	};
+
+	// Heartbeat is the heartbeat method for nodes. It returns a new TTL in the
+	// response. A node should send its next heartbeat earlier than now + TTL,
+	// otherwise it will be deregistered from the dispatcher and its status will
+	// be updated to NodeStatus_DOWN.
+	rpc Heartbeat(HeartbeatRequest) returns (HeartbeatResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
+	};
+
+	// UpdateTaskStatus updates the status of a task. A node should send such an
+	// update on every status change of its tasks.
+	//
+	// This method should accept both batch and single status updates. Errors
+	// should only be returned if the entire update should be retried, due to
+	// data loss or other problems.
+	//
+	// If a task is unknown to the dispatcher, the status update should be
+	// accepted regardless.
+	rpc UpdateTaskStatus(UpdateTaskStatusRequest) returns (UpdateTaskStatusResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
+	};
+
+	// Tasks is a stream of task state for a node. Each message contains the
+	// full list of tasks that should be run on the node; if a task is not
+	// present in that list, it should be terminated.
+	rpc Tasks(TasksRequest) returns (stream TasksMessage) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
+		option deprecated = true;
+	};
+
+	// Assignments is a stream of assignments, such as tasks and secrets, for a
+	// node. The first message in the stream contains all of the tasks and
+	// secrets that are relevant to the node. Future messages in the stream are
+	// updates to the set of assignments.
+	rpc Assignments(AssignmentsRequest) returns (stream AssignmentsMessage) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
+	};
+}
+
+// SessionRequest starts a session.
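+//
+// As an illustration only (the client variable and helper names below are
+// hypothetical, not part of this API), an agent usually opens its first
+// session with an empty session_id and lets the dispatcher assign one:
+//
+//	stream, err := client.Session(ctx, &api.SessionRequest{
+//		Description: nodeDescription, // this node's description
+//		SessionID:   "",              // empty: request a brand-new session
+//	})
+//	msg, err := stream.Recv() // the first SessionMessage carries the SessionID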
+message SessionRequest {
+	NodeDescription description = 1;
+	// SessionID can be provided to attempt resuming an existing session. If the
+	// SessionID is empty or invalid, a new SessionID will be assigned.
+	//
+	// See SessionMessage.SessionID for details.
+	string session_id = 2;
+}
+
+// SessionMessage instructs an agent on various actions as part of the current
+// session. An agent should act immediately on the contents.
+message SessionMessage {
+	// SessionID is allocated after a successful registration. It should be
+	// used on all RPC calls after registration. A dispatcher may choose to
+	// change the SessionID, at which time an agent must re-register and obtain
+	// a new one.
+	//
+	// All Dispatcher calls after register should include the SessionID. If the
+	// Dispatcher so chooses, it may reject the call with an InvalidArgument
+	// error code, at which time the agent should call Register to start a new
+	// session.
+	//
+	// As a rule, once an agent has a SessionID, it should never save it to
+	// disk or otherwise try to reuse it. If the agent loses its SessionID, it
+	// must start a new session through a call to Register. A Dispatcher may
+	// choose to reuse the SessionID, if it sees fit, but it is not advised.
+	//
+	// The actual implementation of the SessionID is Dispatcher specific and
+	// should be treated as opaque by agents.
+	//
+	// From a Dispatcher perspective, there are many ways to use the SessionID
+	// to ensure uniqueness of a set of client RPC calls. One method is to keep
+	// the SessionID unique to every call to Register in a single Dispatcher
+	// instance. This ensures that the SessionID represents the unique
+	// session from a single Agent to Manager. If the Agent restarts, we
+	// allocate a new session, since the restarted Agent is not aware of the
+	// new SessionID.
+	//
+	// The most compelling use case is to support duplicate node detection. If
+	// one clones a virtual machine, including certificate material, two nodes
+	// may end up with the same identity. This can also happen if two identical
+	// agent processes are coming from the same node. If the SessionID is
+	// replicated through the cluster, we can immediately detect the condition
+	// and address it.
+	//
+	// Extending from the case above, we can actually detect a compromised
+	// identity. Coupled with provisions to rebuild node identity, we can ban
+	// the compromised node identity and have the nodes re-authenticate and
+	// build a new identity. At this time, an administrator can then
+	// re-authorize the compromised nodes, if it was a mistake, or ensure that
+	// a misbehaving node can no longer connect to the cluster.
+	//
+	// We considered placing this field in a gRPC header. Because this is a
+	// critical feature of the protocol, we thought it should be represented
+	// directly in the RPC message set.
+	string session_id = 1;
+
+	// Node identifies the registering node.
+	Node node = 2;
+
+	// Managers provides a weighted list of alternative dispatchers.
+	repeated WeightedPeer managers = 3;
+
+	// Symmetric encryption key distributed by the lead manager. Used by agents
+	// for securing network bootstrapping and communication.
+	repeated EncryptionKey network_bootstrap_keys = 4;
+
+	// Which root certificates to trust.
+	bytes RootCA = 5;
+}
+
+// HeartbeatRequest provides identifying properties for a single heartbeat.
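+//
+// As a sketch (hypothetical Go names; only the request/response shapes come
+// from this file), a well-behaved agent re-sends its heartbeat comfortably
+// inside the returned period so it is not marked NodeStatus_DOWN:
+//
+//	for {
+//		resp, err := client.Heartbeat(ctx, &api.HeartbeatRequest{SessionID: sid})
+//		if err != nil {
+//			return err // session likely expired; re-register
+//		}
+//		time.Sleep(resp.Period * 9 / 10) // stay safely under the TTL
+//	}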
+message HeartbeatRequest {
+	string session_id = 1;
+}
+
+message HeartbeatResponse {
+	// Period is the duration to wait before sending the next heartbeat.
+	// Well-behaved agents should update this on every heartbeat round trip.
+	google.protobuf.Duration period = 1 [(gogoproto.stdduration) = true, (gogoproto.nullable) = false];
+}
+
+message UpdateTaskStatusRequest {
+	// Tasks should contain all statuses for running tasks. Only the status
+	// field must be set. The spec is not required.
+	string session_id = 1;
+
+	message TaskStatusUpdate {
+		string task_id = 1;
+		TaskStatus status = 2;
+	}
+
+	repeated TaskStatusUpdate updates = 3;
+}
+
+message UpdateTaskStatusResponse {
+	// void
+}
+
+message TasksRequest {
+	string session_id = 1;
+}
+
+message TasksMessage {
+	// Tasks is the set of tasks that should be running on the node.
+	// Tasks outside of this set running on the node should be terminated.
+	repeated Task tasks = 1;
+}
+
+message AssignmentsRequest {
+	string session_id = 1;
+}
+
+message Assignment {
+	oneof item {
+		Task task = 1;
+		Secret secret = 2;
+		Config config = 3;
+	}
+}
+
+message AssignmentChange {
+	enum AssignmentAction {
+		UPDATE = 0 [(gogoproto.enumvalue_customname) = "AssignmentActionUpdate"];
+		REMOVE = 1 [(gogoproto.enumvalue_customname) = "AssignmentActionRemove"];
+	}
+
+	Assignment assignment = 1;
+	AssignmentAction action = 2;
+}
+
+message AssignmentsMessage {
+	// AssignmentType specifies whether this assignment message carries
+	// the full state, or is an update to an existing state.
+	enum Type {
+		COMPLETE = 0;
+		INCREMENTAL = 1;
+	}
+
+	Type type = 1;
+
+	// AppliesTo references the previous ResultsIn value, to chain
+	// incremental updates together. For the first update in a stream,
+	// AppliesTo is empty. If AppliesTo does not match the previously
+	// received ResultsIn, the consumer of the stream should start a new
+	// Assignments stream to re-sync.
+	string applies_to = 2;
+
+	// ResultsIn identifies the result of this assignments message, to
+	// match against the next message's AppliesTo value and protect
+	// against missed messages.
+	string results_in = 3;
+
+	// AssignmentChange is a set of changes to apply on this node.
+	repeated AssignmentChange changes = 4;
+}
diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/equality/equality.go b/extender/code/vendor/github.com/docker/swarmkit/api/equality/equality.go
new file mode 100644
index 000000000..522c71981
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/swarmkit/api/equality/equality.go
@@ -0,0 +1,67 @@
+package equality
+
+import (
+	"crypto/subtle"
+	"reflect"
+
+	"github.com/docker/swarmkit/api"
+)
+
+// TasksEqualStable returns true if the tasks are functionally equal, ignoring status,
+// version and other superfluous fields.
+//
+// This is used to decide whether or not to propagate a task update to a controller.
+func TasksEqualStable(a, b *api.Task) bool {
+	// shallow copy
+	copyA, copyB := *a, *b
+
+	copyA.Status, copyB.Status = api.TaskStatus{}, api.TaskStatus{}
+	copyA.Meta, copyB.Meta = api.Meta{}, api.Meta{}
+
+	return reflect.DeepEqual(&copyA, &copyB)
+}
+
+// TaskStatusesEqualStable compares the task status excluding timestamp fields.
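+//
+// For example (a sketch, not code from this package), a caller deciding
+// whether a task update is worth propagating might combine the two helpers:
+//
+//	if equality.TasksEqualStable(oldTask, newTask) &&
+//		equality.TaskStatusesEqualStable(&oldTask.Status, &newTask.Status) {
+//		return nil // nothing functionally changed; skip the update
+//	}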
+func TaskStatusesEqualStable(a, b *api.TaskStatus) bool {
+	copyA, copyB := *a, *b
+
+	copyA.Timestamp, copyB.Timestamp = nil, nil
+	return reflect.DeepEqual(&copyA, &copyB)
+}
+
+// RootCAEqualStable compares RootCAs, excluding join tokens, which are randomly generated
+func RootCAEqualStable(a, b *api.RootCA) bool {
+	if a == nil && b == nil {
+		return true
+	}
+	if a == nil || b == nil {
+		return false
+	}
+
+	var aRotationKey, bRotationKey []byte
+	if a.RootRotation != nil {
+		aRotationKey = a.RootRotation.CAKey
+	}
+	if b.RootRotation != nil {
+		bRotationKey = b.RootRotation.CAKey
+	}
+	if subtle.ConstantTimeCompare(a.CAKey, b.CAKey) != 1 || subtle.ConstantTimeCompare(aRotationKey, bRotationKey) != 1 {
+		return false
+	}
+
+	copyA, copyB := *a, *b
+	copyA.JoinTokens, copyB.JoinTokens = api.JoinTokens{}, api.JoinTokens{}
+	return reflect.DeepEqual(copyA, copyB)
+}
+
+// ExternalCAsEqualStable compares lists of external CAs and determines whether they are equal.
+func ExternalCAsEqualStable(a, b []*api.ExternalCA) bool {
+	// because DeepEqual will treat an empty list and a nil list differently, we want to manually check this first
+	if len(a) == 0 && len(b) == 0 {
+		return true
+	}
+	// The assumption is that each individual api.ExternalCA within both lists are created from deserializing from a
+	// protobuf, so no special affordances are made to treat a nil map and empty map in the Options field of an
+	// api.ExternalCA as equivalent.
+	return reflect.DeepEqual(a, b)
+}
diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/health.pb.go b/extender/code/vendor/github.com/docker/swarmkit/api/health.pb.go
new file mode 100644
index 000000000..453e01fc3
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/swarmkit/api/health.pb.go
@@ -0,0 +1,703 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/docker/swarmkit/api/health.proto
+
+package api
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+import _ "github.com/docker/swarmkit/protobuf/plugin"
+
+import context "golang.org/x/net/context"
+import grpc "google.golang.org/grpc"
+
+import raftselector "github.com/docker/swarmkit/manager/raftselector"
+import codes "google.golang.org/grpc/codes"
+import status "google.golang.org/grpc/status"
+import metadata "google.golang.org/grpc/metadata"
+import peer "google.golang.org/grpc/peer"
+import rafttime "time"
+
+import strings "strings"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type HealthCheckResponse_ServingStatus int32 + +const ( + HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 + HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 + HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 +) + +var HealthCheckResponse_ServingStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SERVING", + 2: "NOT_SERVING", +} +var HealthCheckResponse_ServingStatus_value = map[string]int32{ + "UNKNOWN": 0, + "SERVING": 1, + "NOT_SERVING": 2, +} + +func (x HealthCheckResponse_ServingStatus) String() string { + return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x)) +} +func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptorHealth, []int{1, 0} +} + +type HealthCheckRequest struct { + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` +} + +func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} } +func (*HealthCheckRequest) ProtoMessage() {} +func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptorHealth, []int{0} } + +type HealthCheckResponse struct { + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=docker.swarmkit.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` +} + +func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} } +func (*HealthCheckResponse) ProtoMessage() {} +func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptorHealth, []int{1} } + +func init() { + proto.RegisterType((*HealthCheckRequest)(nil), "docker.swarmkit.v1.HealthCheckRequest") + proto.RegisterType((*HealthCheckResponse)(nil), "docker.swarmkit.v1.HealthCheckResponse") + proto.RegisterEnum("docker.swarmkit.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value) +} + +type authenticatedWrapperHealthServer struct { + local HealthServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperHealthServer(local HealthServer, authorize func(context.Context, []string) error) HealthServer { + return &authenticatedWrapperHealthServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperHealthServer) Check(ctx context.Context, r *HealthCheckRequest) (*HealthCheckResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.Check(ctx, r) +} + +func (m *HealthCheckRequest) Copy() *HealthCheckRequest { + if m == nil { + return nil + } + o := &HealthCheckRequest{} + o.CopyFrom(m) + return o +} + +func (m *HealthCheckRequest) CopyFrom(src interface{}) { + + o := src.(*HealthCheckRequest) + *m = *o +} + +func (m *HealthCheckResponse) Copy() *HealthCheckResponse { + if m == nil { + return nil + } + o := &HealthCheckResponse{} + o.CopyFrom(m) + return o +} + +func (m *HealthCheckResponse) CopyFrom(src interface{}) { + + o := src.(*HealthCheckResponse) + *m = *o +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for Health service + +type HealthClient interface { + Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) +} + +type healthClient struct { + cc *grpc.ClientConn +} + +func NewHealthClient(cc *grpc.ClientConn) HealthClient { + return &healthClient{cc} +} + +func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + out := new(HealthCheckResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Health/Check", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Health service + +type HealthServer interface { + Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) +} + +func RegisterHealthServer(s *grpc.Server, srv HealthServer) { + s.RegisterService(&_Health_serviceDesc, srv) +} + +func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HealthServer).Check(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Health/Check", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Health_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.Health", + HandlerType: (*HealthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Check", + Handler: _Health_Check_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/docker/swarmkit/api/health.proto", +} + +func (m *HealthCheckRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheckRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Service) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintHealth(dAtA, i, uint64(len(m.Service))) + i += copy(dAtA[i:], m.Service) + } + return i, nil +} + +func (m *HealthCheckResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheckResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Status != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintHealth(dAtA, i, uint64(m.Status)) + } + return i, nil +} + +func encodeVarintHealth(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyHealthServer struct { + local HealthServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyHealthServer(local HealthServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) HealthServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote 
addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyHealthServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyHealthServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyHealthServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyHealthServer) Check(ctx context.Context, r *HealthCheckRequest) (*HealthCheckResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.Check(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewHealthClient(conn).Check(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.Check(ctx, r) + } + return nil, err + } + return NewHealthClient(conn).Check(modCtx, r) + } + return resp, err +} + +func (m *HealthCheckRequest) Size() (n int) { + var l int + _ = l + l = len(m.Service) + if l > 0 { + n += 1 + l + sovHealth(uint64(l)) + } + return n +} + +func (m *HealthCheckResponse) Size() (n int) { + var l int + _ = l + if m.Status != 0 { + n += 1 + sovHealth(uint64(m.Status)) + } + return n +} + +func sovHealth(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozHealth(x uint64) (n int) { + return sovHealth(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *HealthCheckRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HealthCheckRequest{`, + `Service:` + fmt.Sprintf("%v", this.Service) + `,`, + `}`, + }, "") + return s +} +func (this *HealthCheckResponse) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&HealthCheckResponse{`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `}`, + }, "") + return s +} +func valueToStringHealth(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *HealthCheckRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHealth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HealthCheckRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HealthCheckRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHealth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthHealth + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Service = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipHealth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthHealth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HealthCheckResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHealth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HealthCheckResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HealthCheckResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHealth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= (HealthCheckResponse_ServingStatus(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipHealth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthHealth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipHealth(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return 0, ErrIntOverflowHealth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHealth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHealth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthHealth + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHealth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipHealth(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthHealth = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowHealth = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/health.proto", fileDescriptorHealth) } + +var fileDescriptorHealth = []byte{ + // 315 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4e, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xc9, 0x4f, 0xce, 0x4e, 0x2d, 0xd2, 0x2f, 0x2e, + 0x4f, 0x2c, 0xca, 0xcd, 0xce, 0x2c, 0xd1, 0x4f, 0x2c, 0xc8, 0xd4, 0xcf, 0x48, 0x4d, 0xcc, 0x29, + 0xc9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x82, 0xa8, 0xd0, 0x83, 0xa9, 0xd0, 0x2b, + 0x33, 0x94, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x4b, 0xeb, 0x83, 0x58, 0x10, 0x95, 0x52, 0xe6, + 0x78, 0x8c, 0x05, 0xab, 0x48, 0x2a, 0x4d, 0xd3, 0x2f, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x83, 0x52, + 0x10, 0x8d, 0x4a, 0x7a, 0x5c, 0x42, 0x1e, 0x60, 0x2b, 0x9d, 0x33, 0x52, 0x93, 0xb3, 0x83, 0x52, + 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x84, 0x24, 0xb8, 0xd8, 0x8b, 0x53, 0x8b, 0xca, 0x32, 0x93, 0x53, + 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0x60, 0x5c, 0xa5, 0x05, 0x8c, 0x5c, 0xc2, 0x28, 0x1a, + 0x8a, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0x7c, 0xb9, 0xd8, 0x8a, 0x4b, 0x12, 0x4b, 0x4a, 0x8b, + 0xc1, 0x1a, 0xf8, 0x8c, 0x4c, 0xf5, 0x30, 0xdd, 0xae, 0x87, 0x45, 0xa3, 0x5e, 0x30, 0xc8, 0xe0, + 0xbc, 0xf4, 0x60, 0xb0, 0xe6, 0x20, 0xa8, 0x21, 0x4a, 0x56, 0x5c, 0xbc, 0x28, 0x12, 0x42, 0xdc, + 0x5c, 0xec, 0xa1, 0x7e, 0xde, 0x7e, 0xfe, 0xe1, 0x7e, 0x02, 0x0c, 0x20, 0x4e, 0xb0, 0x6b, 0x50, + 0x98, 0xa7, 0x9f, 0xbb, 0x00, 0xa3, 0x10, 0x3f, 0x17, 0xb7, 0x9f, 0x7f, 0x48, 0x3c, 0x4c, 0x80, + 0xc9, 0xa8, 0x92, 0x8b, 0x0d, 0x62, 0x91, 0x50, 0x3e, 0x17, 0x2b, 0xd8, 0x32, 0x21, 0x35, 0x82, + 0xae, 0x01, 0xfb, 0x5b, 0x4a, 0x9d, 0x48, 0x57, 0x2b, 0x89, 0x9e, 0x5a, 0xf7, 0x6e, 0x06, 0x13, + 0x3f, 0x17, 0x2f, 0x58, 0xa1, 0x6e, 
0x6e, 0x62, 0x5e, 0x62, 0x7a, 0x6a, 0x91, 0x93, 0xc4, 0x89,
+	0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, 0x34, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1,
+	0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x93, 0xd8, 0xc0, 0xc1, 0x6d, 0x0c, 0x08, 0x00,
+	0x00, 0xff, 0xff, 0x7b, 0xf2, 0xdd, 0x23, 0x00, 0x02, 0x00, 0x00,
+}
diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/health.proto b/extender/code/vendor/github.com/docker/swarmkit/api/health.proto
new file mode 100644
index 000000000..8e066c0f8
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/swarmkit/api/health.proto
@@ -0,0 +1,34 @@
+syntax = "proto3";
+
+// See: https://github.com/grpc/grpc-go/blob/master/health/grpc_health_v1/health.proto
+//
+// We use the same health check service proto description defined in the gRPC documentation,
+// including the authorization check. This requires our own implementation of the health
+// package located in `manager/health`.
+//
+// For more info, refer to:
+// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
+
+package docker.swarmkit.v1;
+
+import "gogoproto/gogo.proto";
+import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto";
+
+service Health {
+	rpc Check(HealthCheckRequest) returns (HealthCheckResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	};
+}
+
+message HealthCheckRequest {
+	string service = 1;
+}
+
+message HealthCheckResponse {
+	enum ServingStatus {
+		UNKNOWN = 0;
+		SERVING = 1;
+		NOT_SERVING = 2;
+	}
+	ServingStatus status = 1;
+}
diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/logbroker.pb.go b/extender/code/vendor/github.com/docker/swarmkit/api/logbroker.pb.go
new file mode 100644
index 000000000..b6231e941
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/swarmkit/api/logbroker.pb.go
@@ -0,0 +1,3400 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/docker/swarmkit/api/logbroker.proto
+
+package api
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+import google_protobuf "github.com/gogo/protobuf/types"
+import _ "github.com/docker/swarmkit/protobuf/plugin"
+
+import deepcopy "github.com/docker/swarmkit/api/deepcopy"
+
+import context "golang.org/x/net/context"
+import grpc "google.golang.org/grpc"
+
+import raftselector "github.com/docker/swarmkit/manager/raftselector"
+import codes "google.golang.org/grpc/codes"
+import status "google.golang.org/grpc/status"
+import metadata "google.golang.org/grpc/metadata"
+import peer "google.golang.org/grpc/peer"
+import rafttime "time"
+
+import strings "strings"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// LogStream defines the stream from which the log message came.
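+//
+// For illustration (the option values shown are one plausible choice, not a
+// fixture of this API): a subscriber that only wants new stderr output would
+// pair the stderr stream with follow mode and a tail of -1, which suppresses
+// all historical lines (see LogSubscriptionOptions below):
+//
+//	opts := &api.LogSubscriptionOptions{
+//		Streams: []api.LogStream{api.LogStreamStderr},
+//		Follow:  true,
+//		Tail:    -1, // send no old lines, only what follows
+//	}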
+type LogStream int32 + +const ( + LogStreamUnknown LogStream = 0 + LogStreamStdout LogStream = 1 + LogStreamStderr LogStream = 2 +) + +var LogStream_name = map[int32]string{ + 0: "LOG_STREAM_UNKNOWN", + 1: "LOG_STREAM_STDOUT", + 2: "LOG_STREAM_STDERR", +} +var LogStream_value = map[string]int32{ + "LOG_STREAM_UNKNOWN": 0, + "LOG_STREAM_STDOUT": 1, + "LOG_STREAM_STDERR": 2, +} + +func (x LogStream) String() string { + return proto.EnumName(LogStream_name, int32(x)) +} +func (LogStream) EnumDescriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{0} } + +type LogSubscriptionOptions struct { + // Streams defines which log streams should be sent from the task source. + // Empty means send all the messages. + Streams []LogStream `protobuf:"varint,1,rep,name=streams,enum=docker.swarmkit.v1.LogStream" json:"streams,omitempty"` + // Follow instructs the publisher to continue sending log messages as they + // are produced, after satisfying the initial query. + Follow bool `protobuf:"varint,2,opt,name=follow,proto3" json:"follow,omitempty"` + // Tail defines how many messages relative to the log stream to send when + // starting the stream. + // + // Positive values will skip that number of messages from the start of the + // stream before publishing. + // + // Negative values will specify messages relative to the end of the stream, + // offset by one. We can say that the last (-n-1) lines are returned when n + // < 0. As reference, -1 would mean send no log lines (typically used with + // follow), -2 would return the last log line, -11 would return the last 10 + // and so on. + // + // The default value of zero will return all logs. + // + // Note that this is very different from the Docker API. + Tail int64 `protobuf:"varint,3,opt,name=tail,proto3" json:"tail,omitempty"` + // Since indicates that only log messages produced after this timestamp + // should be sent. + // Note: can't use stdtime because this field is nullable. + Since *google_protobuf.Timestamp `protobuf:"bytes,4,opt,name=since" json:"since,omitempty"` +} + +func (m *LogSubscriptionOptions) Reset() { *m = LogSubscriptionOptions{} } +func (*LogSubscriptionOptions) ProtoMessage() {} +func (*LogSubscriptionOptions) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{0} } + +// LogSelector will match logs from ANY of the defined parameters. +// +// For the best effect, the client should use the least specific parameter +// possible. For example, if they want to listen to all the tasks of a service, +// they should use the service id, rather than specifying the individual tasks. +type LogSelector struct { + ServiceIDs []string `protobuf:"bytes,1,rep,name=service_ids,json=serviceIds" json:"service_ids,omitempty"` + NodeIDs []string `protobuf:"bytes,2,rep,name=node_ids,json=nodeIds" json:"node_ids,omitempty"` + TaskIDs []string `protobuf:"bytes,3,rep,name=task_ids,json=taskIds" json:"task_ids,omitempty"` +} + +func (m *LogSelector) Reset() { *m = LogSelector{} } +func (*LogSelector) ProtoMessage() {} +func (*LogSelector) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{1} } + +// LogContext marks the context from which a log message was generated. 
+type LogContext struct {
+	ServiceID string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
+	NodeID    string `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+	TaskID    string `protobuf:"bytes,3,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
+}
+
+func (m *LogContext) Reset()                    { *m = LogContext{} }
+func (*LogContext) ProtoMessage()               {}
+func (*LogContext) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{2} }
+
+// LogAttr is an extra key/value pair that may have been set by users.
+type LogAttr struct {
+	Key   string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *LogAttr) Reset()                    { *m = LogAttr{} }
+func (*LogAttr) ProtoMessage()               {}
+func (*LogAttr) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{3} }
+
+// LogMessage
+type LogMessage struct {
+	// Context identifies the source of the log message.
+	Context LogContext `protobuf:"bytes,1,opt,name=context" json:"context"`
+	// Timestamp is the time at which the message was generated.
+	// Note: can't use stdtime because this field is nullable.
+	Timestamp *google_protobuf.Timestamp `protobuf:"bytes,2,opt,name=timestamp" json:"timestamp,omitempty"`
+	// Stream identifies the stream of the log message, stdout or stderr.
+	Stream LogStream `protobuf:"varint,3,opt,name=stream,proto3,enum=docker.swarmkit.v1.LogStream" json:"stream,omitempty"`
+	// Data is the raw log message, as generated by the application.
+	Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"`
+	// Attrs is a list of key value pairs representing additional log details
+	// that may have been returned from the logger.
+	Attrs []LogAttr `protobuf:"bytes,5,rep,name=attrs" json:"attrs"`
+}
+
+func (m *LogMessage) Reset()                    { *m = LogMessage{} }
+func (*LogMessage) ProtoMessage()               {}
+func (*LogMessage) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{4} }
+
+type SubscribeLogsRequest struct {
+	// LogSelector describes the logs to which the subscriber is subscribed.
+	Selector *LogSelector            `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"`
+	Options  *LogSubscriptionOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
+}
+
+func (m *SubscribeLogsRequest) Reset()                    { *m = SubscribeLogsRequest{} }
+func (*SubscribeLogsRequest) ProtoMessage()               {}
+func (*SubscribeLogsRequest) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{5} }
+
+type SubscribeLogsMessage struct {
+	Messages []LogMessage `protobuf:"bytes,1,rep,name=messages" json:"messages"`
+}
+
+func (m *SubscribeLogsMessage) Reset()                    { *m = SubscribeLogsMessage{} }
+func (*SubscribeLogsMessage) ProtoMessage()               {}
+func (*SubscribeLogsMessage) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{6} }
+
+// ListenSubscriptionsRequest is a placeholder to begin listening for
+// subscriptions.
+type ListenSubscriptionsRequest struct {
+}
+
+func (m *ListenSubscriptionsRequest) Reset()      { *m = ListenSubscriptionsRequest{} }
+func (*ListenSubscriptionsRequest) ProtoMessage() {}
+func (*ListenSubscriptionsRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptorLogbroker, []int{7}
+}
+
+// SubscriptionMessage instructs the listener to start publishing messages for
+// the stream or end a subscription.
+//
+// If Options.Follow == false, the worker should end the subscription on its own.
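+//
+// A worker-side handler might therefore look roughly like the following
+// (hypothetical helper names; only the ID, Selector, Options, and Close
+// fields come from this type):
+//
+//	if sub.Close {
+//		stopPublishing(sub.ID) // the manager hung up this subscription
+//	} else {
+//		go publishMatchingLogs(sub.ID, sub.Selector, sub.Options)
+//	}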
+type SubscriptionMessage struct { + // ID identifies the subscription. + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Selector defines which sources should be sent for the subscription. + Selector *LogSelector `protobuf:"bytes,2,opt,name=selector" json:"selector,omitempty"` + // Options specify how the subscription should be satisfied. + Options *LogSubscriptionOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Close will be true if the node should shutdown the subscription with the + // provided identifier. + Close bool `protobuf:"varint,4,opt,name=close,proto3" json:"close,omitempty"` +} + +func (m *SubscriptionMessage) Reset() { *m = SubscriptionMessage{} } +func (*SubscriptionMessage) ProtoMessage() {} +func (*SubscriptionMessage) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{8} } + +type PublishLogsMessage struct { + // SubscriptionID identifies which subscription the set of messages should + // be sent to. We can think of this as a "mail box" for the subscription. + SubscriptionID string `protobuf:"bytes,1,opt,name=subscription_id,json=subscriptionId,proto3" json:"subscription_id,omitempty"` + // Messages is the log message for publishing. + Messages []LogMessage `protobuf:"bytes,2,rep,name=messages" json:"messages"` + // Close is a boolean for whether or not the client has completed its log + // stream. When close is called, the manager can hang up the subscription. + // Any further logs from this subscription are an error condition. Any + // messages included when close is set can be discarded + Close bool `protobuf:"varint,3,opt,name=close,proto3" json:"close,omitempty"` +} + +func (m *PublishLogsMessage) Reset() { *m = PublishLogsMessage{} } +func (*PublishLogsMessage) ProtoMessage() {} +func (*PublishLogsMessage) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{9} } + +type PublishLogsResponse struct { +} + +func (m *PublishLogsResponse) Reset() { *m = PublishLogsResponse{} } +func (*PublishLogsResponse) ProtoMessage() {} +func (*PublishLogsResponse) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{10} } + +func init() { + proto.RegisterType((*LogSubscriptionOptions)(nil), "docker.swarmkit.v1.LogSubscriptionOptions") + proto.RegisterType((*LogSelector)(nil), "docker.swarmkit.v1.LogSelector") + proto.RegisterType((*LogContext)(nil), "docker.swarmkit.v1.LogContext") + proto.RegisterType((*LogAttr)(nil), "docker.swarmkit.v1.LogAttr") + proto.RegisterType((*LogMessage)(nil), "docker.swarmkit.v1.LogMessage") + proto.RegisterType((*SubscribeLogsRequest)(nil), "docker.swarmkit.v1.SubscribeLogsRequest") + proto.RegisterType((*SubscribeLogsMessage)(nil), "docker.swarmkit.v1.SubscribeLogsMessage") + proto.RegisterType((*ListenSubscriptionsRequest)(nil), "docker.swarmkit.v1.ListenSubscriptionsRequest") + proto.RegisterType((*SubscriptionMessage)(nil), "docker.swarmkit.v1.SubscriptionMessage") + proto.RegisterType((*PublishLogsMessage)(nil), "docker.swarmkit.v1.PublishLogsMessage") + proto.RegisterType((*PublishLogsResponse)(nil), "docker.swarmkit.v1.PublishLogsResponse") + proto.RegisterEnum("docker.swarmkit.v1.LogStream", LogStream_name, LogStream_value) +} + +type authenticatedWrapperLogsServer struct { + local LogsServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperLogsServer(local LogsServer, authorize func(context.Context, []string) error) LogsServer { + return &authenticatedWrapperLogsServer{ + local: local, + authorize: 
authorize, + } +} + +func (p *authenticatedWrapperLogsServer) SubscribeLogs(r *SubscribeLogsRequest, stream Logs_SubscribeLogsServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-manager"}); err != nil { + return err + } + return p.local.SubscribeLogs(r, stream) +} + +type authenticatedWrapperLogBrokerServer struct { + local LogBrokerServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperLogBrokerServer(local LogBrokerServer, authorize func(context.Context, []string) error) LogBrokerServer { + return &authenticatedWrapperLogBrokerServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperLogBrokerServer) ListenSubscriptions(r *ListenSubscriptionsRequest, stream LogBroker_ListenSubscriptionsServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-worker", "swarm-manager"}); err != nil { + return err + } + return p.local.ListenSubscriptions(r, stream) +} + +func (p *authenticatedWrapperLogBrokerServer) PublishLogs(stream LogBroker_PublishLogsServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-worker", "swarm-manager"}); err != nil { + return err + } + return p.local.PublishLogs(stream) +} + +func (m *LogSubscriptionOptions) Copy() *LogSubscriptionOptions { + if m == nil { + return nil + } + o := &LogSubscriptionOptions{} + o.CopyFrom(m) + return o +} + +func (m *LogSubscriptionOptions) CopyFrom(src interface{}) { + + o := src.(*LogSubscriptionOptions) + *m = *o + if o.Streams != nil { + m.Streams = make([]LogStream, len(o.Streams)) + copy(m.Streams, o.Streams) + } + + if o.Since != nil { + m.Since = &google_protobuf.Timestamp{} + deepcopy.Copy(m.Since, o.Since) + } +} + +func (m *LogSelector) Copy() *LogSelector { + if m == nil { + return nil + } + o := &LogSelector{} + o.CopyFrom(m) + return o +} + +func (m *LogSelector) CopyFrom(src interface{}) { + + o := src.(*LogSelector) + *m = *o + if o.ServiceIDs != nil { + m.ServiceIDs = make([]string, len(o.ServiceIDs)) + copy(m.ServiceIDs, o.ServiceIDs) + } + + if o.NodeIDs != nil { + m.NodeIDs = make([]string, len(o.NodeIDs)) + copy(m.NodeIDs, o.NodeIDs) + } + + if o.TaskIDs != nil { + m.TaskIDs = make([]string, len(o.TaskIDs)) + copy(m.TaskIDs, o.TaskIDs) + } + +} + +func (m *LogContext) Copy() *LogContext { + if m == nil { + return nil + } + o := &LogContext{} + o.CopyFrom(m) + return o +} + +func (m *LogContext) CopyFrom(src interface{}) { + + o := src.(*LogContext) + *m = *o +} + +func (m *LogAttr) Copy() *LogAttr { + if m == nil { + return nil + } + o := &LogAttr{} + o.CopyFrom(m) + return o +} + +func (m *LogAttr) CopyFrom(src interface{}) { + + o := src.(*LogAttr) + *m = *o +} + +func (m *LogMessage) Copy() *LogMessage { + if m == nil { + return nil + } + o := &LogMessage{} + o.CopyFrom(m) + return o +} + +func (m *LogMessage) CopyFrom(src interface{}) { + + o := src.(*LogMessage) + *m = *o + deepcopy.Copy(&m.Context, &o.Context) + if o.Timestamp != nil { + m.Timestamp = &google_protobuf.Timestamp{} + deepcopy.Copy(m.Timestamp, o.Timestamp) + } + if o.Data != nil { + m.Data = make([]byte, len(o.Data)) + copy(m.Data, o.Data) + } + if o.Attrs != nil { + m.Attrs = make([]LogAttr, len(o.Attrs)) + for i := range m.Attrs { + deepcopy.Copy(&m.Attrs[i], &o.Attrs[i]) + } + } + +} + +func (m *SubscribeLogsRequest) Copy() *SubscribeLogsRequest { + if m == nil { + return nil + } + o := &SubscribeLogsRequest{} + o.CopyFrom(m) + return o +} + +func (m *SubscribeLogsRequest) CopyFrom(src interface{}) { + + o := 
src.(*SubscribeLogsRequest) + *m = *o + if o.Selector != nil { + m.Selector = &LogSelector{} + deepcopy.Copy(m.Selector, o.Selector) + } + if o.Options != nil { + m.Options = &LogSubscriptionOptions{} + deepcopy.Copy(m.Options, o.Options) + } +} + +func (m *SubscribeLogsMessage) Copy() *SubscribeLogsMessage { + if m == nil { + return nil + } + o := &SubscribeLogsMessage{} + o.CopyFrom(m) + return o +} + +func (m *SubscribeLogsMessage) CopyFrom(src interface{}) { + + o := src.(*SubscribeLogsMessage) + *m = *o + if o.Messages != nil { + m.Messages = make([]LogMessage, len(o.Messages)) + for i := range m.Messages { + deepcopy.Copy(&m.Messages[i], &o.Messages[i]) + } + } + +} + +func (m *ListenSubscriptionsRequest) Copy() *ListenSubscriptionsRequest { + if m == nil { + return nil + } + o := &ListenSubscriptionsRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListenSubscriptionsRequest) CopyFrom(src interface{}) {} +func (m *SubscriptionMessage) Copy() *SubscriptionMessage { + if m == nil { + return nil + } + o := &SubscriptionMessage{} + o.CopyFrom(m) + return o +} + +func (m *SubscriptionMessage) CopyFrom(src interface{}) { + + o := src.(*SubscriptionMessage) + *m = *o + if o.Selector != nil { + m.Selector = &LogSelector{} + deepcopy.Copy(m.Selector, o.Selector) + } + if o.Options != nil { + m.Options = &LogSubscriptionOptions{} + deepcopy.Copy(m.Options, o.Options) + } +} + +func (m *PublishLogsMessage) Copy() *PublishLogsMessage { + if m == nil { + return nil + } + o := &PublishLogsMessage{} + o.CopyFrom(m) + return o +} + +func (m *PublishLogsMessage) CopyFrom(src interface{}) { + + o := src.(*PublishLogsMessage) + *m = *o + if o.Messages != nil { + m.Messages = make([]LogMessage, len(o.Messages)) + for i := range m.Messages { + deepcopy.Copy(&m.Messages[i], &o.Messages[i]) + } + } + +} + +func (m *PublishLogsResponse) Copy() *PublishLogsResponse { + if m == nil { + return nil + } + o := &PublishLogsResponse{} + o.CopyFrom(m) + return o +} + +func (m *PublishLogsResponse) CopyFrom(src interface{}) {} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Logs service + +type LogsClient interface { + // SubscribeLogs starts a subscription with the specified selector and options. + // + // The subscription will be distributed to relevant nodes and messages will + // be collected and sent via the returned stream. + // + // The subscription will end with an EOF. + SubscribeLogs(ctx context.Context, in *SubscribeLogsRequest, opts ...grpc.CallOption) (Logs_SubscribeLogsClient, error) +} + +type logsClient struct { + cc *grpc.ClientConn +} + +func NewLogsClient(cc *grpc.ClientConn) LogsClient { + return &logsClient{cc} +} + +func (c *logsClient) SubscribeLogs(ctx context.Context, in *SubscribeLogsRequest, opts ...grpc.CallOption) (Logs_SubscribeLogsClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Logs_serviceDesc.Streams[0], c.cc, "/docker.swarmkit.v1.Logs/SubscribeLogs", opts...) 
+	if err != nil {
+		return nil, err
+	}
+	x := &logsSubscribeLogsClient{stream}
+	if err := x.ClientStream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
+
+type Logs_SubscribeLogsClient interface {
+	Recv() (*SubscribeLogsMessage, error)
+	grpc.ClientStream
+}
+
+type logsSubscribeLogsClient struct {
+	grpc.ClientStream
+}
+
+func (x *logsSubscribeLogsClient) Recv() (*SubscribeLogsMessage, error) {
+	m := new(SubscribeLogsMessage)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// Server API for Logs service
+
+type LogsServer interface {
+	// SubscribeLogs starts a subscription with the specified selector and options.
+	//
+	// The subscription will be distributed to relevant nodes and messages will
+	// be collected and sent via the returned stream.
+	//
+	// The subscription will end with an EOF.
+	SubscribeLogs(*SubscribeLogsRequest, Logs_SubscribeLogsServer) error
+}
+
+func RegisterLogsServer(s *grpc.Server, srv LogsServer) {
+	s.RegisterService(&_Logs_serviceDesc, srv)
+}
+
+func _Logs_SubscribeLogs_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(SubscribeLogsRequest)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(LogsServer).SubscribeLogs(m, &logsSubscribeLogsServer{stream})
+}
+
+type Logs_SubscribeLogsServer interface {
+	Send(*SubscribeLogsMessage) error
+	grpc.ServerStream
+}
+
+type logsSubscribeLogsServer struct {
+	grpc.ServerStream
+}
+
+func (x *logsSubscribeLogsServer) Send(m *SubscribeLogsMessage) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+var _Logs_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "docker.swarmkit.v1.Logs",
+	HandlerType: (*LogsServer)(nil),
+	Methods:     []grpc.MethodDesc{},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "SubscribeLogs",
+			Handler:       _Logs_SubscribeLogs_Handler,
+			ServerStreams: true,
+		},
+	},
+	Metadata: "github.com/docker/swarmkit/api/logbroker.proto",
+}
+
+// Client API for LogBroker service
+
+type LogBrokerClient interface {
+	// ListenSubscriptions starts a subscription stream for the node. For each
+	// message received, the node should attempt to satisfy the subscription.
+	//
+	// Log messages that match the provided subscription should be sent via
+	// PublishLogs.
+	ListenSubscriptions(ctx context.Context, in *ListenSubscriptionsRequest, opts ...grpc.CallOption) (LogBroker_ListenSubscriptionsClient, error)
+	// PublishLogs receives sets of log messages destined for a single
+	// subscription identifier.
+	PublishLogs(ctx context.Context, opts ...grpc.CallOption) (LogBroker_PublishLogsClient, error)
+}
+
+type logBrokerClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewLogBrokerClient(cc *grpc.ClientConn) LogBrokerClient {
+	return &logBrokerClient{cc}
+}
+
+func (c *logBrokerClient) ListenSubscriptions(ctx context.Context, in *ListenSubscriptionsRequest, opts ...grpc.CallOption) (LogBroker_ListenSubscriptionsClient, error) {
+	stream, err := grpc.NewClientStream(ctx, &_LogBroker_serviceDesc.Streams[0], c.cc, "/docker.swarmkit.v1.LogBroker/ListenSubscriptions", opts...)
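+	// Same server-streaming handshake as Logs/SubscribeLogs above: one
+	// request out, then SubscriptionMessage values in until EOF.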
+	if err != nil {
+		return nil, err
+	}
+	x := &logBrokerListenSubscriptionsClient{stream}
+	if err := x.ClientStream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
+
+type LogBroker_ListenSubscriptionsClient interface {
+	Recv() (*SubscriptionMessage, error)
+	grpc.ClientStream
+}
+
+type logBrokerListenSubscriptionsClient struct {
+	grpc.ClientStream
+}
+
+func (x *logBrokerListenSubscriptionsClient) Recv() (*SubscriptionMessage, error) {
+	m := new(SubscriptionMessage)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func (c *logBrokerClient) PublishLogs(ctx context.Context, opts ...grpc.CallOption) (LogBroker_PublishLogsClient, error) {
+	stream, err := grpc.NewClientStream(ctx, &_LogBroker_serviceDesc.Streams[1], c.cc, "/docker.swarmkit.v1.LogBroker/PublishLogs", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &logBrokerPublishLogsClient{stream}
+	return x, nil
+}
+
+type LogBroker_PublishLogsClient interface {
+	Send(*PublishLogsMessage) error
+	CloseAndRecv() (*PublishLogsResponse, error)
+	grpc.ClientStream
+}
+
+type logBrokerPublishLogsClient struct {
+	grpc.ClientStream
+}
+
+func (x *logBrokerPublishLogsClient) Send(m *PublishLogsMessage) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *logBrokerPublishLogsClient) CloseAndRecv() (*PublishLogsResponse, error) {
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	m := new(PublishLogsResponse)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// Server API for LogBroker service
+
+type LogBrokerServer interface {
+	// ListenSubscriptions starts a subscription stream for the node. For each
+	// message received, the node should attempt to satisfy the subscription.
+	//
+	// Log messages that match the provided subscription should be sent via
+	// PublishLogs.
+	ListenSubscriptions(*ListenSubscriptionsRequest, LogBroker_ListenSubscriptionsServer) error
+	// PublishLogs receives sets of log messages destined for a single
+	// subscription identifier.
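+	//
+	// A minimal worker-side sketch, assuming conn is an established
+	// *grpc.ClientConn (illustrative only, not part of the generated API):
+	//
+	//	client := NewLogBrokerClient(conn)
+	//	subs, _ := client.ListenSubscriptions(ctx, &ListenSubscriptionsRequest{})
+	//	for {
+	//		sub, err := subs.Recv()
+	//		if err != nil {
+	//			break // io.EOF when the manager ends the stream
+	//		}
+	//		pub, _ := client.PublishLogs(ctx)
+	//		// send matching log messages for this subscription, then close
+	//		_ = pub.Send(&PublishLogsMessage{SubscriptionID: sub.ID})
+	//		_, _ = pub.CloseAndRecv()
+	//	}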
+ PublishLogs(LogBroker_PublishLogsServer) error +} + +func RegisterLogBrokerServer(s *grpc.Server, srv LogBrokerServer) { + s.RegisterService(&_LogBroker_serviceDesc, srv) +} + +func _LogBroker_ListenSubscriptions_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ListenSubscriptionsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(LogBrokerServer).ListenSubscriptions(m, &logBrokerListenSubscriptionsServer{stream}) +} + +type LogBroker_ListenSubscriptionsServer interface { + Send(*SubscriptionMessage) error + grpc.ServerStream +} + +type logBrokerListenSubscriptionsServer struct { + grpc.ServerStream +} + +func (x *logBrokerListenSubscriptionsServer) Send(m *SubscriptionMessage) error { + return x.ServerStream.SendMsg(m) +} + +func _LogBroker_PublishLogs_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(LogBrokerServer).PublishLogs(&logBrokerPublishLogsServer{stream}) +} + +type LogBroker_PublishLogsServer interface { + SendAndClose(*PublishLogsResponse) error + Recv() (*PublishLogsMessage, error) + grpc.ServerStream +} + +type logBrokerPublishLogsServer struct { + grpc.ServerStream +} + +func (x *logBrokerPublishLogsServer) SendAndClose(m *PublishLogsResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *logBrokerPublishLogsServer) Recv() (*PublishLogsMessage, error) { + m := new(PublishLogsMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _LogBroker_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.LogBroker", + HandlerType: (*LogBrokerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ListenSubscriptions", + Handler: _LogBroker_ListenSubscriptions_Handler, + ServerStreams: true, + }, + { + StreamName: "PublishLogs", + Handler: _LogBroker_PublishLogs_Handler, + ClientStreams: true, + }, + }, + Metadata: "github.com/docker/swarmkit/api/logbroker.proto", +} + +func (m *LogSubscriptionOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogSubscriptionOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Streams) > 0 { + for _, num := range m.Streams { + dAtA[i] = 0x8 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(num)) + } + } + if m.Follow { + dAtA[i] = 0x10 + i++ + if m.Follow { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Tail != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Tail)) + } + if m.Since != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Since.Size())) + n1, err := m.Since.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + return i, nil +} + +func (m *LogSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogSelector) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ServiceIDs) > 0 { + for _, s := range m.ServiceIDs { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.NodeIDs) > 0 { + for _, s := range m.NodeIDs { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + 
dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.TaskIDs) > 0 { + for _, s := range m.TaskIDs { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *LogContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogContext) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ServiceID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.ServiceID))) + i += copy(dAtA[i:], m.ServiceID) + } + if len(m.NodeID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + if len(m.TaskID) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.TaskID))) + i += copy(dAtA[i:], m.TaskID) + } + return i, nil +} + +func (m *LogAttr) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogAttr) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func (m *LogMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Context.Size())) + n2, err := m.Context.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if m.Timestamp != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Timestamp.Size())) + n3, err := m.Timestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.Stream != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Stream)) + } + if len(m.Data) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if len(m.Attrs) > 0 { + for _, msg := range m.Attrs { + dAtA[i] = 0x2a + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *SubscribeLogsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubscribeLogsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Selector != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Selector.Size())) + n4, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.Options != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Options.Size())) + 
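+		// Key byte 0x12 encodes field 2 with wire type 2 ((2<<3)|2), i.e. a
+		// length-delimited field: key, varint length, then the message body.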
n5, err := m.Options.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + return i, nil +} + +func (m *SubscribeLogsMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubscribeLogsMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Messages) > 0 { + for _, msg := range m.Messages { + dAtA[i] = 0xa + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ListenSubscriptionsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenSubscriptionsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *SubscriptionMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubscriptionMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Selector.Size())) + n6, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.Options != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Options.Size())) + n7, err := m.Options.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.Close { + dAtA[i] = 0x20 + i++ + if m.Close { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *PublishLogsMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PublishLogsMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SubscriptionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.SubscriptionID))) + i += copy(dAtA[i:], m.SubscriptionID) + } + if len(m.Messages) > 0 { + for _, msg := range m.Messages { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Close { + dAtA[i] = 0x18 + i++ + if m.Close { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *PublishLogsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PublishLogsResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func encodeVarintLogbroker(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyLogsServer struct { + local LogsServer + connSelector raftselector.ConnProvider + localCtxMods, 
remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyLogsServer(local LogsServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) LogsServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyLogsServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyLogsServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyLogsServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +type Logs_SubscribeLogsServerWrapper struct { + Logs_SubscribeLogsServer + ctx context.Context +} + +func (s Logs_SubscribeLogsServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyLogsServer) SubscribeLogs(r *SubscribeLogsRequest, stream Logs_SubscribeLogsServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := Logs_SubscribeLogsServerWrapper{ + Logs_SubscribeLogsServer: stream, + ctx: ctx, + } + return p.local.SubscribeLogs(r, streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewLogsClient(conn).SubscribeLogs(ctx, r) + + if err != nil { + return err + } + + for { + msg, err := clientStream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + } + return nil +} + +type raftProxyLogBrokerServer struct { + local LogBrokerServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyLogBrokerServer(local LogBrokerServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, 
error)) LogBrokerServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyLogBrokerServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyLogBrokerServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyLogBrokerServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +type LogBroker_ListenSubscriptionsServerWrapper struct { + LogBroker_ListenSubscriptionsServer + ctx context.Context +} + +func (s LogBroker_ListenSubscriptionsServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyLogBrokerServer) ListenSubscriptions(r *ListenSubscriptionsRequest, stream LogBroker_ListenSubscriptionsServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := LogBroker_ListenSubscriptionsServerWrapper{ + LogBroker_ListenSubscriptionsServer: stream, + ctx: ctx, + } + return p.local.ListenSubscriptions(r, streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewLogBrokerClient(conn).ListenSubscriptions(ctx, r) + + if err != nil { + return err + } + + for { + msg, err := clientStream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + } + return nil +} + +type LogBroker_PublishLogsServerWrapper struct { + LogBroker_PublishLogsServer + ctx context.Context +} + +func (s LogBroker_PublishLogsServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyLogBrokerServer) PublishLogs(stream LogBroker_PublishLogsServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, 
p.localCtxMods) + if err != nil { + return err + } + streamWrapper := LogBroker_PublishLogsServerWrapper{ + LogBroker_PublishLogsServer: stream, + ctx: ctx, + } + return p.local.PublishLogs(streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewLogBrokerClient(conn).PublishLogs(ctx) + + if err != nil { + return err + } + + for { + msg, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := clientStream.Send(msg); err != nil { + return err + } + } + + reply, err := clientStream.CloseAndRecv() + if err != nil { + return err + } + + return stream.SendAndClose(reply) +} + +func (m *LogSubscriptionOptions) Size() (n int) { + var l int + _ = l + if len(m.Streams) > 0 { + for _, e := range m.Streams { + n += 1 + sovLogbroker(uint64(e)) + } + } + if m.Follow { + n += 2 + } + if m.Tail != 0 { + n += 1 + sovLogbroker(uint64(m.Tail)) + } + if m.Since != nil { + l = m.Since.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + return n +} + +func (m *LogSelector) Size() (n int) { + var l int + _ = l + if len(m.ServiceIDs) > 0 { + for _, s := range m.ServiceIDs { + l = len(s) + n += 1 + l + sovLogbroker(uint64(l)) + } + } + if len(m.NodeIDs) > 0 { + for _, s := range m.NodeIDs { + l = len(s) + n += 1 + l + sovLogbroker(uint64(l)) + } + } + if len(m.TaskIDs) > 0 { + for _, s := range m.TaskIDs { + l = len(s) + n += 1 + l + sovLogbroker(uint64(l)) + } + } + return n +} + +func (m *LogContext) Size() (n int) { + var l int + _ = l + l = len(m.ServiceID) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + l = len(m.TaskID) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + return n +} + +func (m *LogAttr) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + return n +} + +func (m *LogMessage) Size() (n int) { + var l int + _ = l + l = m.Context.Size() + n += 1 + l + sovLogbroker(uint64(l)) + if m.Timestamp != nil { + l = m.Timestamp.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + if m.Stream != 0 { + n += 1 + sovLogbroker(uint64(m.Stream)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + if len(m.Attrs) > 0 { + for _, e := range m.Attrs { + l = e.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + } + return n +} + +func (m *SubscribeLogsRequest) Size() (n int) { + var l int + _ = l + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + return n +} + +func (m *SubscribeLogsMessage) Size() (n int) { + var l int + _ = l + if len(m.Messages) > 0 { + for _, e := range m.Messages { + l = e.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + } + return n +} + +func (m *ListenSubscriptionsRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *SubscriptionMessage) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + if m.Close { + n += 2 + } + return n +} + +func (m *PublishLogsMessage) Size() (n int) { + var l int + _ = l + l = len(m.SubscriptionID) + 
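+	// Size mirrors MarshalTo exactly: one key byte per present field plus, for
+	// length-delimited fields, a varint length and the payload, so Marshal can
+	// allocate the output buffer without growing it.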
if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + if len(m.Messages) > 0 { + for _, e := range m.Messages { + l = e.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + } + if m.Close { + n += 2 + } + return n +} + +func (m *PublishLogsResponse) Size() (n int) { + var l int + _ = l + return n +} + +func sovLogbroker(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozLogbroker(x uint64) (n int) { + return sovLogbroker(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *LogSubscriptionOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LogSubscriptionOptions{`, + `Streams:` + fmt.Sprintf("%v", this.Streams) + `,`, + `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, + `Tail:` + fmt.Sprintf("%v", this.Tail) + `,`, + `Since:` + strings.Replace(fmt.Sprintf("%v", this.Since), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LogSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LogSelector{`, + `ServiceIDs:` + fmt.Sprintf("%v", this.ServiceIDs) + `,`, + `NodeIDs:` + fmt.Sprintf("%v", this.NodeIDs) + `,`, + `TaskIDs:` + fmt.Sprintf("%v", this.TaskIDs) + `,`, + `}`, + }, "") + return s +} +func (this *LogContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LogContext{`, + `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `TaskID:` + fmt.Sprintf("%v", this.TaskID) + `,`, + `}`, + }, "") + return s +} +func (this *LogAttr) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LogAttr{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *LogMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LogMessage{`, + `Context:` + strings.Replace(strings.Replace(this.Context.String(), "LogContext", "LogContext", 1), `&`, ``, 1) + `,`, + `Timestamp:` + strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `Stream:` + fmt.Sprintf("%v", this.Stream) + `,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `Attrs:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Attrs), "LogAttr", "LogAttr", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubscribeLogsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubscribeLogsRequest{`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LogSelector", "LogSelector", 1) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "LogSubscriptionOptions", "LogSubscriptionOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubscribeLogsMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubscribeLogsMessage{`, + `Messages:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Messages), "LogMessage", "LogMessage", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListenSubscriptionsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListenSubscriptionsRequest{`, + `}`, + }, "") + return s +} +func (this *SubscriptionMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubscriptionMessage{`, + `ID:` + fmt.Sprintf("%v", 
this.ID) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LogSelector", "LogSelector", 1) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "LogSubscriptionOptions", "LogSubscriptionOptions", 1) + `,`, + `Close:` + fmt.Sprintf("%v", this.Close) + `,`, + `}`, + }, "") + return s +} +func (this *PublishLogsMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PublishLogsMessage{`, + `SubscriptionID:` + fmt.Sprintf("%v", this.SubscriptionID) + `,`, + `Messages:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Messages), "LogMessage", "LogMessage", 1), `&`, ``, 1) + `,`, + `Close:` + fmt.Sprintf("%v", this.Close) + `,`, + `}`, + }, "") + return s +} +func (this *PublishLogsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PublishLogsResponse{`, + `}`, + }, "") + return s +} +func valueToStringLogbroker(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *LogSubscriptionOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogSubscriptionOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogSubscriptionOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v LogStream + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (LogStream(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Streams = append(m.Streams, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v LogStream + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (LogStream(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Streams = append(m.Streams, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Streams", wireType) + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Follow", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Follow = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Tail", wireType) + } + m.Tail = 0 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Tail |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Since == nil { + m.Since = &google_protobuf.Timestamp{} + } + if err := m.Since.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LogSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceIDs = append(m.ServiceIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeIDs = append(m.NodeIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
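+				// Varint decode: each byte contributes its low 7 bits; a clear
+				// high bit marks the final byte.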
stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskIDs = append(m.TaskIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LogContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + 
} + return nil +} +func (m *LogAttr) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogAttr: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogAttr: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LogMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + 
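+			// Context is a non-nullable embedded struct (a value, not a
+			// pointer), so it is decoded in place, unlike the *Timestamp
+			// field in the next case, which is allocated on demand.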
iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Timestamp == nil { + m.Timestamp = &google_protobuf.Timestamp{} + } + if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType) + } + m.Stream = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Stream |= (LogStream(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attrs = append(m.Attrs, LogAttr{}) + if err := m.Attrs[len(m.Attrs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubscribeLogsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubscribeLogsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubscribeLogsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + 
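+			// Length-delimited field: read the varint byte count, bounds-check
+			// it, then hand that sub-slice to the nested LogSelector's Unmarshal.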
var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &LogSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &LogSubscriptionOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubscribeLogsMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubscribeLogsMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubscribeLogsMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Messages", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Messages = append(m.Messages, LogMessage{}) + if err := m.Messages[len(m.Messages)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListenSubscriptionsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListenSubscriptionsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListenSubscriptionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubscriptionMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubscriptionMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubscriptionMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &LogSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &LogSubscriptionOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Close", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Close = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PublishLogsMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PublishLogsMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PublishLogsMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubscriptionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubscriptionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Messages", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Messages = append(m.Messages, LogMessage{}) + if err := m.Messages[len(m.Messages)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Close", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Close = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PublishLogsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PublishLogsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PublishLogsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipLogbroker(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogbroker + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogbroker + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogbroker + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthLogbroker + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogbroker + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipLogbroker(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthLogbroker = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowLogbroker = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/docker/swarmkit/api/logbroker.proto", fileDescriptorLogbroker) +} + +var fileDescriptorLogbroker = []byte{ + // 966 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x95, 0x41, 0x6f, 0x1b, 0x45, + 0x14, 0xc7, 0x3d, 0xeb, 0xc4, 0x8e, 0x9f, 0x9b, 0xc4, 0x9d, 0xa4, 0x91, 0x65, 0xa8, 0x6d, 0x6d, + 0xa5, 0x62, 0x45, 0x65, 0xdd, 0x1a, 0xa1, 0x22, 0x45, 0x42, 0xd4, 0xb8, 0x42, 0x16, 0x6e, 0x82, + 0xc6, 0x8e, 0xe0, 0x16, 0xad, 0xbd, 0xd3, 0xed, 0xca, 0xeb, 0x1d, 0xb3, 0x33, 0x4e, 0x40, 0xe2, + 0xc0, 0xa1, 0x48, 0x28, 0x07, 0x6e, 0x48, 0x70, 0xe8, 0x89, 0x5e, 0x10, 0x12, 0x17, 0x6e, 0x7c, + 0x00, 0x14, 0x71, 0xe2, 0xc8, 
0xc9, 0xa2, 0xfb, 0x01, 0xf8, 0x0c, 0x68, 0x67, 0xd6, 0xeb, 0x0d, + 0xb6, 0x53, 0x54, 0x2e, 0xf6, 0x8c, 0xe7, 0xf7, 0xf6, 0xfd, 0xdf, 0x7f, 0xde, 0x5b, 0x83, 0x61, + 0x3b, 0xe2, 0xc9, 0xa4, 0x6f, 0x0c, 0xd8, 0xa8, 0x6e, 0xb1, 0xc1, 0x90, 0xfa, 0x75, 0x7e, 0x66, + 0xfa, 0xa3, 0xa1, 0x23, 0xea, 0xe6, 0xd8, 0xa9, 0xbb, 0xcc, 0xee, 0xfb, 0x6c, 0x48, 0x7d, 0x63, + 0xec, 0x33, 0xc1, 0x30, 0x56, 0x90, 0x31, 0x83, 0x8c, 0xd3, 0x7b, 0xa5, 0x5d, 0x9b, 0xd9, 0x4c, + 0x1e, 0xd7, 0xc3, 0x95, 0x22, 0x4b, 0x15, 0x9b, 0x31, 0xdb, 0xa5, 0x75, 0xb9, 0xeb, 0x4f, 0x1e, + 0xd7, 0x85, 0x33, 0xa2, 0x5c, 0x98, 0xa3, 0x71, 0x04, 0xdc, 0xbf, 0x22, 0x75, 0x1c, 0x34, 0x76, + 0x27, 0xb6, 0xe3, 0x45, 0x5f, 0x2a, 0x50, 0xff, 0x05, 0xc1, 0x5e, 0x87, 0xd9, 0xdd, 0x49, 0x9f, + 0x0f, 0x7c, 0x67, 0x2c, 0x1c, 0xe6, 0x1d, 0xc9, 0x4f, 0x8e, 0x0f, 0x20, 0xcb, 0x85, 0x4f, 0xcd, + 0x11, 0x2f, 0xa2, 0x6a, 0xba, 0xb6, 0xd5, 0xb8, 0x69, 0x2c, 0x0a, 0x36, 0xc2, 0x60, 0x49, 0x35, + 0xb5, 0x42, 0x8a, 0xcc, 0x22, 0xf0, 0x1e, 0x64, 0x1e, 0x33, 0xd7, 0x65, 0x67, 0x45, 0xad, 0x8a, + 0x6a, 0x1b, 0x24, 0xda, 0x61, 0x0c, 0x6b, 0xc2, 0x74, 0xdc, 0x62, 0xba, 0x8a, 0x6a, 0x69, 0x22, + 0xd7, 0xf8, 0x2e, 0xac, 0x73, 0xc7, 0x1b, 0xd0, 0xe2, 0x5a, 0x15, 0xd5, 0xf2, 0x8d, 0x92, 0xa1, + 0xaa, 0x35, 0x66, 0xc2, 0x8d, 0xde, 0xac, 0x5a, 0xa2, 0x40, 0xfd, 0x1b, 0x04, 0xf9, 0x30, 0x31, + 0x75, 0xe9, 0x40, 0x30, 0x1f, 0xd7, 0x21, 0xcf, 0xa9, 0x7f, 0xea, 0x0c, 0xe8, 0x89, 0x63, 0x29, + 0xb9, 0xb9, 0xe6, 0x56, 0x30, 0xad, 0x40, 0x57, 0xfd, 0xdc, 0x6e, 0x71, 0x02, 0x11, 0xd2, 0xb6, + 0x38, 0xbe, 0x0d, 0x1b, 0x1e, 0xb3, 0x14, 0xad, 0x49, 0x3a, 0x1f, 0x4c, 0x2b, 0xd9, 0x43, 0x66, + 0x49, 0x34, 0x1b, 0x1e, 0x46, 0x9c, 0x30, 0xf9, 0x50, 0x72, 0xe9, 0x39, 0xd7, 0x33, 0xf9, 0x50, + 0x72, 0xe1, 0x61, 0xdb, 0xe2, 0xfa, 0x53, 0x04, 0xd0, 0x61, 0xf6, 0xfb, 0xcc, 0x13, 0xf4, 0x33, + 0x81, 0xef, 0x00, 0xcc, 0xf5, 0x14, 0x51, 0x15, 0xd5, 0x72, 0xcd, 0xcd, 0x60, 0x5a, 0xc9, 0xc5, + 0x72, 0x48, 0x2e, 0x56, 0x83, 0x6f, 0x41, 0x36, 0x12, 0x23, 0xcd, 0xca, 0x35, 0x21, 0x98, 0x56, + 0x32, 0x4a, 0x0b, 0xc9, 0x28, 0x29, 0x21, 0x14, 0x29, 0x91, 0xde, 0x45, 0x90, 0x12, 0x42, 0x32, + 0x4a, 0x87, 0x7e, 0x0f, 0xb2, 0x1d, 0x66, 0x3f, 0x10, 0xc2, 0xc7, 0x05, 0x48, 0x0f, 0xe9, 0xe7, + 0x2a, 0x37, 0x09, 0x97, 0x78, 0x17, 0xd6, 0x4f, 0x4d, 0x77, 0x42, 0x55, 0x12, 0xa2, 0x36, 0xfa, + 0xb9, 0x26, 0x95, 0x3f, 0xa2, 0x9c, 0x9b, 0x36, 0xc5, 0xef, 0x42, 0x76, 0xa0, 0x8a, 0x90, 0xa1, + 0xf9, 0x46, 0x79, 0xc5, 0xa5, 0x47, 0xa5, 0x36, 0xd7, 0x2e, 0xa6, 0x95, 0x14, 0x99, 0x05, 0xe1, + 0x77, 0x20, 0x17, 0xf7, 0xa6, 0x4c, 0x74, 0xf5, 0x7d, 0xce, 0x61, 0xfc, 0x36, 0x64, 0x54, 0xf3, + 0xc8, 0xfa, 0x5e, 0xd6, 0x6d, 0x24, 0x82, 0xc3, 0x86, 0xb2, 0x4c, 0x61, 0xca, 0xde, 0xb9, 0x46, + 0xe4, 0x1a, 0xdf, 0x87, 0x75, 0x53, 0x08, 0x9f, 0x17, 0xd7, 0xab, 0xe9, 0x5a, 0xbe, 0xf1, 0xda, + 0x8a, 0x27, 0x85, 0x3e, 0x45, 0xfa, 0x15, 0xaf, 0x7f, 0x8f, 0x60, 0x37, 0x1a, 0x85, 0x3e, 0xed, + 0x30, 0x9b, 0x13, 0xfa, 0xe9, 0x84, 0x72, 0x81, 0x0f, 0x60, 0x83, 0x47, 0xcd, 0x16, 0xf9, 0x52, + 0x59, 0x25, 0x2f, 0xc2, 0x48, 0x1c, 0x80, 0x5b, 0x90, 0x65, 0x6a, 0xa6, 0x22, 0x47, 0xf6, 0x57, + 0xc5, 0x2e, 0x4e, 0x21, 0x99, 0x85, 0xea, 0x9f, 0xfc, 0x4b, 0xda, 0xec, 0xc6, 0xde, 0x83, 0x8d, + 0x91, 0x5a, 0xaa, 0xc6, 0x5f, 0x7d, 0x65, 0x51, 0x44, 0x54, 0x72, 0x1c, 0xa5, 0xbf, 0x0e, 0xa5, + 0x8e, 0xc3, 0x05, 0xf5, 0x92, 0xf9, 0x67, 0xa5, 0xeb, 0xbf, 0x21, 0xd8, 0x49, 0x1e, 0xcc, 0xf2, + 0xee, 0x81, 0x16, 0xf7, 0x76, 0x26, 0x98, 0x56, 0xb4, 0x76, 0x8b, 0x68, 0x8e, 0x75, 0xc9, 0x2a, + 0xed, 0x7f, 0x58, 0x95, 0x7e, 0x65, 0xab, 0xc2, 0x4e, 
0x1f, 0xb8, 0x8c, 0xab, 0x17, 0xca, 0x06, + 0x51, 0x1b, 0xfd, 0x47, 0x04, 0xf8, 0xa3, 0x49, 0xdf, 0x75, 0xf8, 0x93, 0xa4, 0x7f, 0x07, 0xb0, + 0xcd, 0x13, 0x0f, 0x9b, 0x0f, 0x2c, 0x0e, 0xa6, 0x95, 0xad, 0x64, 0x9e, 0x76, 0x8b, 0x6c, 0x25, + 0xd1, 0xb6, 0x75, 0xc9, 0x7c, 0xed, 0x55, 0xcc, 0x9f, 0x6b, 0x4d, 0x27, 0xb5, 0xde, 0x80, 0x9d, + 0x84, 0x54, 0x42, 0xf9, 0x98, 0x79, 0x9c, 0xee, 0x3f, 0x47, 0x90, 0x8b, 0x47, 0x00, 0xdf, 0x01, + 0xdc, 0x39, 0xfa, 0xe0, 0xa4, 0xdb, 0x23, 0x0f, 0x1f, 0x3c, 0x3a, 0x39, 0x3e, 0xfc, 0xf0, 0xf0, + 0xe8, 0xe3, 0xc3, 0x42, 0xaa, 0xb4, 0x7b, 0xfe, 0xac, 0x5a, 0x88, 0xb1, 0x63, 0x6f, 0xe8, 0xb1, + 0x33, 0x0f, 0xef, 0xc3, 0xf5, 0x04, 0xdd, 0xed, 0xb5, 0x8e, 0x8e, 0x7b, 0x05, 0x54, 0xda, 0x39, + 0x7f, 0x56, 0xdd, 0x8e, 0xe1, 0xae, 0xb0, 0xd8, 0x44, 0x2c, 0xb2, 0x0f, 0x09, 0x29, 0x68, 0x8b, + 0x2c, 0xf5, 0xfd, 0xd2, 0xf5, 0xaf, 0x7f, 0x28, 0xa7, 0x7e, 0x7d, 0x5e, 0x9e, 0x0b, 0x6b, 0x3c, + 0x45, 0xb0, 0x16, 0xea, 0xc6, 0x5f, 0xc0, 0xe6, 0xa5, 0x9e, 0xc5, 0xb5, 0x65, 0xee, 0x2c, 0x9b, + 0xb8, 0xd2, 0xcb, 0xc9, 0xc8, 0x51, 0xfd, 0xc6, 0xef, 0x3f, 0xff, 0xfd, 0x9d, 0xb6, 0x0d, 0x9b, + 0x92, 0x7c, 0x73, 0x64, 0x7a, 0xa6, 0x4d, 0xfd, 0xbb, 0xa8, 0xf1, 0x93, 0x26, 0xdd, 0x6a, 0xca, + 0xff, 0x5c, 0xfc, 0x2d, 0x82, 0x9d, 0x25, 0x6d, 0x8e, 0x8d, 0xa5, 0x17, 0xb6, 0x72, 0x1e, 0x4a, + 0x6f, 0x5c, 0x21, 0x2c, 0x39, 0x20, 0xfa, 0x2d, 0xa9, 0xeb, 0x26, 0x5c, 0x53, 0xba, 0xce, 0x98, + 0x3f, 0xa4, 0xfe, 0x82, 0x4a, 0xfc, 0x15, 0x82, 0x7c, 0xe2, 0xae, 0xf1, 0xed, 0x65, 0xcf, 0x5f, + 0xec, 0xdb, 0xe5, 0x3a, 0x96, 0x34, 0xcd, 0x7f, 0xd2, 0x51, 0x43, 0xcd, 0xe2, 0xc5, 0x8b, 0x72, + 0xea, 0xcf, 0x17, 0xe5, 0xd4, 0x97, 0x41, 0x19, 0x5d, 0x04, 0x65, 0xf4, 0x47, 0x50, 0x46, 0x7f, + 0x05, 0x65, 0xd4, 0xcf, 0xc8, 0x17, 0xf7, 0x5b, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x95, 0x7b, + 0x3c, 0x04, 0xe0, 0x08, 0x00, 0x00, +} diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/logbroker.proto b/extender/code/vendor/github.com/docker/swarmkit/api/logbroker.proto new file mode 100644 index 000000000..1549640d4 --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/api/logbroker.proto @@ -0,0 +1,188 @@ +syntax = "proto3"; + +package docker.swarmkit.v1; + +import "gogoproto/gogo.proto"; +import "google/protobuf/timestamp.proto"; +import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto"; + +// LogStream defines the stream from which the log message came. +enum LogStream { + option (gogoproto.goproto_enum_prefix) = false; + option (gogoproto.enum_customname) = "LogStream"; + + LOG_STREAM_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "LogStreamUnknown"]; + LOG_STREAM_STDOUT = 1 [(gogoproto.enumvalue_customname) = "LogStreamStdout"]; + LOG_STREAM_STDERR = 2 [(gogoproto.enumvalue_customname) = "LogStreamStderr"]; +} + +message LogSubscriptionOptions { + // Streams defines which log streams should be sent from the task source. + // Empty means send all the messages. + repeated LogStream streams = 1 [packed=false]; + + // Follow instructs the publisher to continue sending log messages as they + // are produced, after satisfying the initial query. + bool follow = 2; + + // Tail defines how many messages relative to the log stream to send when + // starting the stream. + // + // Positive values will skip that number of messages from the start of the + // stream before publishing. + // + // Negative values will specify messages relative to the end of the stream, + // offset by one. We can say that the last (-n-1) lines are returned when n + // < 0. 
As reference, -1 would mean send no log lines (typically used with + // follow), -2 would return the last log line, -11 would return the last 10 + // and so on. + // + // The default value of zero will return all logs. + // + // Note that this is very different from the Docker API. + int64 tail = 3; + + // Since indicates that only log messages produced after this timestamp + // should be sent. + // Note: can't use stdtime because this field is nullable. + google.protobuf.Timestamp since = 4; +} + +// LogSelector will match logs from ANY of the defined parameters. +// +// For the best effect, the client should use the least specific parameter +// possible. For example, if they want to listen to all the tasks of a service, +// they should use the service id, rather than specifying the individual tasks. +message LogSelector { + repeated string service_ids = 1; + repeated string node_ids = 2; + repeated string task_ids = 3; +} + +// LogContext marks the context from which a log message was generated. +message LogContext { + string service_id = 1; + string node_id = 2; + string task_id = 3; +} + +// LogAttr is an extra key/value pair that may have been set by users. +message LogAttr { + string key = 1; + string value = 2; +} + +// LogMessage +message LogMessage { + // Context identifies the source of the log message. + LogContext context = 1 [(gogoproto.nullable) = false]; + + // Timestamp is the time at which the message was generated. + // Note: can't use stdtime because this field is nullable. + google.protobuf.Timestamp timestamp = 2; + + // Stream identifies the stream of the log message, stdout or stderr. + LogStream stream = 3; + + // Data is the raw log message, as generated by the application. + bytes data = 4; + + // Attrs is a list of key value pairs representing additional log details + // that may have been returned from the logger. + repeated LogAttr attrs = 5 [(gogoproto.nullable) = false]; +} + +// Logs defines the methods for retrieving task log messages from a cluster. +service Logs { + // SubscribeLogs starts a subscription with the specified selector and options. + // + // The subscription will be distributed to relevant nodes and messages will + // be collected and sent via the returned stream. + // + // The subscription will end with an EOF. + rpc SubscribeLogs(SubscribeLogsRequest) returns (stream SubscribeLogsMessage) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + } +} + +message SubscribeLogsRequest { + // LogSelector describes the logs to which the subscriber is subscribed. + LogSelector selector = 1; + + LogSubscriptionOptions options = 2; +} + +message SubscribeLogsMessage { + repeated LogMessage messages = 1 [(gogoproto.nullable) = false]; +} + +// LogBroker defines the API used by the worker to send task logs back to a +// manager. A client listens for subscriptions then optimistically retrieves +// logs satisfying said subscriptions, calling PublishLogs for results that are +// relevant. +// +// The structure of ListenSubscriptions is similar to the Dispatcher API but +// decoupled to allow log distribution to work outside of the regular task +// flow. +service LogBroker { + // ListenSubscriptions starts a subscription stream for the node. For each + // message received, the node should attempt to satisfy the subscription. + // + // Log messages that match the provided subscription should be sent via + // PublishLogs.
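+ // + // (Editorial sketch, not part of the upstream file: the intended worker + // loop, inferred from the message definitions below, is to receive a + // SubscriptionMessage from ListenSubscriptions, stream matching task logs + // through PublishLogs using the same subscription ID while Close is false, + // and stop publishing once a SubscriptionMessage arrives with Close set.)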
+ rpc ListenSubscriptions(ListenSubscriptionsRequest) returns (stream SubscriptionMessage) { + option (docker.protobuf.plugin.tls_authorization) = { + roles: "swarm-worker" + roles: "swarm-manager" + }; + } + + // PublishLogs receives sets of log messages destined for a single + // subscription identifier. + rpc PublishLogs(stream PublishLogsMessage) returns (PublishLogsResponse) { + option (docker.protobuf.plugin.tls_authorization) = { + roles: "swarm-worker" + roles: "swarm-manager" + }; + } +} + +// ListenSubscriptionsRequest is a placeholder to begin listening for +// subscriptions. +message ListenSubscriptionsRequest { } + +// SubscriptionMessage instructs the listener to start publishing messages for +// the stream or end a subscription. +// +// If Options.Follow == false, the worker should end the subscription on its own. +message SubscriptionMessage { + // ID identifies the subscription. + string id = 1; + + // Selector defines which sources should be sent for the subscription. + LogSelector selector = 2; + + // Options specify how the subscription should be satisfied. + LogSubscriptionOptions options = 3; + + // Close will be true if the node should shut down the subscription with the + // provided identifier. + bool close = 4; +} + +message PublishLogsMessage { + // SubscriptionID identifies which subscription the set of messages should + // be sent to. We can think of this as a "mail box" for the subscription. + string subscription_id = 1; + + // Messages is the set of log messages for publishing. + repeated LogMessage messages = 2 [(gogoproto.nullable) = false]; + + // Close is a boolean for whether or not the client has completed its log + // stream. When close is set, the manager can hang up the subscription. + // Any further logs from this subscription are an error condition. Any + // messages included when close is set can be discarded. + bool close = 3; +} + +message PublishLogsResponse { } diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/objects.pb.go b/extender/code/vendor/github.com/docker/swarmkit/api/objects.pb.go new file mode 100644 index 000000000..5ae93011c --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/api/objects.pb.go @@ -0,0 +1,8138 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/docker/swarmkit/api/objects.proto + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/gogo/protobuf/types" +import _ "github.com/gogo/protobuf/gogoproto" +import google_protobuf3 "github.com/gogo/protobuf/types" +import _ "github.com/docker/swarmkit/protobuf/plugin" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import go_events "github.com/docker/go-events" +import strings "strings" + +import reflect "reflect" +import sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Meta contains metadata about objects. Every object contains a meta field. +type Meta struct { + // Version tracks the current version of the object. + Version Version `protobuf:"bytes,1,opt,name=version" json:"version"` + // Object timestamps. + // Note: can't use stdtime because these fields are nullable.
+ CreatedAt *google_protobuf.Timestamp `protobuf:"bytes,2,opt,name=created_at,json=createdAt" json:"created_at,omitempty"` + UpdatedAt *google_protobuf.Timestamp `protobuf:"bytes,3,opt,name=updated_at,json=updatedAt" json:"updated_at,omitempty"` +} + +func (m *Meta) Reset() { *m = Meta{} } +func (*Meta) ProtoMessage() {} +func (*Meta) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{0} } + +// Node provides the internal node state as seen by the cluster. +type Node struct { + // ID specifies the identity of the node. + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + // Spec defines the desired state of the node as specified by the user. + // The system will honor this and will *never* modify it. + Spec NodeSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"` + // Description encapsulates the properties of the Node as reported by the + // agent. + Description *NodeDescription `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` + // Status provides the current status of the node, as seen by the manager. + Status NodeStatus `protobuf:"bytes,5,opt,name=status" json:"status"` + // ManagerStatus provides the current status of the node's manager + // component, if the node is a manager. + ManagerStatus *ManagerStatus `protobuf:"bytes,6,opt,name=manager_status,json=managerStatus" json:"manager_status,omitempty"` + // DEPRECATED: Use Attachments to find the ingress network. + // The node attachment to the ingress network. + Attachment *NetworkAttachment `protobuf:"bytes,7,opt,name=attachment" json:"attachment,omitempty"` + // Certificate is the TLS certificate issued for the node, if any. + Certificate Certificate `protobuf:"bytes,8,opt,name=certificate" json:"certificate"` + // Role is the *observed* role for this node. It differs from the + // desired role set in Node.Spec.Role because the role here is only + // updated after the Raft member list has been reconciled with the + // desired role from the spec. + // + // This field represents the current reconciled state. If an action is + // to be performed, first verify the role in the cert. This field only + // shows the privilege level that the CA would currently grant when + // issuing or renewing the node's certificate. + Role NodeRole `protobuf:"varint,9,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole" json:"role,omitempty"` + // Attachments enumerates the network attachments for the node to set up an + // endpoint on the node to be used for load balancing. Each overlay + // network, including the ingress network, will have a NetworkAttachment. + Attachments []*NetworkAttachment `protobuf:"bytes,10,rep,name=attachments" json:"attachments,omitempty"` +} + +func (m *Node) Reset() { *m = Node{} } +func (*Node) ProtoMessage() {} +func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{1} } + +type Service struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + Spec ServiceSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"` + // SpecVersion versions Spec, to identify changes in the spec. Note that + // this is not directly comparable to the service's Version. + SpecVersion *Version `protobuf:"bytes,10,opt,name=spec_version,json=specVersion" json:"spec_version,omitempty"` + // PreviousSpec is the previous service spec that was in place before + // "Spec".
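+ // + // (Illustrative note, not in the upstream file: during a rolling update, + // Spec holds the new definition while PreviousSpec retains the one being + // replaced, which is what a subsequent rollback would restore.)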
+ PreviousSpec *ServiceSpec `protobuf:"bytes,6,opt,name=previous_spec,json=previousSpec" json:"previous_spec,omitempty"` + // PreviousSpecVersion versions PreviousSpec. Note that this is not + // directly comparable to the service's Version. + PreviousSpecVersion *Version `protobuf:"bytes,11,opt,name=previous_spec_version,json=previousSpecVersion" json:"previous_spec_version,omitempty"` + // Runtime state of service endpoint. This may be different + // from the spec version because the user may not have entered + // the optional fields like node_port or virtual_ip and it + // could be auto allocated by the system. + Endpoint *Endpoint `protobuf:"bytes,4,opt,name=endpoint" json:"endpoint,omitempty"` + // UpdateStatus contains the status of an update, if one is in + // progress. + UpdateStatus *UpdateStatus `protobuf:"bytes,5,opt,name=update_status,json=updateStatus" json:"update_status,omitempty"` +} + +func (m *Service) Reset() { *m = Service{} } +func (*Service) ProtoMessage() {} +func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{2} } + +// Endpoint specifies all the network parameters required to +// correctly discover and load balance a service. +type Endpoint struct { + Spec *EndpointSpec `protobuf:"bytes,1,opt,name=spec" json:"spec,omitempty"` + // Runtime state of the exposed ports which may carry + // auto-allocated swarm ports in addition to the user + // configured information. + Ports []*PortConfig `protobuf:"bytes,2,rep,name=ports" json:"ports,omitempty"` + // VirtualIPs specifies the IP addresses under which this endpoint will be + // made available. + VirtualIPs []*Endpoint_VirtualIP `protobuf:"bytes,3,rep,name=virtual_ips,json=virtualIps" json:"virtual_ips,omitempty"` +} + +func (m *Endpoint) Reset() { *m = Endpoint{} } +func (*Endpoint) ProtoMessage() {} +func (*Endpoint) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{3} } + +// VirtualIP specifies a set of networks this endpoint will be attached to +// and the IP addresses the target service will be made available under. +type Endpoint_VirtualIP struct { + // NetworkID for which this endpoint attachment was created. + NetworkID string `protobuf:"bytes,1,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + // A virtual IP is an address at the IP layer that the client can use + // to send requests to this service. A DNS A/AAAA query on the service + // name might return this IP to the client. This is + // strictly a logical IP and there may not be any + // interfaces assigned this IP address or any route + // created for this address. There may be more than one to + // accommodate both IPv4 and IPv6. + Addr string `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"` +} + +func (m *Endpoint_VirtualIP) Reset() { *m = Endpoint_VirtualIP{} } +func (*Endpoint_VirtualIP) ProtoMessage() {} +func (*Endpoint_VirtualIP) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{3, 0} } + +// Task specifies the parameters for implementing a Spec. A task is effectively +// immutable and idempotent. Once it is dispatched to a node, it will not be +// dispatched to another node. +type Task struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + // Spec defines the desired state of the task as specified by the user. + // The system will honor this and will *never* modify it.
+ Spec TaskSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"` + // SpecVersion is copied from Service, to identify which version of the + // spec this task has. Note that this is not directly comparable to the + // service's Version. + SpecVersion *Version `protobuf:"bytes,14,opt,name=spec_version,json=specVersion" json:"spec_version,omitempty"` + // ServiceID indicates the service under which this task is orchestrated. This + // should almost always be set. + ServiceID string `protobuf:"bytes,4,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + // Slot is the service slot number for a task. + // For example, if a replicated service has replicas = 2, there will be a + // task with slot = 1, and another with slot = 2. + Slot uint64 `protobuf:"varint,5,opt,name=slot,proto3" json:"slot,omitempty"` + // NodeID indicates the node to which the task is assigned. If this field + // is empty or not set, the task is unassigned. + NodeID string `protobuf:"bytes,6,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + // Annotations defines the names and labels for the runtime, as set by + // the cluster manager. + // + // As backup, if this field has an empty name, the runtime will + // allocate a unique name for the actual container. + // + // NOTE(stevvooe): This preserves the ability for us to make naming + // decisions for tasks in the orchestrator, albeit this is left empty for now. + Annotations Annotations `protobuf:"bytes,7,opt,name=annotations" json:"annotations"` + // ServiceAnnotations is a direct copy of the service name and labels when + // this task is created. + // + // Labels set here will *not* be propagated to the runtime target, such as a + // container. Use labels on the runtime target for that purpose. + ServiceAnnotations Annotations `protobuf:"bytes,8,opt,name=service_annotations,json=serviceAnnotations" json:"service_annotations"` + Status TaskStatus `protobuf:"bytes,9,opt,name=status" json:"status"` + // DesiredState is the target state for the task. It is set to + // TaskStateRunning when a task is first created, and changed to + // TaskStateShutdown if the manager wants to terminate the task. This field + // is only written by the manager. + DesiredState TaskState `protobuf:"varint,10,opt,name=desired_state,json=desiredState,proto3,enum=docker.swarmkit.v1.TaskState" json:"desired_state,omitempty"` + // List of network attachments by the task. + Networks []*NetworkAttachment `protobuf:"bytes,11,rep,name=networks" json:"networks,omitempty"` + // A copy of runtime state of service endpoint from Service + // object to be distributed to agents as part of the task. + Endpoint *Endpoint `protobuf:"bytes,12,opt,name=endpoint" json:"endpoint,omitempty"` + // LogDriver specifies the selected log driver to use for the task. Agent + // processes should always favor the value in this field. + // + // If present in the TaskSpec, this will be a copy of that value. The + // orchestrator may choose to insert a value here, which should be honored, + // such as a cluster default or policy-based value. + // + // If not present, the daemon's default will be used.
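+ // + // (Illustrative example, not in the upstream file: for a task whose + // TaskSpec leaves the log driver unset on a cluster defaulting to the + // "json-file" driver, the orchestrator would place Driver{Name: "json-file"} + // here, and agents would use that value.)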
+ LogDriver *Driver `protobuf:"bytes,13,opt,name=log_driver,json=logDriver" json:"log_driver,omitempty"` + AssignedGenericResources []*GenericResource `protobuf:"bytes,15,rep,name=assigned_generic_resources,json=assignedGenericResources" json:"assigned_generic_resources,omitempty"` +} + +func (m *Task) Reset() { *m = Task{} } +func (*Task) ProtoMessage() {} +func (*Task) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{4} } + +// NetworkAttachment specifies the network parameters of attachment to +// a single network by an object such as task or node. +type NetworkAttachment struct { + // Network state as a whole becomes part of the object so that + // it always is available for use in agents so that agents + // don't have any other dependency during execution. + Network *Network `protobuf:"bytes,1,opt,name=network" json:"network,omitempty"` + // List of IPv4/IPv6 addresses that are assigned to the object + // as part of getting attached to this network. + Addresses []string `protobuf:"bytes,2,rep,name=addresses" json:"addresses,omitempty"` + // List of aliases by which a task is resolved in a network + Aliases []string `protobuf:"bytes,3,rep,name=aliases" json:"aliases,omitempty"` + // Map of all the driver attachment options for this network + DriverAttachmentOpts map[string]string `protobuf:"bytes,4,rep,name=driver_attachment_opts,json=driverAttachmentOpts" json:"driver_attachment_opts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *NetworkAttachment) Reset() { *m = NetworkAttachment{} } +func (*NetworkAttachment) ProtoMessage() {} +func (*NetworkAttachment) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{5} } + +type Network struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + Spec NetworkSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"` + // Driver specific operational state provided by the network driver. + DriverState *Driver `protobuf:"bytes,4,opt,name=driver_state,json=driverState" json:"driver_state,omitempty"` + // Runtime state of IPAM options. This may not reflect the + // ipam options from NetworkSpec. + IPAM *IPAMOptions `protobuf:"bytes,5,opt,name=ipam" json:"ipam,omitempty"` +} + +func (m *Network) Reset() { *m = Network{} } +func (*Network) ProtoMessage() {} +func (*Network) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{6} } + +// Cluster provides global cluster settings. +type Cluster struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + Spec ClusterSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"` + // RootCA contains key material for the root CA. + RootCA RootCA `protobuf:"bytes,4,opt,name=root_ca,json=rootCa" json:"root_ca"` + // Symmetric encryption key distributed by the lead manager. Used by agents + // for securing network bootstrapping and communication. + NetworkBootstrapKeys []*EncryptionKey `protobuf:"bytes,5,rep,name=network_bootstrap_keys,json=networkBootstrapKeys" json:"network_bootstrap_keys,omitempty"` + // Logical clock used to timestamp every key. It allows other managers + // and agents to unambiguously identify the older key to be deleted when + // a new key is allocated on key rotation. 
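+ // + // (Worked example, not in the upstream file: if two keys carry clock + // values 6 and 7, every manager and agent can agree that the key stamped + // 6 is the older one to delete once rotation completes.)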
+ EncryptionKeyLamportClock uint64 `protobuf:"varint,6,opt,name=encryption_key_lamport_clock,json=encryptionKeyLamportClock,proto3" json:"encryption_key_lamport_clock,omitempty"` + // BlacklistedCertificates tracks certificates that should no longer + // be honored. It's a mapping from CN -> BlacklistedCertificate. + BlacklistedCertificates map[string]*BlacklistedCertificate `protobuf:"bytes,8,rep,name=blacklisted_certificates,json=blacklistedCertificates" json:"blacklisted_certificates,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` + // UnlockKeys defines the keys that lock node data at rest. For example, + // this would contain the key encrypting key (KEK) that will encrypt the + // manager TLS keys at rest and the raft encryption keys at rest. + // If the key is empty, the node will be unlocked (will not require a key + // to start up from a shut down state). + UnlockKeys []*EncryptionKey `protobuf:"bytes,9,rep,name=unlock_keys,json=unlockKeys" json:"unlock_keys,omitempty"` + // FIPS specifies whether this cluster should be in FIPS mode. This changes + // the format of the join tokens, and nodes that are not FIPS-enabled should + // reject joining the cluster. Nodes that report themselves to be non-FIPS + // should be rejected from the cluster. + FIPS bool `protobuf:"varint,10,opt,name=fips,proto3" json:"fips,omitempty"` +} + +func (m *Cluster) Reset() { *m = Cluster{} } +func (*Cluster) ProtoMessage() {} +func (*Cluster) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{7} } + +// Secret represents a secret that should be passed to a container or a node, +// and is immutable. +type Secret struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + // Spec contains the actual secret data, as well as any context around the + // secret data that the user provides. + Spec SecretSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"` + // Whether the secret is an internal secret (not set by a user) or not. + Internal bool `protobuf:"varint,4,opt,name=internal,proto3" json:"internal,omitempty"` +} + +func (m *Secret) Reset() { *m = Secret{} } +func (*Secret) ProtoMessage() {} +func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{8} } + +// Config represents a set of configuration files that should be passed to a +// container. +type Config struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + // Spec contains the actual config data, as well as any context around the + // config data that the user provides. + Spec ConfigSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"` +} + +func (m *Config) Reset() { *m = Config{} } +func (*Config) ProtoMessage() {} +func (*Config) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{9} } + +// Resource is a top-level object with externally defined content and indexing. +// SwarmKit can serve as a store for these objects without understanding their +// meanings. +type Resource struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + Annotations Annotations `protobuf:"bytes,3,opt,name=annotations" json:"annotations"` + // Kind identifies this class of object.
It is essentially a namespace + // to keep IDs or indices from colliding between unrelated Resource + // objects. This must correspond to the name of an Extension. + Kind string `protobuf:"bytes,4,opt,name=kind,proto3" json:"kind,omitempty"` + // Payload bytes. This data is not interpreted in any way by SwarmKit. + // By convention, it should be a marshalled protocol buffers message. + Payload *google_protobuf3.Any `protobuf:"bytes,5,opt,name=payload" json:"payload,omitempty"` +} + +func (m *Resource) Reset() { *m = Resource{} } +func (*Resource) ProtoMessage() {} +func (*Resource) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{10} } + +// Extension declares a type of "resource" object. This message provides some +// metadata about the objects. +type Extension struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + Annotations Annotations `protobuf:"bytes,3,opt,name=annotations" json:"annotations"` + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` +} + +func (m *Extension) Reset() { *m = Extension{} } +func (*Extension) ProtoMessage() {} +func (*Extension) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{11} } + +func init() { + proto.RegisterType((*Meta)(nil), "docker.swarmkit.v1.Meta") + proto.RegisterType((*Node)(nil), "docker.swarmkit.v1.Node") + proto.RegisterType((*Service)(nil), "docker.swarmkit.v1.Service") + proto.RegisterType((*Endpoint)(nil), "docker.swarmkit.v1.Endpoint") + proto.RegisterType((*Endpoint_VirtualIP)(nil), "docker.swarmkit.v1.Endpoint.VirtualIP") + proto.RegisterType((*Task)(nil), "docker.swarmkit.v1.Task") + proto.RegisterType((*NetworkAttachment)(nil), "docker.swarmkit.v1.NetworkAttachment") + proto.RegisterType((*Network)(nil), "docker.swarmkit.v1.Network") + proto.RegisterType((*Cluster)(nil), "docker.swarmkit.v1.Cluster") + proto.RegisterType((*Secret)(nil), "docker.swarmkit.v1.Secret") + proto.RegisterType((*Config)(nil), "docker.swarmkit.v1.Config") + proto.RegisterType((*Resource)(nil), "docker.swarmkit.v1.Resource") + proto.RegisterType((*Extension)(nil), "docker.swarmkit.v1.Extension") +} + +func (m *Meta) Copy() *Meta { + if m == nil { + return nil + } + o := &Meta{} + o.CopyFrom(m) + return o +} + +func (m *Meta) CopyFrom(src interface{}) { + + o := src.(*Meta) + *m = *o + deepcopy.Copy(&m.Version, &o.Version) + if o.CreatedAt != nil { + m.CreatedAt = &google_protobuf.Timestamp{} + deepcopy.Copy(m.CreatedAt, o.CreatedAt) + } + if o.UpdatedAt != nil { + m.UpdatedAt = &google_protobuf.Timestamp{} + deepcopy.Copy(m.UpdatedAt, o.UpdatedAt) + } +} + +func (m *Node) Copy() *Node { + if m == nil { + return nil + } + o := &Node{} + o.CopyFrom(m) + return o +} + +func (m *Node) CopyFrom(src interface{}) { + + o := src.(*Node) + *m = *o + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Spec, &o.Spec) + if o.Description != nil { + m.Description = &NodeDescription{} + deepcopy.Copy(m.Description, o.Description) + } + deepcopy.Copy(&m.Status, &o.Status) + if o.ManagerStatus != nil { + m.ManagerStatus = &ManagerStatus{} + deepcopy.Copy(m.ManagerStatus, o.ManagerStatus) + } + if o.Attachment != nil { + m.Attachment = &NetworkAttachment{} + deepcopy.Copy(m.Attachment, o.Attachment) + } + deepcopy.Copy(&m.Certificate, &o.Certificate) + if o.Attachments != nil { + m.Attachments = make([]*NetworkAttachment, len(o.Attachments)) + for i := range m.Attachments { + m.Attachments[i] = 
&NetworkAttachment{} + deepcopy.Copy(m.Attachments[i], o.Attachments[i]) + } + } + +} + +func (m *Service) Copy() *Service { + if m == nil { + return nil + } + o := &Service{} + o.CopyFrom(m) + return o +} + +func (m *Service) CopyFrom(src interface{}) { + + o := src.(*Service) + *m = *o + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Spec, &o.Spec) + if o.SpecVersion != nil { + m.SpecVersion = &Version{} + deepcopy.Copy(m.SpecVersion, o.SpecVersion) + } + if o.PreviousSpec != nil { + m.PreviousSpec = &ServiceSpec{} + deepcopy.Copy(m.PreviousSpec, o.PreviousSpec) + } + if o.PreviousSpecVersion != nil { + m.PreviousSpecVersion = &Version{} + deepcopy.Copy(m.PreviousSpecVersion, o.PreviousSpecVersion) + } + if o.Endpoint != nil { + m.Endpoint = &Endpoint{} + deepcopy.Copy(m.Endpoint, o.Endpoint) + } + if o.UpdateStatus != nil { + m.UpdateStatus = &UpdateStatus{} + deepcopy.Copy(m.UpdateStatus, o.UpdateStatus) + } +} + +func (m *Endpoint) Copy() *Endpoint { + if m == nil { + return nil + } + o := &Endpoint{} + o.CopyFrom(m) + return o +} + +func (m *Endpoint) CopyFrom(src interface{}) { + + o := src.(*Endpoint) + *m = *o + if o.Spec != nil { + m.Spec = &EndpointSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } + if o.Ports != nil { + m.Ports = make([]*PortConfig, len(o.Ports)) + for i := range m.Ports { + m.Ports[i] = &PortConfig{} + deepcopy.Copy(m.Ports[i], o.Ports[i]) + } + } + + if o.VirtualIPs != nil { + m.VirtualIPs = make([]*Endpoint_VirtualIP, len(o.VirtualIPs)) + for i := range m.VirtualIPs { + m.VirtualIPs[i] = &Endpoint_VirtualIP{} + deepcopy.Copy(m.VirtualIPs[i], o.VirtualIPs[i]) + } + } + +} + +func (m *Endpoint_VirtualIP) Copy() *Endpoint_VirtualIP { + if m == nil { + return nil + } + o := &Endpoint_VirtualIP{} + o.CopyFrom(m) + return o +} + +func (m *Endpoint_VirtualIP) CopyFrom(src interface{}) { + + o := src.(*Endpoint_VirtualIP) + *m = *o +} + +func (m *Task) Copy() *Task { + if m == nil { + return nil + } + o := &Task{} + o.CopyFrom(m) + return o +} + +func (m *Task) CopyFrom(src interface{}) { + + o := src.(*Task) + *m = *o + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Spec, &o.Spec) + if o.SpecVersion != nil { + m.SpecVersion = &Version{} + deepcopy.Copy(m.SpecVersion, o.SpecVersion) + } + deepcopy.Copy(&m.Annotations, &o.Annotations) + deepcopy.Copy(&m.ServiceAnnotations, &o.ServiceAnnotations) + deepcopy.Copy(&m.Status, &o.Status) + if o.Networks != nil { + m.Networks = make([]*NetworkAttachment, len(o.Networks)) + for i := range m.Networks { + m.Networks[i] = &NetworkAttachment{} + deepcopy.Copy(m.Networks[i], o.Networks[i]) + } + } + + if o.Endpoint != nil { + m.Endpoint = &Endpoint{} + deepcopy.Copy(m.Endpoint, o.Endpoint) + } + if o.LogDriver != nil { + m.LogDriver = &Driver{} + deepcopy.Copy(m.LogDriver, o.LogDriver) + } + if o.AssignedGenericResources != nil { + m.AssignedGenericResources = make([]*GenericResource, len(o.AssignedGenericResources)) + for i := range m.AssignedGenericResources { + m.AssignedGenericResources[i] = &GenericResource{} + deepcopy.Copy(m.AssignedGenericResources[i], o.AssignedGenericResources[i]) + } + } + +} + +func (m *NetworkAttachment) Copy() *NetworkAttachment { + if m == nil { + return nil + } + o := &NetworkAttachment{} + o.CopyFrom(m) + return o +} + +func (m *NetworkAttachment) CopyFrom(src interface{}) { + + o := src.(*NetworkAttachment) + *m = *o + if o.Network != nil { + m.Network = &Network{} + deepcopy.Copy(m.Network, o.Network) + } + if o.Addresses != nil { + m.Addresses = make([]string, len(o.Addresses)) + 
copy(m.Addresses, o.Addresses) + } + + if o.Aliases != nil { + m.Aliases = make([]string, len(o.Aliases)) + copy(m.Aliases, o.Aliases) + } + + if o.DriverAttachmentOpts != nil { + m.DriverAttachmentOpts = make(map[string]string, len(o.DriverAttachmentOpts)) + for k, v := range o.DriverAttachmentOpts { + m.DriverAttachmentOpts[k] = v + } + } + +} + +func (m *Network) Copy() *Network { + if m == nil { + return nil + } + o := &Network{} + o.CopyFrom(m) + return o +} + +func (m *Network) CopyFrom(src interface{}) { + + o := src.(*Network) + *m = *o + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Spec, &o.Spec) + if o.DriverState != nil { + m.DriverState = &Driver{} + deepcopy.Copy(m.DriverState, o.DriverState) + } + if o.IPAM != nil { + m.IPAM = &IPAMOptions{} + deepcopy.Copy(m.IPAM, o.IPAM) + } +} + +func (m *Cluster) Copy() *Cluster { + if m == nil { + return nil + } + o := &Cluster{} + o.CopyFrom(m) + return o +} + +func (m *Cluster) CopyFrom(src interface{}) { + + o := src.(*Cluster) + *m = *o + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Spec, &o.Spec) + deepcopy.Copy(&m.RootCA, &o.RootCA) + if o.NetworkBootstrapKeys != nil { + m.NetworkBootstrapKeys = make([]*EncryptionKey, len(o.NetworkBootstrapKeys)) + for i := range m.NetworkBootstrapKeys { + m.NetworkBootstrapKeys[i] = &EncryptionKey{} + deepcopy.Copy(m.NetworkBootstrapKeys[i], o.NetworkBootstrapKeys[i]) + } + } + + if o.BlacklistedCertificates != nil { + m.BlacklistedCertificates = make(map[string]*BlacklistedCertificate, len(o.BlacklistedCertificates)) + for k, v := range o.BlacklistedCertificates { + m.BlacklistedCertificates[k] = &BlacklistedCertificate{} + deepcopy.Copy(m.BlacklistedCertificates[k], v) + } + } + + if o.UnlockKeys != nil { + m.UnlockKeys = make([]*EncryptionKey, len(o.UnlockKeys)) + for i := range m.UnlockKeys { + m.UnlockKeys[i] = &EncryptionKey{} + deepcopy.Copy(m.UnlockKeys[i], o.UnlockKeys[i]) + } + } + +} + +func (m *Secret) Copy() *Secret { + if m == nil { + return nil + } + o := &Secret{} + o.CopyFrom(m) + return o +} + +func (m *Secret) CopyFrom(src interface{}) { + + o := src.(*Secret) + *m = *o + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Spec, &o.Spec) +} + +func (m *Config) Copy() *Config { + if m == nil { + return nil + } + o := &Config{} + o.CopyFrom(m) + return o +} + +func (m *Config) CopyFrom(src interface{}) { + + o := src.(*Config) + *m = *o + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Spec, &o.Spec) +} + +func (m *Resource) Copy() *Resource { + if m == nil { + return nil + } + o := &Resource{} + o.CopyFrom(m) + return o +} + +func (m *Resource) CopyFrom(src interface{}) { + + o := src.(*Resource) + *m = *o + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Annotations, &o.Annotations) + if o.Payload != nil { + m.Payload = &google_protobuf3.Any{} + deepcopy.Copy(m.Payload, o.Payload) + } +} + +func (m *Extension) Copy() *Extension { + if m == nil { + return nil + } + o := &Extension{} + o.CopyFrom(m) + return o +} + +func (m *Extension) CopyFrom(src interface{}) { + + o := src.(*Extension) + *m = *o + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Annotations, &o.Annotations) +} + +func (m *Meta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Meta) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Version.Size())) + n1, err := 
m.Version.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + if m.CreatedAt != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.CreatedAt.Size())) + n2, err := m.CreatedAt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.UpdatedAt != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.UpdatedAt.Size())) + n3, err := m.UpdatedAt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} + +func (m *Node) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Node) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n4, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n5, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + if m.Description != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Description.Size())) + n6, err := m.Description.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + dAtA[i] = 0x2a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Status.Size())) + n7, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + if m.ManagerStatus != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.ManagerStatus.Size())) + n8, err := m.ManagerStatus.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.Attachment != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Attachment.Size())) + n9, err := m.Attachment.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + dAtA[i] = 0x42 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Certificate.Size())) + n10, err := m.Certificate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + if m.Role != 0 { + dAtA[i] = 0x48 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Role)) + } + if len(m.Attachments) > 0 { + for _, msg := range m.Attachments { + dAtA[i] = 0x52 + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Service) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Service) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n11, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n12, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + if m.Endpoint != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Endpoint.Size())) + n13, err := 
m.Endpoint.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + if m.UpdateStatus != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.UpdateStatus.Size())) + n14, err := m.UpdateStatus.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + if m.PreviousSpec != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.PreviousSpec.Size())) + n15, err := m.PreviousSpec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + if m.SpecVersion != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.SpecVersion.Size())) + n16, err := m.SpecVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + if m.PreviousSpecVersion != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.PreviousSpecVersion.Size())) + n17, err := m.PreviousSpecVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + return i, nil +} + +func (m *Endpoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoint) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Spec != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n18, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.VirtualIPs) > 0 { + for _, msg := range m.VirtualIPs { + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Endpoint_VirtualIP) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoint_VirtualIP) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NetworkID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.NetworkID))) + i += copy(dAtA[i:], m.NetworkID) + } + if len(m.Addr) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.Addr))) + i += copy(dAtA[i:], m.Addr) + } + return i, nil +} + +func (m *Task) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Task) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n19, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n20, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + if len(m.ServiceID) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ServiceID))) + i += copy(dAtA[i:], m.ServiceID) + } + if m.Slot != 0 { + dAtA[i] = 
0x28 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Slot)) + } + if len(m.NodeID) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + dAtA[i] = 0x3a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Annotations.Size())) + n21, err := m.Annotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + dAtA[i] = 0x42 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.ServiceAnnotations.Size())) + n22, err := m.ServiceAnnotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + dAtA[i] = 0x4a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Status.Size())) + n23, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + if m.DesiredState != 0 { + dAtA[i] = 0x50 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.DesiredState)) + } + if len(m.Networks) > 0 { + for _, msg := range m.Networks { + dAtA[i] = 0x5a + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Endpoint != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Endpoint.Size())) + n24, err := m.Endpoint.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + } + if m.LogDriver != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.LogDriver.Size())) + n25, err := m.LogDriver.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + } + if m.SpecVersion != nil { + dAtA[i] = 0x72 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.SpecVersion.Size())) + n26, err := m.SpecVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } + if len(m.AssignedGenericResources) > 0 { + for _, msg := range m.AssignedGenericResources { + dAtA[i] = 0x7a + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NetworkAttachment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkAttachment) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Network != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Network.Size())) + n27, err := m.Network.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + } + if len(m.Addresses) > 0 { + for _, s := range m.Addresses { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Aliases) > 0 { + for _, s := range m.Aliases { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.DriverAttachmentOpts) > 0 { + for k, _ := range m.DriverAttachmentOpts { + dAtA[i] = 0x22 + i++ + v := m.DriverAttachmentOpts[k] + mapSize := 1 + len(k) + sovObjects(uint64(len(k))) + 1 + len(v) + sovObjects(uint64(len(v))) + i = encodeVarintObjects(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *Network) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Network) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n28, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n29, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + if m.DriverState != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.DriverState.Size())) + n30, err := m.DriverState.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + } + if m.IPAM != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.IPAM.Size())) + n31, err := m.IPAM.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + } + return i, nil +} + +func (m *Cluster) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n32, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n33, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + dAtA[i] = 0x22 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.RootCA.Size())) + n34, err := m.RootCA.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + if len(m.NetworkBootstrapKeys) > 0 { + for _, msg := range m.NetworkBootstrapKeys { + dAtA[i] = 0x2a + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.EncryptionKeyLamportClock != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.EncryptionKeyLamportClock)) + } + if len(m.BlacklistedCertificates) > 0 { + for k, _ := range m.BlacklistedCertificates { + dAtA[i] = 0x42 + i++ + v := m.BlacklistedCertificates[k] + msgSize := 0 + if v != nil { + msgSize = v.Size() + msgSize += 1 + sovObjects(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovObjects(uint64(len(k))) + msgSize + i = encodeVarintObjects(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + if v != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(v.Size())) + n35, err := v.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + } + } + } + if len(m.UnlockKeys) > 0 { + for _, msg := range m.UnlockKeys { + dAtA[i] = 0x4a + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.FIPS { + dAtA[i] = 0x50 + i++ + if m.FIPS { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *Secret) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Secret) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n36, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n37, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + if m.Internal { + dAtA[i] = 0x20 + i++ + if m.Internal { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *Config) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Config) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n38, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n39, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + return i, nil +} + +func (m *Resource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Resource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n40, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Annotations.Size())) + n41, err := m.Annotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 + if len(m.Kind) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + } + if m.Payload != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Payload.Size())) + n42, err := m.Payload.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n42 + } + return i, nil +} + +func (m *Extension) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Extension) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n43, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n43 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Annotations.Size())) + n44, err := m.Annotations.MarshalTo(dAtA[i:]) 
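+ // The hard-coded byte before each field is the protobuf field key,
+ // (field_number<<3)|wire_type: 0xa is field 1, 0x12 field 2, 0x1a field 3,
+ // 0x22 field 4, all with wire type 2 (length-delimited). A hypothetical
+ // helper, not part of this file, would compute it as byte(field<<3 | 2).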
+ if err != nil { + return 0, err + } + i += n44 + if len(m.Description) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + } + return i, nil +} + +func encodeVarintObjects(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +func (m *Meta) Size() (n int) { + var l int + _ = l + l = m.Version.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.CreatedAt != nil { + l = m.CreatedAt.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.UpdatedAt != nil { + l = m.UpdatedAt.Size() + n += 1 + l + sovObjects(uint64(l)) + } + return n +} + +func (m *Node) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.Description != nil { + l = m.Description.Size() + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Status.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.ManagerStatus != nil { + l = m.ManagerStatus.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.Attachment != nil { + l = m.Attachment.Size() + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Certificate.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.Role != 0 { + n += 1 + sovObjects(uint64(m.Role)) + } + if len(m.Attachments) > 0 { + for _, e := range m.Attachments { + l = e.Size() + n += 1 + l + sovObjects(uint64(l)) + } + } + return n +} + +func (m *Service) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.Endpoint != nil { + l = m.Endpoint.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.UpdateStatus != nil { + l = m.UpdateStatus.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.PreviousSpec != nil { + l = m.PreviousSpec.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.SpecVersion != nil { + l = m.SpecVersion.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.PreviousSpecVersion != nil { + l = m.PreviousSpecVersion.Size() + n += 1 + l + sovObjects(uint64(l)) + } + return n +} + +func (m *Endpoint) Size() (n int) { + var l int + _ = l + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovObjects(uint64(l)) + } + } + if len(m.VirtualIPs) > 0 { + for _, e := range m.VirtualIPs { + l = e.Size() + n += 1 + l + sovObjects(uint64(l)) + } + } + return n +} + +func (m *Endpoint_VirtualIP) Size() (n int) { + var l int + _ = l + l = len(m.NetworkID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + return n +} + +func (m *Task) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + l = len(m.ServiceID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + if m.Slot != 0 { + n += 1 + sovObjects(uint64(m.Slot)) + } + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Annotations.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.ServiceAnnotations.Size() + n += 1 + l + sovObjects(uint64(l)) + l 
= m.Status.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.DesiredState != 0 { + n += 1 + sovObjects(uint64(m.DesiredState)) + } + if len(m.Networks) > 0 { + for _, e := range m.Networks { + l = e.Size() + n += 1 + l + sovObjects(uint64(l)) + } + } + if m.Endpoint != nil { + l = m.Endpoint.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.LogDriver != nil { + l = m.LogDriver.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.SpecVersion != nil { + l = m.SpecVersion.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if len(m.AssignedGenericResources) > 0 { + for _, e := range m.AssignedGenericResources { + l = e.Size() + n += 1 + l + sovObjects(uint64(l)) + } + } + return n +} + +func (m *NetworkAttachment) Size() (n int) { + var l int + _ = l + if m.Network != nil { + l = m.Network.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if len(m.Addresses) > 0 { + for _, s := range m.Addresses { + l = len(s) + n += 1 + l + sovObjects(uint64(l)) + } + } + if len(m.Aliases) > 0 { + for _, s := range m.Aliases { + l = len(s) + n += 1 + l + sovObjects(uint64(l)) + } + } + if len(m.DriverAttachmentOpts) > 0 { + for k, v := range m.DriverAttachmentOpts { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovObjects(uint64(len(k))) + 1 + len(v) + sovObjects(uint64(len(v))) + n += mapEntrySize + 1 + sovObjects(uint64(mapEntrySize)) + } + } + return n +} + +func (m *Network) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.DriverState != nil { + l = m.DriverState.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.IPAM != nil { + l = m.IPAM.Size() + n += 1 + l + sovObjects(uint64(l)) + } + return n +} + +func (m *Cluster) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.RootCA.Size() + n += 1 + l + sovObjects(uint64(l)) + if len(m.NetworkBootstrapKeys) > 0 { + for _, e := range m.NetworkBootstrapKeys { + l = e.Size() + n += 1 + l + sovObjects(uint64(l)) + } + } + if m.EncryptionKeyLamportClock != 0 { + n += 1 + sovObjects(uint64(m.EncryptionKeyLamportClock)) + } + if len(m.BlacklistedCertificates) > 0 { + for k, v := range m.BlacklistedCertificates { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovObjects(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovObjects(uint64(len(k))) + l + n += mapEntrySize + 1 + sovObjects(uint64(mapEntrySize)) + } + } + if len(m.UnlockKeys) > 0 { + for _, e := range m.UnlockKeys { + l = e.Size() + n += 1 + l + sovObjects(uint64(l)) + } + } + if m.FIPS { + n += 2 + } + return n +} + +func (m *Secret) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.Internal { + n += 2 + } + return n +} + +func (m *Config) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + return n +} + +func (m *Resource) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) 
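+ // Each embedded message is a length-delimited field, so it contributes one
+ // key byte, the varint width of its length (sovObjects counts one byte per
+ // 7 bits of value), and the payload itself; that is the recurring
+ // n += 1 + l + sovObjects(uint64(l)) pattern.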
+ l = m.Annotations.Size() + n += 1 + l + sovObjects(uint64(l)) + l = len(m.Kind) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + if m.Payload != nil { + l = m.Payload.Size() + n += 1 + l + sovObjects(uint64(l)) + } + return n +} + +func (m *Extension) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Annotations.Size() + n += 1 + l + sovObjects(uint64(l)) + l = len(m.Description) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + return n +} + +func sovObjects(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozObjects(x uint64) (n int) { + return sovObjects(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +type NodeCheckFunc func(t1, t2 *Node) bool + +type EventNode interface { + IsEventNode() bool +} + +type EventCreateNode struct { + Node *Node + Checks []NodeCheckFunc +} + +func (e EventCreateNode) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateNode) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Node, typedEvent.Node) { + return false + } + } + return true +} + +func (e EventCreateNode) IsEventCreate() bool { + return true +} + +func (e EventCreateNode) IsEventNode() bool { + return true +} + +type EventUpdateNode struct { + Node *Node + OldNode *Node + Checks []NodeCheckFunc +} + +func (e EventUpdateNode) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateNode) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Node, typedEvent.Node) { + return false + } + } + return true +} + +func (e EventUpdateNode) IsEventUpdate() bool { + return true +} + +func (e EventUpdateNode) IsEventNode() bool { + return true +} + +type EventDeleteNode struct { + Node *Node + Checks []NodeCheckFunc +} + +func (e EventDeleteNode) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteNode) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Node, typedEvent.Node) { + return false + } + } + return true +} + +func (e EventDeleteNode) IsEventDelete() bool { + return true +} + +func (e EventDeleteNode) IsEventNode() bool { + return true +} + +func (m *Node) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Node) GetMeta() Meta { + return m.Meta +} + +func (m *Node) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Node) GetID() string { + return m.ID +} + +func (m *Node) EventCreate() Event { + return EventCreateNode{Node: m} +} + +func (m *Node) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateNode{Node: m, OldNode: oldObject.(*Node)} + } else { + return EventUpdateNode{Node: m} + } +} + +func (m *Node) EventDelete() Event { + return EventDeleteNode{Node: m} +} + +func NodeCheckID(v1, v2 *Node) bool { + return v1.ID == v2.ID +} + +func NodeCheckIDPrefix(v1, v2 *Node) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func NodeCheckName(v1, v2 *Node) bool { + if v1.Description == nil || v2.Description == nil { + return false + } + return v1.Description.Hostname == v2.Description.Hostname +} + +func NodeCheckNamePrefix(v1, v2 *Node) bool { + if v1.Description == nil || v2.Description == nil { + return false + } + return strings.HasPrefix(v2.Description.Hostname, v1.Description.Hostname) +} + +func NodeCheckCustom(v1, v2 *Node) bool { + return checkCustom(v1.Spec.Annotations, 
v2.Spec.Annotations) +} + +func NodeCheckCustomPrefix(v1, v2 *Node) bool { + return checkCustomPrefix(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func NodeCheckRole(v1, v2 *Node) bool { + return v1.Role == v2.Role +} + +func NodeCheckMembership(v1, v2 *Node) bool { + return v1.Spec.Membership == v2.Spec.Membership +} + +func ConvertNodeWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Node + checkFuncs []NodeCheckFunc + hasRole bool + hasMembership bool + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, NodeCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, NodeCheckIDPrefix) + case *SelectBy_Name: + if m.Description != nil { + return nil, errConflictingFilters + } + m.Description = &NodeDescription{Hostname: v.Name} + checkFuncs = append(checkFuncs, NodeCheckName) + case *SelectBy_NamePrefix: + if m.Description != nil { + return nil, errConflictingFilters + } + m.Description = &NodeDescription{Hostname: v.NamePrefix} + checkFuncs = append(checkFuncs, NodeCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, NodeCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, NodeCheckCustomPrefix) + case *SelectBy_Role: + if hasRole { + return nil, errConflictingFilters + } + hasRole = true + m.Role = v.Role + checkFuncs = append(checkFuncs, NodeCheckRole) + case *SelectBy_Membership: + if hasMembership { + return nil, errConflictingFilters + } + hasMembership = true + m.Spec.Membership = v.Membership + checkFuncs = append(checkFuncs, NodeCheckMembership) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateNode{Node: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateNode{Node: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteNode{Node: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type NodeIndexerByID struct{} + +func (indexer NodeIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer NodeIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer NodeIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Node) + return true, []byte(m.ID + "\x00"), nil +} + +type NodeIndexerByName struct{} + +func (indexer NodeIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer NodeIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
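+ // The *IndexerByID, *IndexerByName and *CustomIndexer types follow the
+ // go-memdb style Indexer contract (FromArgs, PrefixFromArgs, FromObject).
+ // FromObject keys are lowercased for name indexes and always terminated
+ // with "\x00", so an exact match can never collide with a longer key that
+ // merely shares the prefix.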
+} +func (indexer NodeIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Node) + val := m.Spec.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type NodeCustomIndexer struct{} + +func (indexer NodeCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer NodeCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer NodeCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Node) + return customIndexer("", &m.Spec.Annotations) +} + +type ServiceCheckFunc func(t1, t2 *Service) bool + +type EventService interface { + IsEventService() bool +} + +type EventCreateService struct { + Service *Service + Checks []ServiceCheckFunc +} + +func (e EventCreateService) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateService) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Service, typedEvent.Service) { + return false + } + } + return true +} + +func (e EventCreateService) IsEventCreate() bool { + return true +} + +func (e EventCreateService) IsEventService() bool { + return true +} + +type EventUpdateService struct { + Service *Service + OldService *Service + Checks []ServiceCheckFunc +} + +func (e EventUpdateService) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateService) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Service, typedEvent.Service) { + return false + } + } + return true +} + +func (e EventUpdateService) IsEventUpdate() bool { + return true +} + +func (e EventUpdateService) IsEventService() bool { + return true +} + +type EventDeleteService struct { + Service *Service + Checks []ServiceCheckFunc +} + +func (e EventDeleteService) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteService) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Service, typedEvent.Service) { + return false + } + } + return true +} + +func (e EventDeleteService) IsEventDelete() bool { + return true +} + +func (e EventDeleteService) IsEventService() bool { + return true +} + +func (m *Service) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Service) GetMeta() Meta { + return m.Meta +} + +func (m *Service) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Service) GetID() string { + return m.ID +} + +func (m *Service) EventCreate() Event { + return EventCreateService{Service: m} +} + +func (m *Service) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateService{Service: m, OldService: oldObject.(*Service)} + } else { + return EventUpdateService{Service: m} + } +} + +func (m *Service) EventDelete() Event { + return EventDeleteService{Service: m} +} + +func ServiceCheckID(v1, v2 *Service) bool { + return v1.ID == v2.ID +} + +func ServiceCheckIDPrefix(v1, v2 *Service) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func ServiceCheckName(v1, v2 *Service) bool { + return v1.Spec.Annotations.Name == v2.Spec.Annotations.Name +} + +func ServiceCheckNamePrefix(v1, v2 *Service) bool { + return strings.HasPrefix(v2.Spec.Annotations.Name, v1.Spec.Annotations.Name) +} + +func ServiceCheckCustom(v1, v2 *Service) bool { + return checkCustom(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ServiceCheckCustomPrefix(v1, v2 *Service) bool { + return 
checkCustomPrefix(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ConvertServiceWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Service + checkFuncs []ServiceCheckFunc + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, ServiceCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, ServiceCheckIDPrefix) + case *SelectBy_Name: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, ServiceCheckName) + case *SelectBy_NamePrefix: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, ServiceCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, ServiceCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, ServiceCheckCustomPrefix) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateService{Service: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateService{Service: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteService{Service: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type ServiceIndexerByID struct{} + +func (indexer ServiceIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ServiceIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ServiceIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Service) + return true, []byte(m.ID + "\x00"), nil +} + +type ServiceIndexerByName struct{} + +func (indexer ServiceIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ServiceIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ServiceIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Service) + val := m.Spec.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type ServiceCustomIndexer struct{} + +func (indexer ServiceCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ServiceCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer ServiceCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Service) + return customIndexer("", &m.Spec.Annotations) +} + +type TaskCheckFunc func(t1, t2 *Task) bool + +type EventTask interface { + IsEventTask() bool +} + +type EventCreateTask struct { + Task *Task + Checks []TaskCheckFunc +} + +func (e EventCreateTask) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateTask) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Task, typedEvent.Task) { + return false + } + } + return true +} + +func (e EventCreateTask) IsEventCreate() bool { + return true +} + +func (e EventCreateTask) IsEventTask() bool { + return true +} + +type EventUpdateTask struct { + Task *Task + OldTask *Task + Checks []TaskCheckFunc +} + +func (e EventUpdateTask) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateTask) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Task, typedEvent.Task) { + return false + } + } + return true +} + +func (e EventUpdateTask) IsEventUpdate() bool { + return true +} + +func (e EventUpdateTask) IsEventTask() bool { + return true +} + +type EventDeleteTask struct { + Task *Task + Checks []TaskCheckFunc +} + +func (e EventDeleteTask) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteTask) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Task, typedEvent.Task) { + return false + } + } + return true +} + +func (e EventDeleteTask) IsEventDelete() bool { + return true +} + +func (e EventDeleteTask) IsEventTask() bool { + return true +} + +func (m *Task) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Task) GetMeta() Meta { + return m.Meta +} + +func (m *Task) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Task) GetID() string { + return m.ID +} + +func (m *Task) EventCreate() Event { + return EventCreateTask{Task: m} +} + +func (m *Task) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateTask{Task: m, OldTask: oldObject.(*Task)} + } else { + return EventUpdateTask{Task: m} + } +} + +func (m *Task) EventDelete() Event { + return EventDeleteTask{Task: m} +} + +func TaskCheckID(v1, v2 *Task) bool { + return v1.ID == v2.ID +} + +func TaskCheckIDPrefix(v1, v2 *Task) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func TaskCheckName(v1, v2 *Task) bool { + return v1.Annotations.Name == v2.Annotations.Name +} + +func TaskCheckNamePrefix(v1, v2 *Task) bool { + return strings.HasPrefix(v2.Annotations.Name, v1.Annotations.Name) +} + +func TaskCheckCustom(v1, v2 *Task) bool { + return checkCustom(v1.Annotations, v2.Annotations) +} + +func TaskCheckCustomPrefix(v1, v2 *Task) bool { + return checkCustomPrefix(v1.Annotations, v2.Annotations) +} + +func TaskCheckNodeID(v1, v2 *Task) bool { + return v1.NodeID == v2.NodeID +} + +func TaskCheckServiceID(v1, v2 *Task) bool { + return v1.ServiceID == v2.ServiceID +} + +func TaskCheckSlot(v1, v2 *Task) bool { + return v1.Slot == v2.Slot +} + +func TaskCheckDesiredState(v1, v2 *Task) bool { + return v1.DesiredState == v2.DesiredState +} + +func ConvertTaskWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Task + checkFuncs []TaskCheckFunc + hasDesiredState bool + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = 
append(checkFuncs, TaskCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, TaskCheckIDPrefix) + case *SelectBy_Name: + if m.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, TaskCheckName) + case *SelectBy_NamePrefix: + if m.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, TaskCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, TaskCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, TaskCheckCustomPrefix) + case *SelectBy_ServiceID: + if m.ServiceID != "" { + return nil, errConflictingFilters + } + m.ServiceID = v.ServiceID + checkFuncs = append(checkFuncs, TaskCheckServiceID) + case *SelectBy_NodeID: + if m.NodeID != "" { + return nil, errConflictingFilters + } + m.NodeID = v.NodeID + checkFuncs = append(checkFuncs, TaskCheckNodeID) + case *SelectBy_Slot: + if m.Slot != 0 || m.ServiceID != "" { + return nil, errConflictingFilters + } + m.ServiceID = v.Slot.ServiceID + m.Slot = v.Slot.Slot + checkFuncs = append(checkFuncs, TaskCheckServiceID, TaskCheckSlot) + case *SelectBy_DesiredState: + if hasDesiredState { + return nil, errConflictingFilters + } + hasDesiredState = true + m.DesiredState = v.DesiredState + checkFuncs = append(checkFuncs, TaskCheckDesiredState) + } + } + // action is a bitmask; one event template is emitted per requested kind + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateTask{Task: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateTask{Task: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteTask{Task: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type TaskIndexerByID struct{} + +func (indexer TaskIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer TaskIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer TaskIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Task) + return true, []byte(m.ID + "\x00"), nil +} + +type TaskIndexerByName struct{} + +func (indexer TaskIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer TaskIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer TaskIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Task) + val := m.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type TaskCustomIndexer struct{} + +func (indexer TaskCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer TaskCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...)
+} +func (indexer TaskCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Task) + return customIndexer("", &m.Annotations) +} + +type NetworkCheckFunc func(t1, t2 *Network) bool + +type EventNetwork interface { + IsEventNetwork() bool +} + +type EventCreateNetwork struct { + Network *Network + Checks []NetworkCheckFunc +} + +func (e EventCreateNetwork) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateNetwork) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Network, typedEvent.Network) { + return false + } + } + return true +} + +func (e EventCreateNetwork) IsEventCreate() bool { + return true +} + +func (e EventCreateNetwork) IsEventNetwork() bool { + return true +} + +type EventUpdateNetwork struct { + Network *Network + OldNetwork *Network + Checks []NetworkCheckFunc +} + +func (e EventUpdateNetwork) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateNetwork) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Network, typedEvent.Network) { + return false + } + } + return true +} + +func (e EventUpdateNetwork) IsEventUpdate() bool { + return true +} + +func (e EventUpdateNetwork) IsEventNetwork() bool { + return true +} + +type EventDeleteNetwork struct { + Network *Network + Checks []NetworkCheckFunc +} + +func (e EventDeleteNetwork) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteNetwork) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Network, typedEvent.Network) { + return false + } + } + return true +} + +func (e EventDeleteNetwork) IsEventDelete() bool { + return true +} + +func (e EventDeleteNetwork) IsEventNetwork() bool { + return true +} + +func (m *Network) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Network) GetMeta() Meta { + return m.Meta +} + +func (m *Network) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Network) GetID() string { + return m.ID +} + +func (m *Network) EventCreate() Event { + return EventCreateNetwork{Network: m} +} + +func (m *Network) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateNetwork{Network: m, OldNetwork: oldObject.(*Network)} + } else { + return EventUpdateNetwork{Network: m} + } +} + +func (m *Network) EventDelete() Event { + return EventDeleteNetwork{Network: m} +} + +func NetworkCheckID(v1, v2 *Network) bool { + return v1.ID == v2.ID +} + +func NetworkCheckIDPrefix(v1, v2 *Network) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func NetworkCheckName(v1, v2 *Network) bool { + return v1.Spec.Annotations.Name == v2.Spec.Annotations.Name +} + +func NetworkCheckNamePrefix(v1, v2 *Network) bool { + return strings.HasPrefix(v2.Spec.Annotations.Name, v1.Spec.Annotations.Name) +} + +func NetworkCheckCustom(v1, v2 *Network) bool { + return checkCustom(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func NetworkCheckCustomPrefix(v1, v2 *Network) bool { + return checkCustomPrefix(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ConvertNetworkWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Network + checkFuncs []NetworkCheckFunc + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, NetworkCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + 
m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, NetworkCheckIDPrefix) + case *SelectBy_Name: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, NetworkCheckName) + case *SelectBy_NamePrefix: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, NetworkCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, NetworkCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, NetworkCheckCustomPrefix) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateNetwork{Network: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateNetwork{Network: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteNetwork{Network: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type NetworkIndexerByID struct{} + +func (indexer NetworkIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer NetworkIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer NetworkIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Network) + return true, []byte(m.ID + "\x00"), nil +} + +type NetworkIndexerByName struct{} + +func (indexer NetworkIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer NetworkIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer NetworkIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Network) + val := m.Spec.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type NetworkCustomIndexer struct{} + +func (indexer NetworkCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer NetworkCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer NetworkCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Network) + return customIndexer("", &m.Spec.Annotations) +} + +type ClusterCheckFunc func(t1, t2 *Cluster) bool + +type EventCluster interface { + IsEventCluster() bool +} + +type EventCreateCluster struct { + Cluster *Cluster + Checks []ClusterCheckFunc +} + +func (e EventCreateCluster) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateCluster) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Cluster, typedEvent.Cluster) { + return false + } + } + return true +} + +func (e EventCreateCluster) IsEventCreate() bool { + return true +} + +func (e EventCreateCluster) IsEventCluster() bool { + return true +} + +type EventUpdateCluster struct { + Cluster *Cluster + OldCluster *Cluster + Checks []ClusterCheckFunc +} + +func (e EventUpdateCluster) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateCluster) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Cluster, typedEvent.Cluster) { + return false + } + } + return true +} + +func (e EventUpdateCluster) IsEventUpdate() bool { + return true +} + +func (e EventUpdateCluster) IsEventCluster() bool { + return true +} + +type EventDeleteCluster struct { + Cluster *Cluster + Checks []ClusterCheckFunc +} + +func (e EventDeleteCluster) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteCluster) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Cluster, typedEvent.Cluster) { + return false + } + } + return true +} + +func (e EventDeleteCluster) IsEventDelete() bool { + return true +} + +func (e EventDeleteCluster) IsEventCluster() bool { + return true +} + +func (m *Cluster) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Cluster) GetMeta() Meta { + return m.Meta +} + +func (m *Cluster) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Cluster) GetID() string { + return m.ID +} + +func (m *Cluster) EventCreate() Event { + return EventCreateCluster{Cluster: m} +} + +func (m *Cluster) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateCluster{Cluster: m, OldCluster: oldObject.(*Cluster)} + } else { + return EventUpdateCluster{Cluster: m} + } +} + +func (m *Cluster) EventDelete() Event { + return EventDeleteCluster{Cluster: m} +} + +func ClusterCheckID(v1, v2 *Cluster) bool { + return v1.ID == v2.ID +} + +func ClusterCheckIDPrefix(v1, v2 *Cluster) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func ClusterCheckName(v1, v2 *Cluster) bool { + return v1.Spec.Annotations.Name == v2.Spec.Annotations.Name +} + +func ClusterCheckNamePrefix(v1, v2 *Cluster) bool { + return strings.HasPrefix(v2.Spec.Annotations.Name, v1.Spec.Annotations.Name) +} + +func ClusterCheckCustom(v1, v2 *Cluster) bool { + return checkCustom(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ClusterCheckCustomPrefix(v1, v2 *Cluster) bool { + return checkCustomPrefix(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ConvertClusterWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Cluster + checkFuncs []ClusterCheckFunc + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, ClusterCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, 
errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, ClusterCheckIDPrefix) + case *SelectBy_Name: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, ClusterCheckName) + case *SelectBy_NamePrefix: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, ClusterCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, ClusterCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, ClusterCheckCustomPrefix) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateCluster{Cluster: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateCluster{Cluster: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteCluster{Cluster: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type ClusterIndexerByID struct{} + +func (indexer ClusterIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ClusterIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ClusterIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Cluster) + return true, []byte(m.ID + "\x00"), nil +} + +type ClusterIndexerByName struct{} + +func (indexer ClusterIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ClusterIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ClusterIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Cluster) + val := m.Spec.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type ClusterCustomIndexer struct{} + +func (indexer ClusterCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ClusterCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer ClusterCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Cluster) + return customIndexer("", &m.Spec.Annotations) +} + +type SecretCheckFunc func(t1, t2 *Secret) bool + +type EventSecret interface { + IsEventSecret() bool +} + +type EventCreateSecret struct { + Secret *Secret + Checks []SecretCheckFunc +} + +func (e EventCreateSecret) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateSecret) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Secret, typedEvent.Secret) { + return false + } + } + return true +} + +func (e EventCreateSecret) IsEventCreate() bool { + return true +} + +func (e EventCreateSecret) IsEventSecret() bool { + return true +} + +type EventUpdateSecret struct { + Secret *Secret + OldSecret *Secret + Checks []SecretCheckFunc +} + +func (e EventUpdateSecret) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateSecret) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Secret, typedEvent.Secret) { + return false + } + } + return true +} + +func (e EventUpdateSecret) IsEventUpdate() bool { + return true +} + +func (e EventUpdateSecret) IsEventSecret() bool { + return true +} + +type EventDeleteSecret struct { + Secret *Secret + Checks []SecretCheckFunc +} + +func (e EventDeleteSecret) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteSecret) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Secret, typedEvent.Secret) { + return false + } + } + return true +} + +func (e EventDeleteSecret) IsEventDelete() bool { + return true +} + +func (e EventDeleteSecret) IsEventSecret() bool { + return true +} + +func (m *Secret) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Secret) GetMeta() Meta { + return m.Meta +} + +func (m *Secret) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Secret) GetID() string { + return m.ID +} + +func (m *Secret) EventCreate() Event { + return EventCreateSecret{Secret: m} +} + +func (m *Secret) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateSecret{Secret: m, OldSecret: oldObject.(*Secret)} + } else { + return EventUpdateSecret{Secret: m} + } +} + +func (m *Secret) EventDelete() Event { + return EventDeleteSecret{Secret: m} +} + +func SecretCheckID(v1, v2 *Secret) bool { + return v1.ID == v2.ID +} + +func SecretCheckIDPrefix(v1, v2 *Secret) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func SecretCheckName(v1, v2 *Secret) bool { + return v1.Spec.Annotations.Name == v2.Spec.Annotations.Name +} + +func SecretCheckNamePrefix(v1, v2 *Secret) bool { + return strings.HasPrefix(v2.Spec.Annotations.Name, v1.Spec.Annotations.Name) +} + +func SecretCheckCustom(v1, v2 *Secret) bool { + return checkCustom(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func SecretCheckCustomPrefix(v1, v2 *Secret) bool { + return checkCustomPrefix(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ConvertSecretWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Secret + checkFuncs []SecretCheckFunc + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, SecretCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, 
SecretCheckIDPrefix) + case *SelectBy_Name: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, SecretCheckName) + case *SelectBy_NamePrefix: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, SecretCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, SecretCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, SecretCheckCustomPrefix) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateSecret{Secret: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateSecret{Secret: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteSecret{Secret: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type SecretIndexerByID struct{} + +func (indexer SecretIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer SecretIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer SecretIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Secret) + return true, []byte(m.ID + "\x00"), nil +} + +type SecretIndexerByName struct{} + +func (indexer SecretIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer SecretIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer SecretIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Secret) + val := m.Spec.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type SecretCustomIndexer struct{} + +func (indexer SecretCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer SecretCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer SecretCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Secret) + return customIndexer("", &m.Spec.Annotations) +} + +type ConfigCheckFunc func(t1, t2 *Config) bool + +type EventConfig interface { + IsEventConfig() bool +} + +type EventCreateConfig struct { + Config *Config + Checks []ConfigCheckFunc +} + +func (e EventCreateConfig) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateConfig) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Config, typedEvent.Config) { + return false + } + } + return true +} + +func (e EventCreateConfig) IsEventCreate() bool { + return true +} + +func (e EventCreateConfig) IsEventConfig() bool { + return true +} + +type EventUpdateConfig struct { + Config *Config + OldConfig *Config + Checks []ConfigCheckFunc +} + +func (e EventUpdateConfig) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateConfig) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Config, typedEvent.Config) { + return false + } + } + return true +} + +func (e EventUpdateConfig) IsEventUpdate() bool { + return true +} + +func (e EventUpdateConfig) IsEventConfig() bool { + return true +} + +type EventDeleteConfig struct { + Config *Config + Checks []ConfigCheckFunc +} + +func (e EventDeleteConfig) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteConfig) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Config, typedEvent.Config) { + return false + } + } + return true +} + +func (e EventDeleteConfig) IsEventDelete() bool { + return true +} + +func (e EventDeleteConfig) IsEventConfig() bool { + return true +} + +func (m *Config) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Config) GetMeta() Meta { + return m.Meta +} + +func (m *Config) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Config) GetID() string { + return m.ID +} + +func (m *Config) EventCreate() Event { + return EventCreateConfig{Config: m} +} + +func (m *Config) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateConfig{Config: m, OldConfig: oldObject.(*Config)} + } else { + return EventUpdateConfig{Config: m} + } +} + +func (m *Config) EventDelete() Event { + return EventDeleteConfig{Config: m} +} + +func ConfigCheckID(v1, v2 *Config) bool { + return v1.ID == v2.ID +} + +func ConfigCheckIDPrefix(v1, v2 *Config) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func ConfigCheckName(v1, v2 *Config) bool { + return v1.Spec.Annotations.Name == v2.Spec.Annotations.Name +} + +func ConfigCheckNamePrefix(v1, v2 *Config) bool { + return strings.HasPrefix(v2.Spec.Annotations.Name, v1.Spec.Annotations.Name) +} + +func ConfigCheckCustom(v1, v2 *Config) bool { + return checkCustom(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ConfigCheckCustomPrefix(v1, v2 *Config) bool { + return checkCustomPrefix(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ConvertConfigWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Config + checkFuncs []ConfigCheckFunc + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, ConfigCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, 
ConfigCheckIDPrefix) + case *SelectBy_Name: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, ConfigCheckName) + case *SelectBy_NamePrefix: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, ConfigCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, ConfigCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, ConfigCheckCustomPrefix) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateConfig{Config: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateConfig{Config: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteConfig{Config: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type ConfigIndexerByID struct{} + +func (indexer ConfigIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ConfigIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ConfigIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Config) + return true, []byte(m.ID + "\x00"), nil +} + +type ConfigIndexerByName struct{} + +func (indexer ConfigIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ConfigIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ConfigIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Config) + val := m.Spec.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type ConfigCustomIndexer struct{} + +func (indexer ConfigCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ConfigCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer ConfigCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Config) + return customIndexer("", &m.Spec.Annotations) +} + +type ResourceCheckFunc func(t1, t2 *Resource) bool + +type EventResource interface { + IsEventResource() bool +} + +type EventCreateResource struct { + Resource *Resource + Checks []ResourceCheckFunc +} + +func (e EventCreateResource) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateResource) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Resource, typedEvent.Resource) { + return false + } + } + return true +} + +func (e EventCreateResource) IsEventCreate() bool { + return true +} + +func (e EventCreateResource) IsEventResource() bool { + return true +} + +type EventUpdateResource struct { + Resource *Resource + OldResource *Resource + Checks []ResourceCheckFunc +} + +func (e EventUpdateResource) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateResource) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Resource, typedEvent.Resource) { + return false + } + } + return true +} + +func (e EventUpdateResource) IsEventUpdate() bool { + return true +} + +func (e EventUpdateResource) IsEventResource() bool { + return true +} + +type EventDeleteResource struct { + Resource *Resource + Checks []ResourceCheckFunc +} + +func (e EventDeleteResource) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteResource) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Resource, typedEvent.Resource) { + return false + } + } + return true +} + +func (e EventDeleteResource) IsEventDelete() bool { + return true +} + +func (e EventDeleteResource) IsEventResource() bool { + return true +} + +func (m *Resource) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Resource) GetMeta() Meta { + return m.Meta +} + +func (m *Resource) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Resource) GetID() string { + return m.ID +} + +func (m *Resource) EventCreate() Event { + return EventCreateResource{Resource: m} +} + +func (m *Resource) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateResource{Resource: m, OldResource: oldObject.(*Resource)} + } else { + return EventUpdateResource{Resource: m} + } +} + +func (m *Resource) EventDelete() Event { + return EventDeleteResource{Resource: m} +} + +func ResourceCheckID(v1, v2 *Resource) bool { + return v1.ID == v2.ID +} + +func ResourceCheckIDPrefix(v1, v2 *Resource) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func ResourceCheckName(v1, v2 *Resource) bool { + return v1.Annotations.Name == v2.Annotations.Name +} + +func ResourceCheckNamePrefix(v1, v2 *Resource) bool { + return strings.HasPrefix(v2.Annotations.Name, v1.Annotations.Name) +} + +func ResourceCheckCustom(v1, v2 *Resource) bool { + return checkCustom(v1.Annotations, v2.Annotations) +} + +func ResourceCheckCustomPrefix(v1, v2 *Resource) bool { + return checkCustomPrefix(v1.Annotations, v2.Annotations) +} + +func ResourceCheckKind(v1, v2 *Resource) bool { + return v1.Kind == v2.Kind +} + +func ConvertResourceWatch(action WatchActionKind, filters []*SelectBy, kind string) ([]Event, error) { + var ( + m Resource + checkFuncs []ResourceCheckFunc + ) + m.Kind = kind + checkFuncs = append(checkFuncs, ResourceCheckKind) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if 
m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, ResourceCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, ResourceCheckIDPrefix) + case *SelectBy_Name: + if m.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, ResourceCheckName) + case *SelectBy_NamePrefix: + if m.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, ResourceCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, ResourceCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, ResourceCheckCustomPrefix) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateResource{Resource: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateResource{Resource: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteResource{Resource: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type ResourceIndexerByID struct{} + +func (indexer ResourceIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ResourceIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ResourceIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Resource) + return true, []byte(m.ID + "\x00"), nil +} + +type ResourceIndexerByName struct{} + +func (indexer ResourceIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ResourceIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ResourceIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Resource) + val := m.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type ResourceCustomIndexer struct{} + +func (indexer ResourceCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ResourceCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer ResourceCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Resource) + return customIndexer("", &m.Annotations) +} + +type ExtensionCheckFunc func(t1, t2 *Extension) bool + +type EventExtension interface { + IsEventExtension() bool +} + +type EventCreateExtension struct { + Extension *Extension + Checks []ExtensionCheckFunc +} + +func (e EventCreateExtension) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateExtension) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Extension, typedEvent.Extension) { + return false + } + } + return true +} + +func (e EventCreateExtension) IsEventCreate() bool { + return true +} + +func (e EventCreateExtension) IsEventExtension() bool { + return true +} + +type EventUpdateExtension struct { + Extension *Extension + OldExtension *Extension + Checks []ExtensionCheckFunc +} + +func (e EventUpdateExtension) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateExtension) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Extension, typedEvent.Extension) { + return false + } + } + return true +} + +func (e EventUpdateExtension) IsEventUpdate() bool { + return true +} + +func (e EventUpdateExtension) IsEventExtension() bool { + return true +} + +type EventDeleteExtension struct { + Extension *Extension + Checks []ExtensionCheckFunc +} + +func (e EventDeleteExtension) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteExtension) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Extension, typedEvent.Extension) { + return false + } + } + return true +} + +func (e EventDeleteExtension) IsEventDelete() bool { + return true +} + +func (e EventDeleteExtension) IsEventExtension() bool { + return true +} + +func (m *Extension) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Extension) GetMeta() Meta { + return m.Meta +} + +func (m *Extension) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Extension) GetID() string { + return m.ID +} + +func (m *Extension) EventCreate() Event { + return EventCreateExtension{Extension: m} +} + +func (m *Extension) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateExtension{Extension: m, OldExtension: oldObject.(*Extension)} + } else { + return EventUpdateExtension{Extension: m} + } +} + +func (m *Extension) EventDelete() Event { + return EventDeleteExtension{Extension: m} +} + +func ExtensionCheckID(v1, v2 *Extension) bool { + return v1.ID == v2.ID +} + +func ExtensionCheckIDPrefix(v1, v2 *Extension) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func ExtensionCheckName(v1, v2 *Extension) bool { + return v1.Annotations.Name == v2.Annotations.Name +} + +func ExtensionCheckNamePrefix(v1, v2 *Extension) bool { + return strings.HasPrefix(v2.Annotations.Name, v1.Annotations.Name) +} + +func ExtensionCheckCustom(v1, v2 *Extension) bool { + return checkCustom(v1.Annotations, v2.Annotations) +} + +func ExtensionCheckCustomPrefix(v1, v2 *Extension) bool { + return checkCustomPrefix(v1.Annotations, v2.Annotations) +} + +func ConvertExtensionWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Extension + checkFuncs []ExtensionCheckFunc + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = 
append(checkFuncs, ExtensionCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, ExtensionCheckIDPrefix) + case *SelectBy_Name: + if m.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, ExtensionCheckName) + case *SelectBy_NamePrefix: + if m.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, ExtensionCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, ExtensionCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, ExtensionCheckCustomPrefix) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateExtension{Extension: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateExtension{Extension: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteExtension{Extension: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type ExtensionIndexerByID struct{} + +func (indexer ExtensionIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ExtensionIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ExtensionIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Extension) + return true, []byte(m.ID + "\x00"), nil +} + +type ExtensionIndexerByName struct{} + +func (indexer ExtensionIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ExtensionIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ExtensionIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Extension) + val := m.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type ExtensionCustomIndexer struct{} + +func (indexer ExtensionCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ExtensionCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
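+	// Editor's note: in ConvertExtensionWatch above (as in the other
+	// Convert*Watch helpers), WatchActionKind is treated as a bitmask, so a
+	// single watch entry may select any combination of create/update/remove
+	// events; an empty selection is rejected with errUnrecognizedAction.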
+} +func (indexer ExtensionCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Extension) + return customIndexer("", &m.Annotations) +} +func NewStoreAction(c Event) (StoreAction, error) { + var sa StoreAction + switch v := c.(type) { + case EventCreateNode: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Node{Node: v.Node} + case EventUpdateNode: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Node{Node: v.Node} + case EventDeleteNode: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Node{Node: v.Node} + case EventCreateService: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Service{Service: v.Service} + case EventUpdateService: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Service{Service: v.Service} + case EventDeleteService: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Service{Service: v.Service} + case EventCreateTask: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Task{Task: v.Task} + case EventUpdateTask: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Task{Task: v.Task} + case EventDeleteTask: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Task{Task: v.Task} + case EventCreateNetwork: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Network{Network: v.Network} + case EventUpdateNetwork: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Network{Network: v.Network} + case EventDeleteNetwork: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Network{Network: v.Network} + case EventCreateCluster: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Cluster{Cluster: v.Cluster} + case EventUpdateCluster: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Cluster{Cluster: v.Cluster} + case EventDeleteCluster: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Cluster{Cluster: v.Cluster} + case EventCreateSecret: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Secret{Secret: v.Secret} + case EventUpdateSecret: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Secret{Secret: v.Secret} + case EventDeleteSecret: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Secret{Secret: v.Secret} + case EventCreateConfig: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Config{Config: v.Config} + case EventUpdateConfig: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Config{Config: v.Config} + case EventDeleteConfig: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Config{Config: v.Config} + case EventCreateResource: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Resource{Resource: v.Resource} + case EventUpdateResource: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Resource{Resource: v.Resource} + case EventDeleteResource: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Resource{Resource: v.Resource} + case EventCreateExtension: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Extension{Extension: v.Extension} + case EventUpdateExtension: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Extension{Extension: v.Extension} + case EventDeleteExtension: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Extension{Extension: v.Extension} + default: + return StoreAction{}, errUnknownStoreAction + } + return sa, nil +} + +func EventFromStoreAction(sa StoreAction, oldObject StoreObject) (Event, error) 
{ + switch v := sa.Target.(type) { + case *StoreAction_Node: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateNode{Node: v.Node}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateNode{Node: v.Node, OldNode: oldObject.(*Node)}, nil + } else { + return EventUpdateNode{Node: v.Node}, nil + } + case StoreActionKindRemove: + return EventDeleteNode{Node: v.Node}, nil + } + case *StoreAction_Service: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateService{Service: v.Service}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateService{Service: v.Service, OldService: oldObject.(*Service)}, nil + } else { + return EventUpdateService{Service: v.Service}, nil + } + case StoreActionKindRemove: + return EventDeleteService{Service: v.Service}, nil + } + case *StoreAction_Task: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateTask{Task: v.Task}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateTask{Task: v.Task, OldTask: oldObject.(*Task)}, nil + } else { + return EventUpdateTask{Task: v.Task}, nil + } + case StoreActionKindRemove: + return EventDeleteTask{Task: v.Task}, nil + } + case *StoreAction_Network: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateNetwork{Network: v.Network}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateNetwork{Network: v.Network, OldNetwork: oldObject.(*Network)}, nil + } else { + return EventUpdateNetwork{Network: v.Network}, nil + } + case StoreActionKindRemove: + return EventDeleteNetwork{Network: v.Network}, nil + } + case *StoreAction_Cluster: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateCluster{Cluster: v.Cluster}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateCluster{Cluster: v.Cluster, OldCluster: oldObject.(*Cluster)}, nil + } else { + return EventUpdateCluster{Cluster: v.Cluster}, nil + } + case StoreActionKindRemove: + return EventDeleteCluster{Cluster: v.Cluster}, nil + } + case *StoreAction_Secret: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateSecret{Secret: v.Secret}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateSecret{Secret: v.Secret, OldSecret: oldObject.(*Secret)}, nil + } else { + return EventUpdateSecret{Secret: v.Secret}, nil + } + case StoreActionKindRemove: + return EventDeleteSecret{Secret: v.Secret}, nil + } + case *StoreAction_Config: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateConfig{Config: v.Config}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateConfig{Config: v.Config, OldConfig: oldObject.(*Config)}, nil + } else { + return EventUpdateConfig{Config: v.Config}, nil + } + case StoreActionKindRemove: + return EventDeleteConfig{Config: v.Config}, nil + } + case *StoreAction_Resource: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateResource{Resource: v.Resource}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateResource{Resource: v.Resource, OldResource: oldObject.(*Resource)}, nil + } else { + return EventUpdateResource{Resource: v.Resource}, nil + } + case StoreActionKindRemove: + return EventDeleteResource{Resource: v.Resource}, nil + } + case *StoreAction_Extension: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateExtension{Extension: v.Extension}, nil + case StoreActionKindUpdate: + if oldObject != nil 
{ + return EventUpdateExtension{Extension: v.Extension, OldExtension: oldObject.(*Extension)}, nil + } else { + return EventUpdateExtension{Extension: v.Extension}, nil + } + case StoreActionKindRemove: + return EventDeleteExtension{Extension: v.Extension}, nil + } + } + return nil, errUnknownStoreAction +} + +func WatchMessageEvent(c Event) *WatchMessage_Event { + switch v := c.(type) { + case EventCreateNode: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Node{Node: v.Node}}} + case EventUpdateNode: + if v.OldNode != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Node{Node: v.Node}}, OldObject: &Object{Object: &Object_Node{Node: v.OldNode}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Node{Node: v.Node}}} + } + case EventDeleteNode: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Node{Node: v.Node}}} + case EventCreateService: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Service{Service: v.Service}}} + case EventUpdateService: + if v.OldService != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Service{Service: v.Service}}, OldObject: &Object{Object: &Object_Service{Service: v.OldService}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Service{Service: v.Service}}} + } + case EventDeleteService: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Service{Service: v.Service}}} + case EventCreateTask: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Task{Task: v.Task}}} + case EventUpdateTask: + if v.OldTask != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Task{Task: v.Task}}, OldObject: &Object{Object: &Object_Task{Task: v.OldTask}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Task{Task: v.Task}}} + } + case EventDeleteTask: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Task{Task: v.Task}}} + case EventCreateNetwork: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Network{Network: v.Network}}} + case EventUpdateNetwork: + if v.OldNetwork != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Network{Network: v.Network}}, OldObject: &Object{Object: &Object_Network{Network: v.OldNetwork}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Network{Network: v.Network}}} + } + case EventDeleteNetwork: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Network{Network: v.Network}}} + case EventCreateCluster: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Cluster{Cluster: v.Cluster}}} + case EventUpdateCluster: + if v.OldCluster != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Cluster{Cluster: v.Cluster}}, OldObject: &Object{Object: &Object_Cluster{Cluster: v.OldCluster}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Cluster{Cluster: v.Cluster}}} + } + case EventDeleteCluster: + 
return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Cluster{Cluster: v.Cluster}}} + case EventCreateSecret: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Secret{Secret: v.Secret}}} + case EventUpdateSecret: + if v.OldSecret != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Secret{Secret: v.Secret}}, OldObject: &Object{Object: &Object_Secret{Secret: v.OldSecret}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Secret{Secret: v.Secret}}} + } + case EventDeleteSecret: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Secret{Secret: v.Secret}}} + case EventCreateConfig: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Config{Config: v.Config}}} + case EventUpdateConfig: + if v.OldConfig != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Config{Config: v.Config}}, OldObject: &Object{Object: &Object_Config{Config: v.OldConfig}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Config{Config: v.Config}}} + } + case EventDeleteConfig: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Config{Config: v.Config}}} + case EventCreateResource: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Resource{Resource: v.Resource}}} + case EventUpdateResource: + if v.OldResource != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Resource{Resource: v.Resource}}, OldObject: &Object{Object: &Object_Resource{Resource: v.OldResource}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Resource{Resource: v.Resource}}} + } + case EventDeleteResource: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Resource{Resource: v.Resource}}} + case EventCreateExtension: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Extension{Extension: v.Extension}}} + case EventUpdateExtension: + if v.OldExtension != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Extension{Extension: v.Extension}}, OldObject: &Object{Object: &Object_Extension{Extension: v.OldExtension}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Extension{Extension: v.Extension}}} + } + case EventDeleteExtension: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Extension{Extension: v.Extension}}} + } + return nil +} + +func ConvertWatchArgs(entries []*WatchRequest_WatchEntry) ([]Event, error) { + var events []Event + for _, entry := range entries { + var newEvents []Event + var err error + switch entry.Kind { + case "": + return nil, errNoKindSpecified + case "node": + newEvents, err = ConvertNodeWatch(entry.Action, entry.Filters) + case "service": + newEvents, err = ConvertServiceWatch(entry.Action, entry.Filters) + case "task": + newEvents, err = ConvertTaskWatch(entry.Action, entry.Filters) + case "network": + newEvents, err = ConvertNetworkWatch(entry.Action, entry.Filters) + case "cluster": + newEvents, err = ConvertClusterWatch(entry.Action, entry.Filters) + case "secret": + 
newEvents, err = ConvertSecretWatch(entry.Action, entry.Filters) + case "config": + newEvents, err = ConvertConfigWatch(entry.Action, entry.Filters) + case "extension": + newEvents, err = ConvertExtensionWatch(entry.Action, entry.Filters) + default: + newEvents, err = ConvertResourceWatch(entry.Action, entry.Filters, entry.Kind) + } + if err != nil { + return nil, err + } + events = append(events, newEvents...) + } + return events, nil +} + +func (this *Meta) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Meta{`, + `Version:` + strings.Replace(strings.Replace(this.Version.String(), "Version", "Version", 1), `&`, ``, 1) + `,`, + `CreatedAt:` + strings.Replace(fmt.Sprintf("%v", this.CreatedAt), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `UpdatedAt:` + strings.Replace(fmt.Sprintf("%v", this.UpdatedAt), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `}`, + }, "") + return s +}
+func (this *Node) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Node{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NodeSpec", "NodeSpec", 1), `&`, ``, 1) + `,`, + `Description:` + strings.Replace(fmt.Sprintf("%v", this.Description), "NodeDescription", "NodeDescription", 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NodeStatus", "NodeStatus", 1), `&`, ``, 1) + `,`, + `ManagerStatus:` + strings.Replace(fmt.Sprintf("%v", this.ManagerStatus), "ManagerStatus", "ManagerStatus", 1) + `,`, + `Attachment:` + strings.Replace(fmt.Sprintf("%v", this.Attachment), "NetworkAttachment", "NetworkAttachment", 1) + `,`, + `Certificate:` + strings.Replace(strings.Replace(this.Certificate.String(), "Certificate", "Certificate", 1), `&`, ``, 1) + `,`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `Attachments:` + strings.Replace(fmt.Sprintf("%v", this.Attachments), "NetworkAttachment", "NetworkAttachment", 1) + `,`, + `}`, + }, "") + return s +}
+func (this *Service) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Service{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`, + `Endpoint:` + strings.Replace(fmt.Sprintf("%v", this.Endpoint), "Endpoint", "Endpoint", 1) + `,`, + `UpdateStatus:` + strings.Replace(fmt.Sprintf("%v", this.UpdateStatus), "UpdateStatus", "UpdateStatus", 1) + `,`, + `PreviousSpec:` + strings.Replace(fmt.Sprintf("%v", this.PreviousSpec), "ServiceSpec", "ServiceSpec", 1) + `,`, + `SpecVersion:` + strings.Replace(fmt.Sprintf("%v", this.SpecVersion), "Version", "Version", 1) + `,`, + `PreviousSpecVersion:` + strings.Replace(fmt.Sprintf("%v", this.PreviousSpecVersion), "Version", "Version", 1) + `,`, + `}`, + }, "") + return s +}
+func (this *Endpoint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Endpoint{`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "EndpointSpec", "EndpointSpec", 1) + `,`, + `Ports:` + strings.Replace(fmt.Sprintf("%v", this.Ports), "PortConfig", "PortConfig", 1) + `,`, + `VirtualIPs:` + strings.Replace(fmt.Sprintf("%v", this.VirtualIPs), "Endpoint_VirtualIP", "Endpoint_VirtualIP", 1) + `,`, + `}`, + },
"") + return s +} +func (this *Endpoint_VirtualIP) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Endpoint_VirtualIP{`, + `NetworkID:` + fmt.Sprintf("%v", this.NetworkID) + `,`, + `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, + `}`, + }, "") + return s +} +func (this *Task) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Task{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TaskSpec", "TaskSpec", 1), `&`, ``, 1) + `,`, + `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`, + `Slot:` + fmt.Sprintf("%v", this.Slot) + `,`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `ServiceAnnotations:` + strings.Replace(strings.Replace(this.ServiceAnnotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TaskStatus", "TaskStatus", 1), `&`, ``, 1) + `,`, + `DesiredState:` + fmt.Sprintf("%v", this.DesiredState) + `,`, + `Networks:` + strings.Replace(fmt.Sprintf("%v", this.Networks), "NetworkAttachment", "NetworkAttachment", 1) + `,`, + `Endpoint:` + strings.Replace(fmt.Sprintf("%v", this.Endpoint), "Endpoint", "Endpoint", 1) + `,`, + `LogDriver:` + strings.Replace(fmt.Sprintf("%v", this.LogDriver), "Driver", "Driver", 1) + `,`, + `SpecVersion:` + strings.Replace(fmt.Sprintf("%v", this.SpecVersion), "Version", "Version", 1) + `,`, + `AssignedGenericResources:` + strings.Replace(fmt.Sprintf("%v", this.AssignedGenericResources), "GenericResource", "GenericResource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkAttachment) String() string { + if this == nil { + return "nil" + } + keysForDriverAttachmentOpts := make([]string, 0, len(this.DriverAttachmentOpts)) + for k, _ := range this.DriverAttachmentOpts { + keysForDriverAttachmentOpts = append(keysForDriverAttachmentOpts, k) + } + sortkeys.Strings(keysForDriverAttachmentOpts) + mapStringForDriverAttachmentOpts := "map[string]string{" + for _, k := range keysForDriverAttachmentOpts { + mapStringForDriverAttachmentOpts += fmt.Sprintf("%v: %v,", k, this.DriverAttachmentOpts[k]) + } + mapStringForDriverAttachmentOpts += "}" + s := strings.Join([]string{`&NetworkAttachment{`, + `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`, + `Addresses:` + fmt.Sprintf("%v", this.Addresses) + `,`, + `Aliases:` + fmt.Sprintf("%v", this.Aliases) + `,`, + `DriverAttachmentOpts:` + mapStringForDriverAttachmentOpts + `,`, + `}`, + }, "") + return s +} +func (this *Network) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Network{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkSpec", "NetworkSpec", 1), `&`, ``, 1) + `,`, + `DriverState:` + strings.Replace(fmt.Sprintf("%v", this.DriverState), "Driver", "Driver", 1) + `,`, + `IPAM:` + strings.Replace(fmt.Sprintf("%v", this.IPAM), "IPAMOptions", "IPAMOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Cluster) String() string { + if this == nil { + return "nil" + } + keysForBlacklistedCertificates 
:= make([]string, 0, len(this.BlacklistedCertificates)) + for k, _ := range this.BlacklistedCertificates { + keysForBlacklistedCertificates = append(keysForBlacklistedCertificates, k) + } + sortkeys.Strings(keysForBlacklistedCertificates) + mapStringForBlacklistedCertificates := "map[string]*BlacklistedCertificate{" + for _, k := range keysForBlacklistedCertificates { + mapStringForBlacklistedCertificates += fmt.Sprintf("%v: %v,", k, this.BlacklistedCertificates[k]) + } + mapStringForBlacklistedCertificates += "}" + s := strings.Join([]string{`&Cluster{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterSpec", "ClusterSpec", 1), `&`, ``, 1) + `,`, + `RootCA:` + strings.Replace(strings.Replace(this.RootCA.String(), "RootCA", "RootCA", 1), `&`, ``, 1) + `,`, + `NetworkBootstrapKeys:` + strings.Replace(fmt.Sprintf("%v", this.NetworkBootstrapKeys), "EncryptionKey", "EncryptionKey", 1) + `,`, + `EncryptionKeyLamportClock:` + fmt.Sprintf("%v", this.EncryptionKeyLamportClock) + `,`, + `BlacklistedCertificates:` + mapStringForBlacklistedCertificates + `,`, + `UnlockKeys:` + strings.Replace(fmt.Sprintf("%v", this.UnlockKeys), "EncryptionKey", "EncryptionKey", 1) + `,`, + `FIPS:` + fmt.Sprintf("%v", this.FIPS) + `,`, + `}`, + }, "") + return s +} +func (this *Secret) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Secret{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SecretSpec", "SecretSpec", 1), `&`, ``, 1) + `,`, + `Internal:` + fmt.Sprintf("%v", this.Internal) + `,`, + `}`, + }, "") + return s +} +func (this *Config) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Config{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ConfigSpec", "ConfigSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Resource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Resource{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Payload:` + strings.Replace(fmt.Sprintf("%v", this.Payload), "Any", "google_protobuf3.Any", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Extension) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Extension{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `}`, + }, "") + return s +} +func valueToStringObjects(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return 
fmt.Sprintf("*%v", pv) +} +func (m *Meta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Meta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Meta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CreatedAt == nil { + m.CreatedAt = &google_protobuf.Timestamp{} + } + if err := m.CreatedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UpdatedAt == nil { + m.UpdatedAt = &google_protobuf.Timestamp{} + } + if err := m.UpdatedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Node) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Node: wiretype end group for non-group") + } + if 
fieldNum <= 0 { + return fmt.Errorf("proto: Node: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Description == nil { + m.Description = &NodeDescription{} + } + if err := m.Description.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ManagerStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } 
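+			// Varint decode: each byte contributes its low 7 bits,
+			// least-significant group first; a set high bit (b >= 0x80)
+			// signals that another byte follows.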
+ b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ManagerStatus == nil { + m.ManagerStatus = &ManagerStatus{} + } + if err := m.ManagerStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attachment", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attachment == nil { + m.Attachment = &NetworkAttachment{} + } + if err := m.Attachment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Certificate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + m.Role = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Role |= (NodeRole(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attachments", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attachments = append(m.Attachments, &NetworkAttachment{}) + if err := m.Attachments[len(m.Attachments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Service) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + 
wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Service: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Service: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Endpoint == nil { + m.Endpoint = &Endpoint{} + } + if err := m.Endpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UpdateStatus == nil { + m.UpdateStatus = &UpdateStatus{} + } + if err := m.UpdateStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field PreviousSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PreviousSpec == nil { + m.PreviousSpec = &ServiceSpec{} + } + if err := m.PreviousSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpecVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SpecVersion == nil { + m.SpecVersion = &Version{} + } + if err := m.SpecVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousSpecVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PreviousSpecVersion == nil { + m.PreviousSpecVersion = &Version{} + } + if err := m.PreviousSpecVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Endpoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Endpoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Endpoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = 
&EndpointSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, &PortConfig{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VirtualIPs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VirtualIPs = append(m.VirtualIPs, &Endpoint_VirtualIP{}) + if err := m.VirtualIPs[len(m.VirtualIPs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Endpoint_VirtualIP) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VirtualIP: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VirtualIP: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NetworkID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Task) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Task: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Task: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Slot", wireType) + } + m.Slot = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Slot |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ServiceAnnotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredState", wireType) + } + m.DesiredState = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredState |= (TaskState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Networks = append(m.Networks, &NetworkAttachment{}) + if err := m.Networks[len(m.Networks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Endpoint == nil { + m.Endpoint = &Endpoint{} + } + if err := m.Endpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogDriver", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LogDriver == nil { + m.LogDriver = &Driver{} + } + if err := m.LogDriver.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpecVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SpecVersion == nil { + m.SpecVersion = &Version{} + } + if err := m.SpecVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AssignedGenericResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AssignedGenericResources = append(m.AssignedGenericResources, &GenericResource{}) + if err := m.AssignedGenericResources[len(m.AssignedGenericResources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkAttachment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkAttachment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkAttachment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Network == nil { + m.Network = &Network{} + } + if err := m.Network.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Aliases = append(m.Aliases, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverAttachmentOpts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DriverAttachmentOpts == nil { + m.DriverAttachmentOpts = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthObjects + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthObjects + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.DriverAttachmentOpts[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Network) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Network: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Network: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } 
+ var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DriverState == nil { + m.DriverState = &Driver{} + } + if err := m.DriverState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPAM", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IPAM == nil { + m.IPAM = &IPAMOptions{} + } + if err := m.IPAM.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Cluster) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Cluster: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Cluster: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + 
var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RootCA", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RootCA.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkBootstrapKeys", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NetworkBootstrapKeys = append(m.NetworkBootstrapKeys, &EncryptionKey{}) + if err := m.NetworkBootstrapKeys[len(m.NetworkBootstrapKeys)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EncryptionKeyLamportClock", wireType) + } + m.EncryptionKeyLamportClock = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EncryptionKeyLamportClock |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType 
!= 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlacklistedCertificates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BlacklistedCertificates == nil { + m.BlacklistedCertificates = make(map[string]*BlacklistedCertificate) + } + var mapkey string + var mapvalue *BlacklistedCertificate + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthObjects + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthObjects + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthObjects + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &BlacklistedCertificate{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.BlacklistedCertificates[mapkey] = mapvalue + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UnlockKeys", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UnlockKeys = append(m.UnlockKeys, &EncryptionKey{}) + if err := m.UnlockKeys[len(m.UnlockKeys)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FIPS", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.FIPS = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Secret) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Secret: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Secret: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Internal", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Internal = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) 
> l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Config) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Config: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Config: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Resource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Resource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
Resource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Payload == nil { + m.Payload = &google_protobuf3.Any{} + } + if err := m.Payload.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + 
return nil +} +func (m *Extension) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Extension: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Extension: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipObjects(dAtA []byte) (n int, err error) { 
+ l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowObjects + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowObjects + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowObjects + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthObjects + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowObjects + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipObjects(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthObjects = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowObjects = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/objects.proto", fileDescriptorObjects) } + +var fileDescriptorObjects = []byte{ + // 1544 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0x4d, 0x73, 0xdb, 0x4c, + 0x1d, 0xaf, 0x6c, 0xc5, 0x2f, 0x7f, 0x27, 0x26, 0xec, 0x13, 0x82, 0x6a, 0x82, 0x1d, 0xfc, 0x0c, + 0xcc, 0x33, 0xcf, 0x74, 0x9c, 0x12, 0x0a, 0xa4, 0x81, 0xd2, 0xda, 0x49, 0x68, 0x3d, 0xa5, 0x34, + 0xb3, 0x29, 0x2d, 0x37, 0xb1, 0x91, 0x36, 0xae, 0xb0, 0xac, 0xd5, 0x68, 0xd7, 0x2e, 0xbe, 0xf5, + 0x1c, 0x3e, 0x40, 0x6e, 0x1c, 0xfa, 0x2d, 0xb8, 0x70, 0xe0, 0xd4, 0x23, 0xc3, 0x81, 0xe1, 0x94, + 0xa1, 0xfe, 0x16, 0xcc, 0x70, 0x60, 0x76, 0xb5, 0xb2, 0x95, 0x58, 0x79, 0x63, 0x3a, 0x19, 0x4e, + 0xd1, 0x6a, 0x7f, 0xbf, 0xff, 0x9b, 0xfe, 0x6f, 0x31, 0xdc, 0xeb, 0x79, 0xe2, 0xed, 0xf0, 0xb0, + 0xe5, 0xb0, 0xc1, 0x86, 0xcb, 0x9c, 0x3e, 0x8d, 0x36, 0xf8, 0x3b, 0x12, 0x0d, 0xfa, 0x9e, 0xd8, + 0x20, 0xa1, 0xb7, 0xc1, 0x0e, 0x7f, 0x4f, 0x1d, 0xc1, 0x5b, 0x61, 0xc4, 0x04, 0x43, 0x28, 0x86, + 0xb4, 0x12, 0x48, 0x6b, 0xf4, 0xc3, 0xda, 0xd7, 0x57, 0x48, 0x10, 0xe3, 0x90, 0x6a, 0xfe, 0x95, + 0x58, 0x1e, 0x52, 0x27, 0xc1, 0x36, 0x7a, 0x8c, 0xf5, 0x7c, 0xba, 0xa1, 0x4e, 0x87, 0xc3, 0xa3, + 0x0d, 0xe1, 0x0d, 0x28, 0x17, 0x64, 0x10, 0x6a, 0xc0, 0x4a, 0x8f, 0xf5, 0x98, 0x7a, 0xdc, 0x90, + 0x4f, 0xfa, 0xed, 0xdd, 0xf3, 0x34, 0x12, 0x8c, 0xf5, 0xd5, 0x4f, 0x2f, 0xd1, 0x3e, 0x85, 0x87, + 0xfe, 0xb0, 0xe7, 0x05, 0xfa, 0x4f, 0x4c, 0x6c, 0xfe, 0xd9, 0x00, 0xf3, 0x05, 0x15, 0x04, 0xfd, + 0x0c, 0x8a, 0x23, 0x1a, 0x71, 0x8f, 0x05, 0x96, 0xb1, 0x6e, 0x7c, 0x55, 0xd9, 0xfc, 0x4e, 0x6b, + 
0x3e, 0x22, 0xad, 0xd7, 0x31, 0xa4, 0x63, 0x7e, 0x3c, 0x6d, 0xdc, 0xc1, 0x09, 0x03, 0x3d, 0x04, + 0x70, 0x22, 0x4a, 0x04, 0x75, 0x6d, 0x22, 0xac, 0x9c, 0xe2, 0xd7, 0x5a, 0xb1, 0xb9, 0xad, 0x44, + 0x7f, 0xeb, 0x55, 0xe2, 0x25, 0x2e, 0x6b, 0x74, 0x5b, 0x48, 0xea, 0x30, 0x74, 0x13, 0x6a, 0xfe, + 0x6a, 0xaa, 0x46, 0xb7, 0x45, 0xf3, 0xfd, 0x02, 0x98, 0xbf, 0x66, 0x2e, 0x45, 0xab, 0x90, 0xf3, + 0x5c, 0x65, 0x76, 0xb9, 0x53, 0x98, 0x9c, 0x36, 0x72, 0xdd, 0x5d, 0x9c, 0xf3, 0x5c, 0xb4, 0x09, + 0xe6, 0x80, 0x0a, 0xa2, 0x0d, 0xb2, 0xb2, 0x1c, 0x92, 0xbe, 0x6b, 0x6f, 0x14, 0x16, 0xfd, 0x04, + 0x4c, 0xf9, 0xa9, 0xb4, 0x25, 0x6b, 0x59, 0x1c, 0xa9, 0xf3, 0x20, 0xa4, 0x4e, 0xc2, 0x93, 0x78, + 0xb4, 0x07, 0x15, 0x97, 0x72, 0x27, 0xf2, 0x42, 0x21, 0x63, 0x68, 0x2a, 0xfa, 0x97, 0x17, 0xd1, + 0x77, 0x67, 0x50, 0x9c, 0xe6, 0xa1, 0x9f, 0x43, 0x81, 0x0b, 0x22, 0x86, 0xdc, 0x5a, 0x50, 0x12, + 0xea, 0x17, 0x1a, 0xa0, 0x50, 0xda, 0x04, 0xcd, 0x41, 0xcf, 0xa0, 0x3a, 0x20, 0x01, 0xe9, 0xd1, + 0xc8, 0xd6, 0x52, 0x0a, 0x4a, 0xca, 0xf7, 0x32, 0x5d, 0x8f, 0x91, 0xb1, 0x20, 0xbc, 0x34, 0x48, + 0x1f, 0x51, 0x17, 0x80, 0x08, 0x41, 0x9c, 0xb7, 0x03, 0x1a, 0x08, 0xab, 0xa8, 0xa4, 0x7c, 0x3f, + 0xd3, 0x16, 0x2a, 0xde, 0xb1, 0xa8, 0xdf, 0x9e, 0x82, 0x3b, 0x39, 0xcb, 0xc0, 0x29, 0x32, 0x7a, + 0x0a, 0x15, 0x87, 0x46, 0xc2, 0x3b, 0xf2, 0x1c, 0x22, 0xa8, 0x55, 0x52, 0xb2, 0x1a, 0x59, 0xb2, + 0x76, 0x66, 0x30, 0xed, 0x58, 0x9a, 0x89, 0xee, 0x83, 0x19, 0x31, 0x9f, 0x5a, 0xe5, 0x75, 0xe3, + 0xab, 0xea, 0xc5, 0x9f, 0x06, 0x33, 0x9f, 0x62, 0x85, 0x94, 0xaa, 0x67, 0x86, 0x70, 0x0b, 0xd6, + 0xf3, 0xd7, 0x76, 0x03, 0xa7, 0x99, 0xdb, 0xab, 0xc7, 0x27, 0x4d, 0x04, 0xcb, 0x25, 0x63, 0xd9, + 0x50, 0x79, 0x66, 0xdc, 0x37, 0x7e, 0x6b, 0xfc, 0xce, 0x68, 0xfe, 0x27, 0x0f, 0xc5, 0x03, 0x1a, + 0x8d, 0x3c, 0xe7, 0xf3, 0x66, 0xe1, 0xc3, 0x33, 0x59, 0x98, 0x19, 0x2c, 0xad, 0x76, 0x2e, 0x11, + 0xb7, 0xa0, 0x44, 0x03, 0x37, 0x64, 0x5e, 0x20, 0x74, 0x16, 0x66, 0x46, 0x6a, 0x4f, 0x63, 0xf0, + 0x14, 0x8d, 0xf6, 0x60, 0x29, 0x2e, 0x2e, 0xfb, 0x4c, 0x0a, 0xae, 0x67, 0xd1, 0x7f, 0xa3, 0x80, + 0x3a, 0x77, 0x16, 0x87, 0xa9, 0x13, 0xda, 0x85, 0xa5, 0x30, 0xa2, 0x23, 0x8f, 0x0d, 0xb9, 0xad, + 0x9c, 0x28, 0x5c, 0xcb, 0x09, 0xbc, 0x98, 0xb0, 0xe4, 0x09, 0xfd, 0x02, 0x16, 0x25, 0xd9, 0x4e, + 0x9a, 0x12, 0x5c, 0xd9, 0x94, 0x70, 0x45, 0x12, 0xf4, 0x01, 0xbd, 0x84, 0x6f, 0x9d, 0xb1, 0x62, + 0x2a, 0xa8, 0x72, 0xb5, 0xa0, 0x2f, 0xd2, 0x96, 0xe8, 0x97, 0xdb, 0xe8, 0xf8, 0xa4, 0x59, 0x85, + 0xc5, 0x74, 0x0a, 0x34, 0xff, 0x94, 0x83, 0x52, 0x12, 0x48, 0xf4, 0x40, 0x7f, 0x33, 0xe3, 0xe2, + 0xa8, 0x25, 0x58, 0xe5, 0x6f, 0xfc, 0xb9, 0x1e, 0xc0, 0x42, 0xc8, 0x22, 0xc1, 0xad, 0x9c, 0x4a, + 0xce, 0xcc, 0x7a, 0xdf, 0x67, 0x91, 0xd8, 0x61, 0xc1, 0x91, 0xd7, 0xc3, 0x31, 0x18, 0xbd, 0x81, + 0xca, 0xc8, 0x8b, 0xc4, 0x90, 0xf8, 0xb6, 0x17, 0x72, 0x2b, 0xaf, 0xb8, 0x3f, 0xb8, 0x4c, 0x65, + 0xeb, 0x75, 0x8c, 0xef, 0xee, 0x77, 0xaa, 0x93, 0xd3, 0x06, 0x4c, 0x8f, 0x1c, 0x83, 0x16, 0xd5, + 0x0d, 0x79, 0xed, 0x05, 0x94, 0xa7, 0x37, 0xe8, 0x1e, 0x40, 0x10, 0xd7, 0x85, 0x3d, 0xcd, 0xec, + 0xa5, 0xc9, 0x69, 0xa3, 0xac, 0xab, 0xa5, 0xbb, 0x8b, 0xcb, 0x1a, 0xd0, 0x75, 0x11, 0x02, 0x93, + 0xb8, 0x6e, 0xa4, 0xf2, 0xbc, 0x8c, 0xd5, 0x73, 0xf3, 0x8f, 0x45, 0x30, 0x5f, 0x11, 0xde, 0xbf, + 0xed, 0x16, 0x2d, 0x75, 0xce, 0x55, 0xc6, 0x3d, 0x00, 0x1e, 0xe7, 0x9b, 0x74, 0xc7, 0x9c, 0xb9, + 0xa3, 0xb3, 0x50, 0xba, 0xa3, 0x01, 0xb1, 0x3b, 0xdc, 0x67, 0x42, 0x15, 0x81, 0x89, 0xd5, 0x33, + 0xfa, 0x12, 0x8a, 0x01, 0x73, 0x15, 0xbd, 0xa0, 0xe8, 0x30, 0x39, 0x6d, 0x14, 0x64, 0xd3, 0xe9, + 0xee, 0xe2, 0x82, 0xbc, 
0xea, 0xba, 0xaa, 0xe9, 0x04, 0x01, 0x13, 0x44, 0x36, 0x74, 0xae, 0x7b, + 0x67, 0x66, 0xf6, 0xb7, 0x67, 0xb0, 0xa4, 0xdf, 0xa5, 0x98, 0xe8, 0x35, 0x7c, 0x91, 0xd8, 0x9b, + 0x16, 0x58, 0xba, 0x89, 0x40, 0xa4, 0x25, 0xa4, 0x6e, 0x52, 0x33, 0xa6, 0x7c, 0xf1, 0x8c, 0x51, + 0x11, 0xcc, 0x9a, 0x31, 0x1d, 0x58, 0x72, 0x29, 0xf7, 0x22, 0xea, 0xaa, 0x36, 0x41, 0x55, 0x65, + 0x56, 0x37, 0xbf, 0x7b, 0x99, 0x10, 0x8a, 0x17, 0x35, 0x47, 0x9d, 0x50, 0x1b, 0x4a, 0x3a, 0x6f, + 0xb8, 0x55, 0xb9, 0x49, 0x53, 0x9e, 0xd2, 0xce, 0xb4, 0xb9, 0xc5, 0x1b, 0xb5, 0xb9, 0x87, 0x00, + 0x3e, 0xeb, 0xd9, 0x6e, 0xe4, 0x8d, 0x68, 0x64, 0x2d, 0xe9, 0x8d, 0x23, 0x83, 0xbb, 0xab, 0x10, + 0xb8, 0xec, 0xb3, 0x5e, 0xfc, 0x38, 0xd7, 0x94, 0xaa, 0x37, 0x6c, 0x4a, 0x04, 0x6a, 0x84, 0x73, + 0xaf, 0x17, 0x50, 0xd7, 0xee, 0xd1, 0x80, 0x46, 0x9e, 0x63, 0x47, 0x94, 0xb3, 0x61, 0xe4, 0x50, + 0x6e, 0x7d, 0x43, 0x45, 0x22, 0x73, 0x67, 0x78, 0x1a, 0x83, 0xb1, 0xc6, 0x62, 0x2b, 0x11, 0x73, + 0xee, 0x82, 0x6f, 0xd7, 0x8e, 0x4f, 0x9a, 0xab, 0xb0, 0x92, 0x6e, 0x53, 0x5b, 0xc6, 0x13, 0xe3, + 0x99, 0xb1, 0x6f, 0x34, 0xff, 0x9a, 0x83, 0x6f, 0xce, 0xc5, 0x14, 0xfd, 0x18, 0x8a, 0x3a, 0xaa, + 0x97, 0x6d, 0x7e, 0x9a, 0x87, 0x13, 0x2c, 0x5a, 0x83, 0xb2, 0x2c, 0x71, 0xca, 0x39, 0x8d, 0x9b, + 0x57, 0x19, 0xcf, 0x5e, 0x20, 0x0b, 0x8a, 0xc4, 0xf7, 0x88, 0xbc, 0xcb, 0xab, 0xbb, 0xe4, 0x88, + 0x86, 0xb0, 0x1a, 0x87, 0xde, 0x9e, 0x0d, 0x58, 0x9b, 0x85, 0x82, 0x5b, 0xa6, 0xf2, 0xff, 0xf1, + 0xb5, 0x32, 0x41, 0x7f, 0x9c, 0xd9, 0x8b, 0x97, 0xa1, 0xe0, 0x7b, 0x81, 0x88, 0xc6, 0x78, 0xc5, + 0xcd, 0xb8, 0xaa, 0x3d, 0x85, 0xbb, 0x17, 0x52, 0xd0, 0x32, 0xe4, 0xfb, 0x74, 0x1c, 0xb7, 0x27, + 0x2c, 0x1f, 0xd1, 0x0a, 0x2c, 0x8c, 0x88, 0x3f, 0xa4, 0xba, 0x9b, 0xc5, 0x87, 0xed, 0xdc, 0x96, + 0xd1, 0xfc, 0x90, 0x83, 0xa2, 0x36, 0xe7, 0xb6, 0x47, 0xbe, 0x56, 0x3b, 0xd7, 0xd8, 0x1e, 0xc1, + 0xa2, 0x0e, 0x69, 0x5c, 0x91, 0xe6, 0x95, 0x39, 0x5d, 0x89, 0xf1, 0x71, 0x35, 0x3e, 0x02, 0xd3, + 0x0b, 0xc9, 0x40, 0x8f, 0xfb, 0x4c, 0xcd, 0xdd, 0xfd, 0xf6, 0x8b, 0x97, 0x61, 0xdc, 0x58, 0x4a, + 0x93, 0xd3, 0x86, 0x29, 0x5f, 0x60, 0x45, 0xcb, 0x1c, 0x8c, 0x7f, 0x5f, 0x80, 0xe2, 0x8e, 0x3f, + 0xe4, 0x82, 0x46, 0xb7, 0x1d, 0x24, 0xad, 0x76, 0x2e, 0x48, 0x3b, 0x50, 0x8c, 0x18, 0x13, 0xb6, + 0x43, 0x2e, 0x8b, 0x0f, 0x66, 0x4c, 0xec, 0xb4, 0x3b, 0x55, 0x49, 0x94, 0xbd, 0x3d, 0x3e, 0xe3, + 0x82, 0xa4, 0xee, 0x10, 0xf4, 0x06, 0x56, 0x93, 0x89, 0x78, 0xc8, 0x98, 0xe0, 0x22, 0x22, 0xa1, + 0xdd, 0xa7, 0x63, 0xb9, 0x2b, 0xe5, 0x2f, 0x5a, 0xb4, 0xf7, 0x02, 0x27, 0x1a, 0xab, 0xe0, 0x3d, + 0xa7, 0x63, 0xbc, 0xa2, 0x05, 0x74, 0x12, 0xfe, 0x73, 0x3a, 0xe6, 0xe8, 0x31, 0xac, 0xd1, 0x29, + 0x4c, 0x4a, 0xb4, 0x7d, 0x32, 0x90, 0xb3, 0xde, 0x76, 0x7c, 0xe6, 0xf4, 0xd5, 0xb8, 0x31, 0xf1, + 0x5d, 0x9a, 0x16, 0xf5, 0xab, 0x18, 0xb1, 0x23, 0x01, 0x88, 0x83, 0x75, 0xe8, 0x13, 0xa7, 0xef, + 0x7b, 0x5c, 0xfe, 0x2f, 0x95, 0xda, 0x9b, 0xe5, 0xc4, 0x90, 0xb6, 0x6d, 0x5d, 0x12, 0xad, 0x56, + 0x67, 0xc6, 0x4d, 0x6d, 0xe1, 0xba, 0xa2, 0xbe, 0x7d, 0x98, 0x7d, 0x8b, 0x3a, 0x50, 0x19, 0x06, + 0x52, 0x7d, 0x1c, 0x83, 0xf2, 0x75, 0x63, 0x00, 0x31, 0x4b, 0x79, 0xbe, 0x06, 0xe6, 0x91, 0xdc, + 0x61, 0xe4, 0x18, 0x29, 0xc5, 0xc9, 0xf5, 0xcb, 0xee, 0xfe, 0x01, 0x56, 0x6f, 0x6b, 0x23, 0x58, + 0xbb, 0xcc, 0xb4, 0x8c, 0xca, 0x7d, 0x92, 0xae, 0xdc, 0xca, 0xe6, 0xd7, 0x59, 0xd6, 0x64, 0x8b, + 0x4c, 0x55, 0x79, 0x66, 0x52, 0xff, 0xc5, 0x80, 0xc2, 0x01, 0x75, 0x22, 0x2a, 0x3e, 0x6b, 0x4e, + 0x6f, 0x9d, 0xc9, 0xe9, 0x7a, 0xf6, 0x9a, 0x2c, 0xb5, 0xce, 0xa5, 0x74, 0x0d, 0x4a, 0x5e, 0x20, + 0x68, 0x14, 0x10, 0x5f, 0xe5, 0x74, 0x09, 0x4f, 
0xcf, 0x99, 0x0e, 0x7c, 0x30, 0xa0, 0x10, 0xef, + 0x91, 0xb7, 0xed, 0x40, 0xac, 0xf5, 0xbc, 0x03, 0x99, 0x46, 0xfe, 0xdb, 0x80, 0x52, 0x32, 0xce, + 0x3e, 0xab, 0x99, 0xe7, 0xf6, 0xb2, 0xfc, 0xff, 0xbc, 0x97, 0x21, 0x30, 0xfb, 0x5e, 0xa0, 0x37, + 0x48, 0xac, 0x9e, 0x51, 0x0b, 0x8a, 0x21, 0x19, 0xfb, 0x8c, 0xb8, 0xba, 0x8d, 0xae, 0xcc, 0xfd, + 0x86, 0xd1, 0x0e, 0xc6, 0x38, 0x01, 0x6d, 0xaf, 0x1c, 0x9f, 0x34, 0x97, 0xa1, 0x9a, 0xf6, 0xfc, + 0xad, 0xd1, 0xfc, 0x87, 0x01, 0xe5, 0xbd, 0x3f, 0x08, 0x1a, 0xa8, 0x6d, 0xe1, 0xff, 0xd2, 0xf9, + 0xf5, 0xf9, 0xdf, 0x39, 0xca, 0x67, 0x7e, 0xc2, 0xc8, 0xfa, 0xa8, 0x1d, 0xeb, 0xe3, 0xa7, 0xfa, + 0x9d, 0x7f, 0x7e, 0xaa, 0xdf, 0x79, 0x3f, 0xa9, 0x1b, 0x1f, 0x27, 0x75, 0xe3, 0x6f, 0x93, 0xba, + 0xf1, 0xaf, 0x49, 0xdd, 0x38, 0x2c, 0xa8, 0xf8, 0xfc, 0xe8, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x07, 0xc5, 0x5a, 0x5b, 0xae, 0x13, 0x00, 0x00, +} diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/objects.proto b/extender/code/vendor/github.com/docker/swarmkit/api/objects.proto new file mode 100644 index 000000000..5ef45f0a7 --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/api/objects.proto @@ -0,0 +1,464 @@ +syntax = "proto3"; + +package docker.swarmkit.v1; + +import "github.com/docker/swarmkit/api/types.proto"; + +import "github.com/docker/swarmkit/api/specs.proto"; +import "google/protobuf/timestamp.proto"; +import "gogoproto/gogo.proto"; +import "google/protobuf/any.proto"; +import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto"; + +// This file contains definitions for all first-class objects in the cluster +// API. Such types typically have a corresponding specification, with the +// naming XXXSpec, though not all do. + +// Meta contains metadata about objects. Every object contains a meta field. +message Meta { + // Version tracks the current version of the object. + Version version = 1 [(gogoproto.nullable) = false]; + + // Object timestamps. + // Note: can't use stdtime because these fields are nullable. + google.protobuf.Timestamp created_at = 2; + google.protobuf.Timestamp updated_at = 3; +} + +// Node provides the internal node state as seen by the cluster. +message Node { + option (docker.protobuf.plugin.store_object) = { + watch_selectors: { + id: true + id_prefix: true + name: true + name_prefix: true + custom: true + custom_prefix: true + role: true + membership: true + } + }; + + // ID specifies the identity of the node. + string id = 1; + + Meta meta = 2 [(gogoproto.nullable) = false]; + + // Spec defines the desired state of the node as specified by the user. + // The system will honor this and will *never* modify it. + NodeSpec spec = 3 [(gogoproto.nullable) = false]; + + // Description encapsulates the properties of the Node as reported by the + // agent. + NodeDescription description = 4; + + // Status provides the current status of the node, as seen by the manager. + NodeStatus status = 5 [(gogoproto.nullable) = false]; + + // ManagerStatus provides the current status of the node's manager + // component, if the node is a manager. + ManagerStatus manager_status = 6; + + // DEPRECATED: Use Attachments to find the ingress network. + // The node attachment to the ingress network. + NetworkAttachment attachment = 7 [deprecated=true]; + + // Certificate is the TLS certificate issued for the node, if any. + Certificate certificate = 8 [(gogoproto.nullable) = false]; + + // Role is the *observed* role for this node.
It differs from the + // desired role set in Node.Spec.Role because the role here is only + // updated after the Raft member list has been reconciled with the + // desired role from the spec. + // + // This field represents the current reconciled state. If an action is + // to be performed, first verify the role in the cert. This field only + // shows the privilege level that the CA would currently grant when + // issuing or renewing the node's certificate. + NodeRole role = 9; + + // Attachments enumerates the network attachments for the node to set up an + // endpoint on the node to be used for load balancing. Each overlay + // network, including the ingress network, will have a NetworkAttachment. + repeated NetworkAttachment attachments = 10; +} + +message Service { + option (docker.protobuf.plugin.store_object) = { + watch_selectors: { + id: true + id_prefix: true + name: true + name_prefix: true + custom: true + custom_prefix: true + } + }; + + string id = 1; + + Meta meta = 2 [(gogoproto.nullable) = false]; + + ServiceSpec spec = 3 [(gogoproto.nullable) = false]; + + // SpecVersion versions Spec, to identify changes in the spec. Note that + // this is not directly comparable to the service's Version. + Version spec_version = 10; + + // PreviousSpec is the previous service spec that was in place before + // "Spec". + ServiceSpec previous_spec = 6; + + // PreviousSpecVersion versions PreviousSpec. Note that this is not + // directly comparable to the service's Version. + Version previous_spec_version = 11; + + // Runtime state of service endpoint. This may be different + // from the spec version because the user may not have entered + // the optional fields like node_port or virtual_ip and it + // could be auto-allocated by the system. + Endpoint endpoint = 4; + + // UpdateStatus contains the status of an update, if one is in + // progress. + UpdateStatus update_status = 5; +} + +// Endpoint specifies all the network parameters required to +// correctly discover and load balance a service. +message Endpoint { + EndpointSpec spec = 1; + + // Runtime state of the exposed ports which may carry + // auto-allocated swarm ports in addition to the user + // configured information. + repeated PortConfig ports = 2; + + // An endpoint attachment specifies the data that the process + // of attaching an endpoint to a network creates. + + // VirtualIP specifies a set of networks this endpoint will be attached to + // and the IP addresses the target service will be made available under. + message VirtualIP { + // NetworkID for which this endpoint attachment was created. + string network_id = 1; + + // A virtual IP is used to address this service in the IP + // layer that the client can use to send requests to + // this service. A DNS A/AAAA query on the service + // name might return this IP to the client. This is + // strictly a logical IP and there may not be any + // interfaces assigned this IP address or any route + // created for this address. More than one may exist to + // accommodate both IPv4 and IPv6. + string addr = 2; + } + + // VirtualIPs specifies the IP addresses under which this endpoint will be + // made available. + repeated VirtualIP virtual_ips = 3 [(gogoproto.customname) = "VirtualIPs"]; +} + +// Task specifies the parameters for implementing a Spec. A task is effectively +// immutable and idempotent. Once it is dispatched to a node, it will not be +// dispatched to another node.
+message Task { + option (docker.protobuf.plugin.store_object) = { + watch_selectors: { + id: true + id_prefix: true + name: true + name_prefix: true + custom: true + custom_prefix: true + service_id: true + node_id: true + slot: true + desired_state: true + } + }; + + string id = 1; + + Meta meta = 2 [(gogoproto.nullable) = false]; + + // Spec defines the desired state of the task as specified by the user. + // The system will honor this and will *never* modify it. + TaskSpec spec = 3 [(gogoproto.nullable) = false]; + + // SpecVersion is copied from Service, to identify which version of the + // spec this task has. Note that this is not directly comparable to the + // service's Version. + Version spec_version = 14; + + // ServiceID indicates the service under which this task is orchestrated. This + // should almost always be set. + string service_id = 4; + + // Slot is the service slot number for a task. + // For example, if a replicated service has replicas = 2, there will be a + // task with slot = 1, and another with slot = 2. + uint64 slot = 5; + + // NodeID indicates the node to which the task is assigned. If this field + // is empty or not set, the task is unassigned. + string node_id = 6; + + // Annotations defines the names and labels for the runtime, as set by + // the cluster manager. + // + // As a backup, if this field has an empty name, the runtime will + // allocate a unique name for the actual container. + // + // NOTE(stevvooe): This preserves the ability for us to make naming + // decisions for tasks in the orchestrator, though this is left empty for now. + Annotations annotations = 7 [(gogoproto.nullable) = false]; + + // ServiceAnnotations is a direct copy of the service name and labels when + // this task is created. + // + // Labels set here will *not* be propagated to the runtime target, such as a + // container. Use labels on the runtime target for that purpose. + Annotations service_annotations = 8 [(gogoproto.nullable) = false]; + + TaskStatus status = 9 [(gogoproto.nullable) = false]; + + // DesiredState is the target state for the task. It is set to + // TaskStateRunning when a task is first created, and changed to + // TaskStateShutdown if the manager wants to terminate the task. This field + // is only written by the manager. + TaskState desired_state = 10; + + // List of network attachments by the task. + repeated NetworkAttachment networks = 11; + + // A copy of runtime state of service endpoint from Service + // object to be distributed to agents as part of the task. + Endpoint endpoint = 12; + + // LogDriver specifies the selected log driver to use for the task. Agent + // processes should always favor the value in this field. + // + // If present in the TaskSpec, this will be a copy of that value. The + // orchestrator may choose to insert a value here, which should be honored, + // such as a cluster default or policy-based value. + // + // If not present, the daemon's default will be used. + Driver log_driver = 13; + + repeated GenericResource assigned_generic_resources = 15; +} + +// NetworkAttachment specifies the network parameters of attachment to +// a single network by an object such as a task or node. +message NetworkAttachment { + // Network state as a whole becomes part of the object so that + // it is always available for use in agents so that agents + // don't have any other dependency during execution. + Network network = 1; + + // List of IPv4/IPv6 addresses that are assigned to the object + // as part of getting attached to this network.
+
+// NetworkAttachment specifies the network parameters of attachment to
+// a single network by an object such as a task or node.
+message NetworkAttachment {
+	// Network state as a whole becomes part of the object so that
+	// it is always available to agents, leaving agents with no other
+	// dependency during execution.
+	Network network = 1;
+
+	// List of IPv4/IPv6 addresses that are assigned to the object
+	// as part of getting attached to this network.
+	repeated string addresses = 2;
+
+	// List of aliases by which a task is resolved in a network.
+	repeated string aliases = 3;
+
+	// Map of all the driver attachment options for this network.
+	map<string, string> driver_attachment_opts = 4;
+}
+
+message Network {
+	option (docker.protobuf.plugin.store_object) = {
+		watch_selectors: {
+			id: true
+			id_prefix: true
+			name: true
+			name_prefix: true
+			custom: true
+			custom_prefix: true
+		}
+	};
+
+	string id = 1;
+
+	Meta meta = 2 [(gogoproto.nullable) = false];
+
+	NetworkSpec spec = 3 [(gogoproto.nullable) = false];
+
+	// Driver-specific operational state provided by the network driver.
+	Driver driver_state = 4;
+
+	// Runtime state of IPAM options. This may not reflect the
+	// ipam options from NetworkSpec.
+	IPAMOptions ipam = 5 [(gogoproto.customname) = "IPAM"];
+}
+
+// Cluster provides global cluster settings.
+message Cluster {
+	option (docker.protobuf.plugin.store_object) = {
+		watch_selectors: {
+			id: true
+			id_prefix: true
+			name: true
+			name_prefix: true
+			custom: true
+			custom_prefix: true
+		}
+	};
+
+	string id = 1;
+
+	Meta meta = 2 [(gogoproto.nullable) = false];
+
+	ClusterSpec spec = 3 [(gogoproto.nullable) = false];
+
+	// RootCA contains key material for the root CA.
+	RootCA root_ca = 4 [(gogoproto.nullable)=false, (gogoproto.customname) = "RootCA"];
+
+	// Symmetric encryption key distributed by the lead manager. Used by agents
+	// for securing network bootstrapping and communication.
+	repeated EncryptionKey network_bootstrap_keys = 5;
+
+	// Logical clock used to timestamp every key. It allows other managers
+	// and agents to unambiguously identify the older key to be deleted when
+	// a new key is allocated on key rotation.
+	uint64 encryption_key_lamport_clock = 6;
+
+	// BlacklistedCertificates tracks certificates that should no longer
+	// be honored, such as those of nodes that have been removed from the
+	// swarm. It's a mapping from CN -> BlacklistedCertificate.
+	map<string, BlacklistedCertificate> blacklisted_certificates = 8;
+
+	// UnlockKeys defines the keys that lock node data at rest. For example,
+	// this would contain the key encrypting key (KEK) that will encrypt the
+	// manager TLS keys at rest and the raft encryption keys at rest.
+	// If the key is empty, the node will be unlocked (will not require a key
+	// to start up from a shut down state).
+	repeated EncryptionKey unlock_keys = 9;
+
+	// FIPS specifies whether this cluster should be in FIPS mode. This changes
+	// the format of the join tokens, and nodes that are not FIPS-enabled should
+	// reject joining the cluster. Nodes that report themselves to be non-FIPS
+	// should be rejected from the cluster.
+	bool fips = 10 [(gogoproto.customname) = "FIPS"];
+}
+
+// Secret represents a secret that should be passed to a container or a node,
+// and is immutable.
+message Secret {
+	option (docker.protobuf.plugin.store_object) = {
+		watch_selectors: {
+			id: true
+			id_prefix: true
+			name: true
+			name_prefix: true
+			custom: true
+			custom_prefix: true
+		}
+	};
+
+	string id = 1;
+
+	Meta meta = 2 [(gogoproto.nullable) = false];
+
+	// Spec contains the actual secret data, as well as any context around the
+	// secret data that the user provides.
+	SecretSpec spec = 3 [(gogoproto.nullable) = false];
+
+	// Whether the secret is an internal secret (not set by a user) or not.
+	bool internal = 4;
+}
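The encryption_key_lamport_clock comment above is the heart of key rotation: every distributed key carries a logical timestamp, so any manager or agent can pick the newest key (and delete the older one) without comparing wall clocks. A self-contained illustration of that rule; the struct here is a local stand-in for the EncryptionKey message, not the generated type:

```go
package main

import "fmt"

// encryptionKey is a stand-in for the EncryptionKey message above; the
// lamport field mirrors the cluster-wide encryption_key_lamport_clock
// stamped onto each key when it is allocated.
type encryptionKey struct {
	key     []byte
	lamport uint64
}

// newest picks the most recently allocated key: rotation bumps the logical
// clock, so the highest stamp wins unambiguously, with no reliance on
// synchronized wall-clock time between managers and agents.
func newest(keys []encryptionKey) encryptionKey {
	best := keys[0]
	for _, k := range keys[1:] {
		if k.lamport > best.lamport {
			best = k
		}
	}
	return best
}

func main() {
	keys := []encryptionKey{
		{key: []byte("rotated-out"), lamport: 6},
		{key: []byte("current"), lamport: 7},
	}
	fmt.Printf("keep %q, delete the rest\n", newest(keys).key)
}
```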
+
+// Config represents a set of configuration files that should be passed to a
+// container.
+message Config {
+	option (docker.protobuf.plugin.store_object) = {
+		watch_selectors: {
+			id: true
+			id_prefix: true
+			name: true
+			name_prefix: true
+			custom: true
+			custom_prefix: true
+		}
+	};
+
+	string id = 1;
+
+	Meta meta = 2 [(gogoproto.nullable) = false];
+
+	// Spec contains the actual config data, as well as any context around the
+	// config data that the user provides.
+	ConfigSpec spec = 3 [(gogoproto.nullable) = false];
+}
+
+// Resource is a top-level object with externally defined content and indexing.
+// SwarmKit can serve as a store for these objects without understanding their
+// meanings.
+message Resource {
+	option (docker.protobuf.plugin.store_object) = {
+		watch_selectors: {
+			id: true
+			id_prefix: true
+			name: true
+			name_prefix: true
+			custom: true
+			custom_prefix: true
+			kind: true
+		}
+	};
+
+	string id = 1 [(gogoproto.customname) = "ID"];
+
+	Meta meta = 2 [(gogoproto.nullable) = false];
+
+	Annotations annotations = 3 [(gogoproto.nullable) = false];
+
+	// Kind identifies this class of object. It is essentially a namespace
+	// to keep IDs or indices from colliding between unrelated Resource
+	// objects. This must correspond to the name of an Extension.
+	string kind = 4;
+
+	// Payload bytes. This data is not interpreted in any way by SwarmKit.
+	// By convention, it should be a marshalled protocol buffers message.
+	google.protobuf.Any payload = 5;
+}
+
+// Extension declares a type of "resource" object. This message provides some
+// metadata about the objects.
+message Extension {
+	option (docker.protobuf.plugin.store_object) = {
+		watch_selectors: {
+			id: true
+			id_prefix: true
+			name: true
+			name_prefix: true
+			custom: true
+			custom_prefix: true
+		}
+	};
+
+	string id = 1 [(gogoproto.customname) = "ID"];
+
+	Meta meta = 2 [(gogoproto.nullable) = false];
+
+	Annotations annotations = 3 [(gogoproto.nullable) = false];
+
+	string description = 4;
+
+	// TODO(aaronl): Add optional indexing capabilities. It would be
+	// extremely useful to be able to automatically introspect protobuf, json,
+	// etc. objects and index them based on a schema and field
+	// paths defined here.
+	//
+	//oneof Schema {
+	//	google.protobuf.Descriptor protobuf = 1;
+	//	bytes json = 2;
+	//}
+	//
+	//Schema schema = 5;
+	//
+	// // Indices, with values expressed as Go templates.
+	//repeated IndexEntry index_templates = 6;
+}
diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/raft.pb.go b/extender/code/vendor/github.com/docker/swarmkit/api/raft.pb.go
new file mode 100644
index 000000000..058b29450
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/swarmkit/api/raft.pb.go
@@ -0,0 +1,4008 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/docker/swarmkit/api/raft.proto + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import raftpb "github.com/coreos/etcd/raft/raftpb" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" +// skipping weak import docker_protobuf_plugin "github.com/docker/swarmkit/protobuf/plugin" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" + +import raftselector "github.com/docker/swarmkit/manager/raftselector" +import codes "google.golang.org/grpc/codes" +import status "google.golang.org/grpc/status" +import metadata "google.golang.org/grpc/metadata" +import peer "google.golang.org/grpc/peer" +import rafttime "time" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// StoreActionKind defines the operation to take on the store for the target of +// a storage action. +type StoreActionKind int32 + +const ( + StoreActionKindUnknown StoreActionKind = 0 + StoreActionKindCreate StoreActionKind = 1 + StoreActionKindUpdate StoreActionKind = 2 + StoreActionKindRemove StoreActionKind = 3 +) + +var StoreActionKind_name = map[int32]string{ + 0: "UNKNOWN", + 1: "STORE_ACTION_CREATE", + 2: "STORE_ACTION_UPDATE", + 3: "STORE_ACTION_REMOVE", +} +var StoreActionKind_value = map[string]int32{ + "UNKNOWN": 0, + "STORE_ACTION_CREATE": 1, + "STORE_ACTION_UPDATE": 2, + "STORE_ACTION_REMOVE": 3, +} + +func (x StoreActionKind) String() string { + return proto.EnumName(StoreActionKind_name, int32(x)) +} +func (StoreActionKind) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } + +type RaftMember struct { + // RaftID specifies the internal ID used by the manager in a raft context, it can never be modified + // and is used only for information purposes + RaftID uint64 `protobuf:"varint,1,opt,name=raft_id,json=raftId,proto3" json:"raft_id,omitempty"` + // NodeID is the node's ID. + NodeID string `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + // Addr specifies the address of the member + Addr string `protobuf:"bytes,3,opt,name=addr,proto3" json:"addr,omitempty"` + // Status provides the current status of the manager from the perspective of another manager. + Status RaftMemberStatus `protobuf:"bytes,4,opt,name=status" json:"status"` +} + +func (m *RaftMember) Reset() { *m = RaftMember{} } +func (*RaftMember) ProtoMessage() {} +func (*RaftMember) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } + +type JoinRequest struct { + // Addr specifies the address of the member + Addr string `protobuf:"bytes,1,opt,name=addr,proto3" json:"addr,omitempty"` +} + +func (m *JoinRequest) Reset() { *m = JoinRequest{} } +func (*JoinRequest) ProtoMessage() {} +func (*JoinRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} } + +type JoinResponse struct { + // RaftID is the ID assigned to the new member. + RaftID uint64 `protobuf:"varint,1,opt,name=raft_id,json=raftId,proto3" json:"raft_id,omitempty"` + // Members is the membership set of the cluster. + Members []*RaftMember `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` + // RemovedMembers is a list of members that have been removed from + // the cluster, so the new node can avoid communicating with them. 
+ RemovedMembers []uint64 `protobuf:"varint,3,rep,name=removed_members,json=removedMembers" json:"removed_members,omitempty"` +} + +func (m *JoinResponse) Reset() { *m = JoinResponse{} } +func (*JoinResponse) ProtoMessage() {} +func (*JoinResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} } + +type LeaveRequest struct { + Node *RaftMember `protobuf:"bytes,1,opt,name=node" json:"node,omitempty"` +} + +func (m *LeaveRequest) Reset() { *m = LeaveRequest{} } +func (*LeaveRequest) ProtoMessage() {} +func (*LeaveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{3} } + +type LeaveResponse struct { +} + +func (m *LeaveResponse) Reset() { *m = LeaveResponse{} } +func (*LeaveResponse) ProtoMessage() {} +func (*LeaveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{4} } + +type ProcessRaftMessageRequest struct { + Message *raftpb.Message `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"` +} + +func (m *ProcessRaftMessageRequest) Reset() { *m = ProcessRaftMessageRequest{} } +func (*ProcessRaftMessageRequest) ProtoMessage() {} +func (*ProcessRaftMessageRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{5} } + +type ProcessRaftMessageResponse struct { +} + +func (m *ProcessRaftMessageResponse) Reset() { *m = ProcessRaftMessageResponse{} } +func (*ProcessRaftMessageResponse) ProtoMessage() {} +func (*ProcessRaftMessageResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{6} } + +// Raft message streaming request. +type StreamRaftMessageRequest struct { + Message *raftpb.Message `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"` +} + +func (m *StreamRaftMessageRequest) Reset() { *m = StreamRaftMessageRequest{} } +func (*StreamRaftMessageRequest) ProtoMessage() {} +func (*StreamRaftMessageRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{7} } + +// Raft message streaming response. +type StreamRaftMessageResponse struct { +} + +func (m *StreamRaftMessageResponse) Reset() { *m = StreamRaftMessageResponse{} } +func (*StreamRaftMessageResponse) ProtoMessage() {} +func (*StreamRaftMessageResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{8} } + +type ResolveAddressRequest struct { + // raft_id is the ID to resolve to an address. 
+ RaftID uint64 `protobuf:"varint,1,opt,name=raft_id,json=raftId,proto3" json:"raft_id,omitempty"` +} + +func (m *ResolveAddressRequest) Reset() { *m = ResolveAddressRequest{} } +func (*ResolveAddressRequest) ProtoMessage() {} +func (*ResolveAddressRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{9} } + +type ResolveAddressResponse struct { + // Addr specifies the address of the member + Addr string `protobuf:"bytes,1,opt,name=addr,proto3" json:"addr,omitempty"` +} + +func (m *ResolveAddressResponse) Reset() { *m = ResolveAddressResponse{} } +func (*ResolveAddressResponse) ProtoMessage() {} +func (*ResolveAddressResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{10} } + +// Contains one of many protobuf encoded objects to replicate +// over the raft backend with a request ID to track when the +// action is effectively applied +type InternalRaftRequest struct { + ID uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Action []StoreAction `protobuf:"bytes,2,rep,name=action" json:"action"` +} + +func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} } +func (*InternalRaftRequest) ProtoMessage() {} +func (*InternalRaftRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{11} } + +// StoreAction defines a target and operation to apply on the storage system. +type StoreAction struct { + Action StoreActionKind `protobuf:"varint,1,opt,name=action,proto3,enum=docker.swarmkit.v1.StoreActionKind" json:"action,omitempty"` + // Types that are valid to be assigned to Target: + // *StoreAction_Node + // *StoreAction_Service + // *StoreAction_Task + // *StoreAction_Network + // *StoreAction_Cluster + // *StoreAction_Secret + // *StoreAction_Resource + // *StoreAction_Extension + // *StoreAction_Config + Target isStoreAction_Target `protobuf_oneof:"target"` +} + +func (m *StoreAction) Reset() { *m = StoreAction{} } +func (*StoreAction) ProtoMessage() {} +func (*StoreAction) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{12} } + +type isStoreAction_Target interface { + isStoreAction_Target() + MarshalTo([]byte) (int, error) + Size() int +} + +type StoreAction_Node struct { + Node *Node `protobuf:"bytes,2,opt,name=node,oneof"` +} +type StoreAction_Service struct { + Service *Service `protobuf:"bytes,3,opt,name=service,oneof"` +} +type StoreAction_Task struct { + Task *Task `protobuf:"bytes,4,opt,name=task,oneof"` +} +type StoreAction_Network struct { + Network *Network `protobuf:"bytes,5,opt,name=network,oneof"` +} +type StoreAction_Cluster struct { + Cluster *Cluster `protobuf:"bytes,6,opt,name=cluster,oneof"` +} +type StoreAction_Secret struct { + Secret *Secret `protobuf:"bytes,7,opt,name=secret,oneof"` +} +type StoreAction_Resource struct { + Resource *Resource `protobuf:"bytes,8,opt,name=resource,oneof"` +} +type StoreAction_Extension struct { + Extension *Extension `protobuf:"bytes,9,opt,name=extension,oneof"` +} +type StoreAction_Config struct { + Config *Config `protobuf:"bytes,10,opt,name=config,oneof"` +} + +func (*StoreAction_Node) isStoreAction_Target() {} +func (*StoreAction_Service) isStoreAction_Target() {} +func (*StoreAction_Task) isStoreAction_Target() {} +func (*StoreAction_Network) isStoreAction_Target() {} +func (*StoreAction_Cluster) isStoreAction_Target() {} +func (*StoreAction_Secret) isStoreAction_Target() {} +func (*StoreAction_Resource) isStoreAction_Target() {} +func (*StoreAction_Extension) isStoreAction_Target() {} +func (*StoreAction_Config) isStoreAction_Target() {} 
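The wrapper types and stubs above are the whole consumer surface of the Target oneof: code that applies replicated store actions type-switches on the concrete wrapper rather than inspecting raw fields. A minimal sketch, written as if it lived in the same api package and using only GetTarget, the wrapper types, and StoreActionKind's String method from this file:

```go
// describeStoreAction renders a replicated action for logging. A real
// consumer would create/update/remove the wrapped object in its local
// store in each case instead of returning a string.
func describeStoreAction(sa *StoreAction) string {
	switch sa.GetTarget().(type) {
	case *StoreAction_Node:
		return sa.Action.String() + " on a Node"
	case *StoreAction_Service:
		return sa.Action.String() + " on a Service"
	case *StoreAction_Task:
		return sa.Action.String() + " on a Task"
	case nil:
		return sa.Action.String() + " with no target set"
	default:
		return sa.Action.String() + " on another object kind"
	}
}
```

The per-type getters (GetNode, GetService, and so on, defined just below) cover the common case where the caller already knows which kind it wants and simply needs nil back otherwise.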
+ +func (m *StoreAction) GetTarget() isStoreAction_Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *StoreAction) GetNode() *Node { + if x, ok := m.GetTarget().(*StoreAction_Node); ok { + return x.Node + } + return nil +} + +func (m *StoreAction) GetService() *Service { + if x, ok := m.GetTarget().(*StoreAction_Service); ok { + return x.Service + } + return nil +} + +func (m *StoreAction) GetTask() *Task { + if x, ok := m.GetTarget().(*StoreAction_Task); ok { + return x.Task + } + return nil +} + +func (m *StoreAction) GetNetwork() *Network { + if x, ok := m.GetTarget().(*StoreAction_Network); ok { + return x.Network + } + return nil +} + +func (m *StoreAction) GetCluster() *Cluster { + if x, ok := m.GetTarget().(*StoreAction_Cluster); ok { + return x.Cluster + } + return nil +} + +func (m *StoreAction) GetSecret() *Secret { + if x, ok := m.GetTarget().(*StoreAction_Secret); ok { + return x.Secret + } + return nil +} + +func (m *StoreAction) GetResource() *Resource { + if x, ok := m.GetTarget().(*StoreAction_Resource); ok { + return x.Resource + } + return nil +} + +func (m *StoreAction) GetExtension() *Extension { + if x, ok := m.GetTarget().(*StoreAction_Extension); ok { + return x.Extension + } + return nil +} + +func (m *StoreAction) GetConfig() *Config { + if x, ok := m.GetTarget().(*StoreAction_Config); ok { + return x.Config + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*StoreAction) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _StoreAction_OneofMarshaler, _StoreAction_OneofUnmarshaler, _StoreAction_OneofSizer, []interface{}{ + (*StoreAction_Node)(nil), + (*StoreAction_Service)(nil), + (*StoreAction_Task)(nil), + (*StoreAction_Network)(nil), + (*StoreAction_Cluster)(nil), + (*StoreAction_Secret)(nil), + (*StoreAction_Resource)(nil), + (*StoreAction_Extension)(nil), + (*StoreAction_Config)(nil), + } +} + +func _StoreAction_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*StoreAction) + // target + switch x := m.Target.(type) { + case *StoreAction_Node: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Node); err != nil { + return err + } + case *StoreAction_Service: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Service); err != nil { + return err + } + case *StoreAction_Task: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Task); err != nil { + return err + } + case *StoreAction_Network: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Network); err != nil { + return err + } + case *StoreAction_Cluster: + _ = b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Cluster); err != nil { + return err + } + case *StoreAction_Secret: + _ = b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Secret); err != nil { + return err + } + case *StoreAction_Resource: + _ = b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Resource); err != nil { + return err + } + case *StoreAction_Extension: + _ = b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Extension); err != nil { + return err + } + case *StoreAction_Config: + _ = b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Config); err != nil { + return err + } + case nil: + default: + return 
fmt.Errorf("StoreAction.Target has unexpected type %T", x) + } + return nil +} + +func _StoreAction_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*StoreAction) + switch tag { + case 2: // target.node + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Node) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Node{msg} + return true, err + case 3: // target.service + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Service) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Service{msg} + return true, err + case 4: // target.task + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Task) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Task{msg} + return true, err + case 5: // target.network + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Network) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Network{msg} + return true, err + case 6: // target.cluster + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Cluster) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Cluster{msg} + return true, err + case 7: // target.secret + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Secret) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Secret{msg} + return true, err + case 8: // target.resource + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Resource) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Resource{msg} + return true, err + case 9: // target.extension + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Extension) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Extension{msg} + return true, err + case 10: // target.config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Config) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Config{msg} + return true, err + default: + return false, nil + } +} + +func _StoreAction_OneofSizer(msg proto.Message) (n int) { + m := msg.(*StoreAction) + // target + switch x := m.Target.(type) { + case *StoreAction_Node: + s := proto.Size(x.Node) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Service: + s := proto.Size(x.Service) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Task: + s := proto.Size(x.Task) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Network: + s := proto.Size(x.Network) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Cluster: + s := proto.Size(x.Cluster) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Secret: + s := proto.Size(x.Secret) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Resource: + s := proto.Size(x.Resource) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Extension: + s := proto.Size(x.Extension) + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Config: + 
s := proto.Size(x.Config) + n += proto.SizeVarint(10<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*RaftMember)(nil), "docker.swarmkit.v1.RaftMember") + proto.RegisterType((*JoinRequest)(nil), "docker.swarmkit.v1.JoinRequest") + proto.RegisterType((*JoinResponse)(nil), "docker.swarmkit.v1.JoinResponse") + proto.RegisterType((*LeaveRequest)(nil), "docker.swarmkit.v1.LeaveRequest") + proto.RegisterType((*LeaveResponse)(nil), "docker.swarmkit.v1.LeaveResponse") + proto.RegisterType((*ProcessRaftMessageRequest)(nil), "docker.swarmkit.v1.ProcessRaftMessageRequest") + proto.RegisterType((*ProcessRaftMessageResponse)(nil), "docker.swarmkit.v1.ProcessRaftMessageResponse") + proto.RegisterType((*StreamRaftMessageRequest)(nil), "docker.swarmkit.v1.StreamRaftMessageRequest") + proto.RegisterType((*StreamRaftMessageResponse)(nil), "docker.swarmkit.v1.StreamRaftMessageResponse") + proto.RegisterType((*ResolveAddressRequest)(nil), "docker.swarmkit.v1.ResolveAddressRequest") + proto.RegisterType((*ResolveAddressResponse)(nil), "docker.swarmkit.v1.ResolveAddressResponse") + proto.RegisterType((*InternalRaftRequest)(nil), "docker.swarmkit.v1.InternalRaftRequest") + proto.RegisterType((*StoreAction)(nil), "docker.swarmkit.v1.StoreAction") + proto.RegisterEnum("docker.swarmkit.v1.StoreActionKind", StoreActionKind_name, StoreActionKind_value) +} + +type authenticatedWrapperRaftServer struct { + local RaftServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperRaftServer(local RaftServer, authorize func(context.Context, []string) error) RaftServer { + return &authenticatedWrapperRaftServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperRaftServer) ProcessRaftMessage(ctx context.Context, r *ProcessRaftMessageRequest) (*ProcessRaftMessageResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ProcessRaftMessage(ctx, r) +} + +func (p *authenticatedWrapperRaftServer) StreamRaftMessage(stream Raft_StreamRaftMessageServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-manager"}); err != nil { + return err + } + return p.local.StreamRaftMessage(stream) +} + +func (p *authenticatedWrapperRaftServer) ResolveAddress(ctx context.Context, r *ResolveAddressRequest) (*ResolveAddressResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ResolveAddress(ctx, r) +} + +type authenticatedWrapperRaftMembershipServer struct { + local RaftMembershipServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperRaftMembershipServer(local RaftMembershipServer, authorize func(context.Context, []string) error) RaftMembershipServer { + return &authenticatedWrapperRaftMembershipServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperRaftMembershipServer) Join(ctx context.Context, r *JoinRequest) (*JoinResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.Join(ctx, r) +} + +func (p *authenticatedWrapperRaftMembershipServer) Leave(ctx context.Context, r *LeaveRequest) (*LeaveResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.Leave(ctx, 
r) +} + +func (m *RaftMember) Copy() *RaftMember { + if m == nil { + return nil + } + o := &RaftMember{} + o.CopyFrom(m) + return o +} + +func (m *RaftMember) CopyFrom(src interface{}) { + + o := src.(*RaftMember) + *m = *o + deepcopy.Copy(&m.Status, &o.Status) +} + +func (m *JoinRequest) Copy() *JoinRequest { + if m == nil { + return nil + } + o := &JoinRequest{} + o.CopyFrom(m) + return o +} + +func (m *JoinRequest) CopyFrom(src interface{}) { + + o := src.(*JoinRequest) + *m = *o +} + +func (m *JoinResponse) Copy() *JoinResponse { + if m == nil { + return nil + } + o := &JoinResponse{} + o.CopyFrom(m) + return o +} + +func (m *JoinResponse) CopyFrom(src interface{}) { + + o := src.(*JoinResponse) + *m = *o + if o.Members != nil { + m.Members = make([]*RaftMember, len(o.Members)) + for i := range m.Members { + m.Members[i] = &RaftMember{} + deepcopy.Copy(m.Members[i], o.Members[i]) + } + } + + if o.RemovedMembers != nil { + m.RemovedMembers = make([]uint64, len(o.RemovedMembers)) + copy(m.RemovedMembers, o.RemovedMembers) + } + +} + +func (m *LeaveRequest) Copy() *LeaveRequest { + if m == nil { + return nil + } + o := &LeaveRequest{} + o.CopyFrom(m) + return o +} + +func (m *LeaveRequest) CopyFrom(src interface{}) { + + o := src.(*LeaveRequest) + *m = *o + if o.Node != nil { + m.Node = &RaftMember{} + deepcopy.Copy(m.Node, o.Node) + } +} + +func (m *LeaveResponse) Copy() *LeaveResponse { + if m == nil { + return nil + } + o := &LeaveResponse{} + o.CopyFrom(m) + return o +} + +func (m *LeaveResponse) CopyFrom(src interface{}) {} +func (m *ProcessRaftMessageResponse) Copy() *ProcessRaftMessageResponse { + if m == nil { + return nil + } + o := &ProcessRaftMessageResponse{} + o.CopyFrom(m) + return o +} + +func (m *ProcessRaftMessageResponse) CopyFrom(src interface{}) {} +func (m *StreamRaftMessageResponse) Copy() *StreamRaftMessageResponse { + if m == nil { + return nil + } + o := &StreamRaftMessageResponse{} + o.CopyFrom(m) + return o +} + +func (m *StreamRaftMessageResponse) CopyFrom(src interface{}) {} +func (m *ResolveAddressRequest) Copy() *ResolveAddressRequest { + if m == nil { + return nil + } + o := &ResolveAddressRequest{} + o.CopyFrom(m) + return o +} + +func (m *ResolveAddressRequest) CopyFrom(src interface{}) { + + o := src.(*ResolveAddressRequest) + *m = *o +} + +func (m *ResolveAddressResponse) Copy() *ResolveAddressResponse { + if m == nil { + return nil + } + o := &ResolveAddressResponse{} + o.CopyFrom(m) + return o +} + +func (m *ResolveAddressResponse) CopyFrom(src interface{}) { + + o := src.(*ResolveAddressResponse) + *m = *o +} + +func (m *InternalRaftRequest) Copy() *InternalRaftRequest { + if m == nil { + return nil + } + o := &InternalRaftRequest{} + o.CopyFrom(m) + return o +} + +func (m *InternalRaftRequest) CopyFrom(src interface{}) { + + o := src.(*InternalRaftRequest) + *m = *o + if o.Action != nil { + m.Action = make([]StoreAction, len(o.Action)) + for i := range m.Action { + deepcopy.Copy(&m.Action[i], &o.Action[i]) + } + } + +} + +func (m *StoreAction) Copy() *StoreAction { + if m == nil { + return nil + } + o := &StoreAction{} + o.CopyFrom(m) + return o +} + +func (m *StoreAction) CopyFrom(src interface{}) { + + o := src.(*StoreAction) + *m = *o + if o.Target != nil { + switch o.Target.(type) { + case *StoreAction_Node: + v := StoreAction_Node{ + Node: &Node{}, + } + deepcopy.Copy(v.Node, o.GetNode()) + m.Target = &v + case *StoreAction_Service: + v := StoreAction_Service{ + Service: &Service{}, + } + deepcopy.Copy(v.Service, o.GetService()) + m.Target = &v 
+ case *StoreAction_Task: + v := StoreAction_Task{ + Task: &Task{}, + } + deepcopy.Copy(v.Task, o.GetTask()) + m.Target = &v + case *StoreAction_Network: + v := StoreAction_Network{ + Network: &Network{}, + } + deepcopy.Copy(v.Network, o.GetNetwork()) + m.Target = &v + case *StoreAction_Cluster: + v := StoreAction_Cluster{ + Cluster: &Cluster{}, + } + deepcopy.Copy(v.Cluster, o.GetCluster()) + m.Target = &v + case *StoreAction_Secret: + v := StoreAction_Secret{ + Secret: &Secret{}, + } + deepcopy.Copy(v.Secret, o.GetSecret()) + m.Target = &v + case *StoreAction_Resource: + v := StoreAction_Resource{ + Resource: &Resource{}, + } + deepcopy.Copy(v.Resource, o.GetResource()) + m.Target = &v + case *StoreAction_Extension: + v := StoreAction_Extension{ + Extension: &Extension{}, + } + deepcopy.Copy(v.Extension, o.GetExtension()) + m.Target = &v + case *StoreAction_Config: + v := StoreAction_Config{ + Config: &Config{}, + } + deepcopy.Copy(v.Config, o.GetConfig()) + m.Target = &v + } + } + +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Raft service + +type RaftClient interface { + // ProcessRaftMessage sends a raft message to be processed on a raft member, it is + // called from the RaftMember willing to send a message to its destination ('To' field) + ProcessRaftMessage(ctx context.Context, in *ProcessRaftMessageRequest, opts ...grpc.CallOption) (*ProcessRaftMessageResponse, error) + // StreamRaftMessage accepts a stream of raft messages of type StreamRaftMessageRequest + // to be processed on a raft member, returning a StreamRaftMessageResponse + // when processing of the streamed messages is complete. A single stream corresponds + // to a single raft message, which may be disassembled and streamed as individual messages. + // It is called from the Raft leader, which uses it to stream messages to a raft member. + StreamRaftMessage(ctx context.Context, opts ...grpc.CallOption) (Raft_StreamRaftMessageClient, error) + // ResolveAddress returns the address where the node with the given ID can be reached. + ResolveAddress(ctx context.Context, in *ResolveAddressRequest, opts ...grpc.CallOption) (*ResolveAddressResponse, error) +} + +type raftClient struct { + cc *grpc.ClientConn +} + +func NewRaftClient(cc *grpc.ClientConn) RaftClient { + return &raftClient{cc} +} + +func (c *raftClient) ProcessRaftMessage(ctx context.Context, in *ProcessRaftMessageRequest, opts ...grpc.CallOption) (*ProcessRaftMessageResponse, error) { + out := new(ProcessRaftMessageResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Raft/ProcessRaftMessage", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *raftClient) StreamRaftMessage(ctx context.Context, opts ...grpc.CallOption) (Raft_StreamRaftMessageClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Raft_serviceDesc.Streams[0], c.cc, "/docker.swarmkit.v1.Raft/StreamRaftMessage", opts...) 
+ if err != nil { + return nil, err + } + x := &raftStreamRaftMessageClient{stream} + return x, nil +} + +type Raft_StreamRaftMessageClient interface { + Send(*StreamRaftMessageRequest) error + CloseAndRecv() (*StreamRaftMessageResponse, error) + grpc.ClientStream +} + +type raftStreamRaftMessageClient struct { + grpc.ClientStream +} + +func (x *raftStreamRaftMessageClient) Send(m *StreamRaftMessageRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *raftStreamRaftMessageClient) CloseAndRecv() (*StreamRaftMessageResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(StreamRaftMessageResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *raftClient) ResolveAddress(ctx context.Context, in *ResolveAddressRequest, opts ...grpc.CallOption) (*ResolveAddressResponse, error) { + out := new(ResolveAddressResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Raft/ResolveAddress", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Raft service + +type RaftServer interface { + // ProcessRaftMessage sends a raft message to be processed on a raft member, it is + // called from the RaftMember willing to send a message to its destination ('To' field) + ProcessRaftMessage(context.Context, *ProcessRaftMessageRequest) (*ProcessRaftMessageResponse, error) + // StreamRaftMessage accepts a stream of raft messages of type StreamRaftMessageRequest + // to be processed on a raft member, returning a StreamRaftMessageResponse + // when processing of the streamed messages is complete. A single stream corresponds + // to a single raft message, which may be disassembled and streamed as individual messages. + // It is called from the Raft leader, which uses it to stream messages to a raft member. + StreamRaftMessage(Raft_StreamRaftMessageServer) error + // ResolveAddress returns the address where the node with the given ID can be reached. 
+ ResolveAddress(context.Context, *ResolveAddressRequest) (*ResolveAddressResponse, error) +} + +func RegisterRaftServer(s *grpc.Server, srv RaftServer) { + s.RegisterService(&_Raft_serviceDesc, srv) +} + +func _Raft_ProcessRaftMessage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ProcessRaftMessageRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RaftServer).ProcessRaftMessage(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Raft/ProcessRaftMessage", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RaftServer).ProcessRaftMessage(ctx, req.(*ProcessRaftMessageRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Raft_StreamRaftMessage_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(RaftServer).StreamRaftMessage(&raftStreamRaftMessageServer{stream}) +} + +type Raft_StreamRaftMessageServer interface { + SendAndClose(*StreamRaftMessageResponse) error + Recv() (*StreamRaftMessageRequest, error) + grpc.ServerStream +} + +type raftStreamRaftMessageServer struct { + grpc.ServerStream +} + +func (x *raftStreamRaftMessageServer) SendAndClose(m *StreamRaftMessageResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *raftStreamRaftMessageServer) Recv() (*StreamRaftMessageRequest, error) { + m := new(StreamRaftMessageRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Raft_ResolveAddress_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResolveAddressRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RaftServer).ResolveAddress(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Raft/ResolveAddress", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RaftServer).ResolveAddress(ctx, req.(*ResolveAddressRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Raft_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.Raft", + HandlerType: (*RaftServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ProcessRaftMessage", + Handler: _Raft_ProcessRaftMessage_Handler, + }, + { + MethodName: "ResolveAddress", + Handler: _Raft_ResolveAddress_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamRaftMessage", + Handler: _Raft_StreamRaftMessage_Handler, + ClientStreams: true, + }, + }, + Metadata: "github.com/docker/swarmkit/api/raft.proto", +} + +// Client API for RaftMembership service + +type RaftMembershipClient interface { + // Join adds a RaftMember to the raft cluster. + Join(ctx context.Context, in *JoinRequest, opts ...grpc.CallOption) (*JoinResponse, error) + // Leave removes a RaftMember from the raft cluster. 
+ Leave(ctx context.Context, in *LeaveRequest, opts ...grpc.CallOption) (*LeaveResponse, error) +} + +type raftMembershipClient struct { + cc *grpc.ClientConn +} + +func NewRaftMembershipClient(cc *grpc.ClientConn) RaftMembershipClient { + return &raftMembershipClient{cc} +} + +func (c *raftMembershipClient) Join(ctx context.Context, in *JoinRequest, opts ...grpc.CallOption) (*JoinResponse, error) { + out := new(JoinResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.RaftMembership/Join", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *raftMembershipClient) Leave(ctx context.Context, in *LeaveRequest, opts ...grpc.CallOption) (*LeaveResponse, error) { + out := new(LeaveResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.RaftMembership/Leave", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for RaftMembership service + +type RaftMembershipServer interface { + // Join adds a RaftMember to the raft cluster. + Join(context.Context, *JoinRequest) (*JoinResponse, error) + // Leave removes a RaftMember from the raft cluster. + Leave(context.Context, *LeaveRequest) (*LeaveResponse, error) +} + +func RegisterRaftMembershipServer(s *grpc.Server, srv RaftMembershipServer) { + s.RegisterService(&_RaftMembership_serviceDesc, srv) +} + +func _RaftMembership_Join_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(JoinRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RaftMembershipServer).Join(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.RaftMembership/Join", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RaftMembershipServer).Join(ctx, req.(*JoinRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RaftMembership_Leave_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RaftMembershipServer).Leave(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.RaftMembership/Leave", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RaftMembershipServer).Leave(ctx, req.(*LeaveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _RaftMembership_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.RaftMembership", + HandlerType: (*RaftMembershipServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Join", + Handler: _RaftMembership_Join_Handler, + }, + { + MethodName: "Leave", + Handler: _RaftMembership_Leave_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/docker/swarmkit/api/raft.proto", +} + +func (m *RaftMember) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RaftMember) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RaftID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.RaftID)) + } + if len(m.NodeID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, 
uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + if len(m.Addr) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Addr))) + i += copy(dAtA[i:], m.Addr) + } + dAtA[i] = 0x22 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Status.Size())) + n1, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + return i, nil +} + +func (m *JoinRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JoinRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Addr) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Addr))) + i += copy(dAtA[i:], m.Addr) + } + return i, nil +} + +func (m *JoinResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JoinResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RaftID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.RaftID)) + } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.RemovedMembers) > 0 { + for _, num := range m.RemovedMembers { + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) + } + } + return i, nil +} + +func (m *LeaveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaveRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Node != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Node.Size())) + n2, err := m.Node.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func (m *LeaveResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaveResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *ProcessRaftMessageRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProcessRaftMessageRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Message != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Message.Size())) + n3, err := m.Message.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} + +func (m *ProcessRaftMessageResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProcessRaftMessageResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *StreamRaftMessageRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) 
+ if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamRaftMessageRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Message != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Message.Size())) + n4, err := m.Message.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} + +func (m *StreamRaftMessageResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamRaftMessageResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *ResolveAddressRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResolveAddressRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RaftID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.RaftID)) + } + return i, nil +} + +func (m *ResolveAddressResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResolveAddressResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Addr) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Addr))) + i += copy(dAtA[i:], m.Addr) + } + return i, nil +} + +func (m *InternalRaftRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InternalRaftRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.ID)) + } + if len(m.Action) > 0 { + for _, msg := range m.Action { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *StoreAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StoreAction) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Action != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Action)) + } + if m.Target != nil { + nn5, err := m.Target.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn5 + } + return i, nil +} + +func (m *StoreAction_Node) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Node != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Node.Size())) + n6, err := m.Node.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} +func (m *StoreAction_Service) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Service != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Service.Size())) + n7, err := m.Service.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} +func (m *StoreAction_Task) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Task != 
nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Task.Size())) + n8, err := m.Task.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} +func (m *StoreAction_Network) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Network != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Network.Size())) + n9, err := m.Network.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} +func (m *StoreAction_Cluster) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Cluster != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Cluster.Size())) + n10, err := m.Cluster.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + return i, nil +} +func (m *StoreAction_Secret) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Secret != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Secret.Size())) + n11, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + return i, nil +} +func (m *StoreAction_Resource) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Resource != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Resource.Size())) + n12, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + return i, nil +} +func (m *StoreAction_Extension) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Extension != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Extension.Size())) + n13, err := m.Extension.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + return i, nil +} +func (m *StoreAction_Config) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Config != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Config.Size())) + n14, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + return i, nil +} +func encodeVarintRaft(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyRaftServer struct { + local RaftServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyRaftServer(local RaftServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) RaftServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyRaftServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + 
remoteCtxMods: remoteMods, + } +} +func (p *raftProxyRaftServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyRaftServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyRaftServer) ProcessRaftMessage(ctx context.Context, r *ProcessRaftMessageRequest) (*ProcessRaftMessageResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ProcessRaftMessage(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewRaftClient(conn).ProcessRaftMessage(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ProcessRaftMessage(ctx, r) + } + return nil, err + } + return NewRaftClient(conn).ProcessRaftMessage(modCtx, r) + } + return resp, err +} + +type Raft_StreamRaftMessageServerWrapper struct { + Raft_StreamRaftMessageServer + ctx context.Context +} + +func (s Raft_StreamRaftMessageServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyRaftServer) StreamRaftMessage(stream Raft_StreamRaftMessageServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := Raft_StreamRaftMessageServerWrapper{ + Raft_StreamRaftMessageServer: stream, + ctx: ctx, + } + return p.local.StreamRaftMessage(streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewRaftClient(conn).StreamRaftMessage(ctx) + + if err != nil { + return err + } + + for { + msg, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := clientStream.Send(msg); err != nil { + return err + } + } + + reply, err := clientStream.CloseAndRecv() + if err != nil { + return err + } + + return stream.SendAndClose(reply) +} + +func (p *raftProxyRaftServer) ResolveAddress(ctx context.Context, r *ResolveAddressRequest) (*ResolveAddressResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ResolveAddress(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + 
return nil, err + } + + resp, err := NewRaftClient(conn).ResolveAddress(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ResolveAddress(ctx, r) + } + return nil, err + } + return NewRaftClient(conn).ResolveAddress(modCtx, r) + } + return resp, err +} + +type raftProxyRaftMembershipServer struct { + local RaftMembershipServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyRaftMembershipServer(local RaftMembershipServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) RaftMembershipServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyRaftMembershipServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyRaftMembershipServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyRaftMembershipServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyRaftMembershipServer) Join(ctx context.Context, r *JoinRequest) (*JoinResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.Join(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewRaftMembershipClient(conn).Join(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), 
"connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.Join(ctx, r) + } + return nil, err + } + return NewRaftMembershipClient(conn).Join(modCtx, r) + } + return resp, err +} + +func (p *raftProxyRaftMembershipServer) Leave(ctx context.Context, r *LeaveRequest) (*LeaveResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.Leave(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewRaftMembershipClient(conn).Leave(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.Leave(ctx, r) + } + return nil, err + } + return NewRaftMembershipClient(conn).Leave(modCtx, r) + } + return resp, err +} + +func (m *RaftMember) Size() (n int) { + var l int + _ = l + if m.RaftID != 0 { + n += 1 + sovRaft(uint64(m.RaftID)) + } + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovRaft(uint64(l)) + } + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovRaft(uint64(l)) + } + l = m.Status.Size() + n += 1 + l + sovRaft(uint64(l)) + return n +} + +func (m *JoinRequest) Size() (n int) { + var l int + _ = l + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovRaft(uint64(l)) + } + return n +} + +func (m *JoinResponse) Size() (n int) { + var l int + _ = l + if m.RaftID != 0 { + n += 1 + sovRaft(uint64(m.RaftID)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRaft(uint64(l)) + } + } + if len(m.RemovedMembers) > 0 { + for _, e := range m.RemovedMembers { + n += 1 + sovRaft(uint64(e)) + } + } + return n +} + +func (m *LeaveRequest) Size() (n int) { + var l int + _ = l + if m.Node != nil { + l = m.Node.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} + +func (m *LeaveResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *ProcessRaftMessageRequest) Size() (n int) { + var l int + _ = l + if m.Message != nil { + l = m.Message.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} + +func (m *ProcessRaftMessageResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *StreamRaftMessageRequest) Size() (n int) { + var l int + _ = l + if m.Message != nil { + l = m.Message.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} + +func (m *StreamRaftMessageResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *ResolveAddressRequest) Size() (n int) { + var l int + _ = l + if m.RaftID != 0 { + n += 1 + sovRaft(uint64(m.RaftID)) + } + return n +} + +func (m *ResolveAddressResponse) Size() (n int) { + var l int + _ = l + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovRaft(uint64(l)) + } + return n +} + +func (m *InternalRaftRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRaft(uint64(m.ID)) + } + if len(m.Action) > 0 { + for _, e := range m.Action { + l = e.Size() + n += 1 + l + sovRaft(uint64(l)) + } + } + return n +} + +func (m *StoreAction) Size() (n int) { + var l int + _ = l + if m.Action != 0 { + n += 1 + sovRaft(uint64(m.Action)) + } + if m.Target != nil { 
+ n += m.Target.Size() + } + return n +} + +func (m *StoreAction_Node) Size() (n int) { + var l int + _ = l + if m.Node != nil { + l = m.Node.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Service) Size() (n int) { + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Task) Size() (n int) { + var l int + _ = l + if m.Task != nil { + l = m.Task.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Network) Size() (n int) { + var l int + _ = l + if m.Network != nil { + l = m.Network.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Cluster) Size() (n int) { + var l int + _ = l + if m.Cluster != nil { + l = m.Cluster.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Secret) Size() (n int) { + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Resource) Size() (n int) { + var l int + _ = l + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Extension) Size() (n int) { + var l int + _ = l + if m.Extension != nil { + l = m.Extension.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Config) Size() (n int) { + var l int + _ = l + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} + +func sovRaft(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozRaft(x uint64) (n int) { + return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *RaftMember) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RaftMember{`, + `RaftID:` + fmt.Sprintf("%v", this.RaftID) + `,`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "RaftMemberStatus", "RaftMemberStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *JoinRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JoinRequest{`, + `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, + `}`, + }, "") + return s +} +func (this *JoinResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JoinResponse{`, + `RaftID:` + fmt.Sprintf("%v", this.RaftID) + `,`, + `Members:` + strings.Replace(fmt.Sprintf("%v", this.Members), "RaftMember", "RaftMember", 1) + `,`, + `RemovedMembers:` + fmt.Sprintf("%v", this.RemovedMembers) + `,`, + `}`, + }, "") + return s +} +func (this *LeaveRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LeaveRequest{`, + `Node:` + strings.Replace(fmt.Sprintf("%v", this.Node), "RaftMember", "RaftMember", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LeaveResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LeaveResponse{`, + `}`, + }, "") + return s +} +func (this *ProcessRaftMessageRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProcessRaftMessageRequest{`, + `Message:` + strings.Replace(fmt.Sprintf("%v", this.Message), "Message", "raftpb.Message", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ProcessRaftMessageResponse) String() string { + if this == nil { + return 
"nil" + } + s := strings.Join([]string{`&ProcessRaftMessageResponse{`, + `}`, + }, "") + return s +} +func (this *StreamRaftMessageRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StreamRaftMessageRequest{`, + `Message:` + strings.Replace(fmt.Sprintf("%v", this.Message), "Message", "raftpb.Message", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StreamRaftMessageResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StreamRaftMessageResponse{`, + `}`, + }, "") + return s +} +func (this *ResolveAddressRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResolveAddressRequest{`, + `RaftID:` + fmt.Sprintf("%v", this.RaftID) + `,`, + `}`, + }, "") + return s +} +func (this *ResolveAddressResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResolveAddressResponse{`, + `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, + `}`, + }, "") + return s +} +func (this *InternalRaftRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&InternalRaftRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Action:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Action), "StoreAction", "StoreAction", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction{`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `Target:` + fmt.Sprintf("%v", this.Target) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Node) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Node{`, + `Node:` + strings.Replace(fmt.Sprintf("%v", this.Node), "Node", "Node", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Service) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Service{`, + `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "Service", "Service", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Task) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Task{`, + `Task:` + strings.Replace(fmt.Sprintf("%v", this.Task), "Task", "Task", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Network) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Network{`, + `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Cluster) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Cluster{`, + `Cluster:` + strings.Replace(fmt.Sprintf("%v", this.Cluster), "Cluster", "Cluster", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Secret) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Secret{`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Resource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Resource{`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "Resource", "Resource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Extension) String() string { + 
if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Extension{`, + `Extension:` + strings.Replace(fmt.Sprintf("%v", this.Extension), "Extension", "Extension", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Config) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Config{`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringRaft(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *RaftMember) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RaftMember: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RaftMember: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftID", wireType) + } + m.RaftID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } 
+ iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JoinRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JoinRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JoinRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JoinResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JoinResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JoinResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftID", wireType) + } + m.RaftID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &RaftMember{}) + if err := 
m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RemovedMembers = append(m.RemovedMembers, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RemovedMembers = append(m.RemovedMembers, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field RemovedMembers", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaveRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Node == nil { + m.Node = &RaftMember{} + } + if err := m.Node.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaveResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaveResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProcessRaftMessageRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProcessRaftMessageRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProcessRaftMessageRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Message == nil { + m.Message = &raftpb.Message{} + } + if err := m.Message.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProcessRaftMessageResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProcessRaftMessageResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProcessRaftMessageResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StreamRaftMessageRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + 
preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamRaftMessageRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamRaftMessageRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Message == nil { + m.Message = &raftpb.Message{} + } + if err := m.Message.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StreamRaftMessageResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamRaftMessageResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamRaftMessageResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResolveAddressRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResolveAddressRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResolveAddressRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftID", wireType) + } + m.RaftID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResolveAddressResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResolveAddressResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResolveAddressResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InternalRaftRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InternalRaftRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Action = append(m.Action, StoreAction{}) + if err := m.Action[len(m.Action)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StoreAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StoreAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StoreAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= (StoreActionKind(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Node{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Node{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Service{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Service{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Task{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + 
m.Target = &StoreAction_Task{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Network{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Network{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Cluster{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Cluster{v} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Secret{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Secret{v} + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Resource{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Resource{v} + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extension", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Extension{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Extension{v} + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Config{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Config{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRaft(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthRaft + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRaft(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRaft = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/raft.proto", fileDescriptorRaft) } + +var fileDescriptorRaft = []byte{ + // 1015 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x96, 0xc1, 0x6e, 0x1b, 0x45, + 0x18, 0xc7, 0x77, 0xed, 0xad, 0xd3, 0x7c, 0x69, 0x93, 0x30, 0x25, 0x61, 0xb3, 0x2d, 0x8e, 0xbb, + 0x45, 0xc2, 0x09, 0xc9, 0x5a, 0x18, 0xa4, 0xa2, 0x42, 0x0f, 0x71, 0x62, 0x29, 0x26, 0xad, 0x53, + 0x6d, 0x12, 0xe8, 0x2d, 0xac, 0x77, 0x27, 0xee, 0x62, 0x7b, 0xc7, 0xcc, 0x8c, 0x1d, 0xb8, 0xa0, + 0x1e, 0x21, 0x2f, 0x00, 0x42, 0xaa, 0x38, 0xc0, 0xb9, 0x0f, 0xc0, 0x03, 0xa0, 0x88, 0x13, 0x37, + 0x38, 0x45, 0xd4, 0x0f, 0x00, 0xaf, 0x80, 0x66, 0x76, 0xd7, 0x31, 0xf6, 0xda, 0xf1, 0x81, 0x4b, + 0x32, 0xda, 0xf9, 0xfd, 0xbf, 0xff, 0x37, 0x33, 0xdf, 0x7c, 0x63, 0x58, 0xab, 0xfb, 0xfc, 0x59, 
+ 0xa7, 0x66, 0xb9, 0xa4, 0x55, 0xf0, 0x88, 0xdb, 0xc0, 0xb4, 0xc0, 0x4e, 0x1d, 0xda, 0x6a, 0xf8, + 0xbc, 0xe0, 0xb4, 0xfd, 0x02, 0x75, 0x4e, 0xb8, 0xd5, 0xa6, 0x84, 0x13, 0x84, 0xc2, 0x79, 0x2b, + 0x9e, 0xb7, 0xba, 0xef, 0x1a, 0x1b, 0x57, 0xc8, 0x49, 0xed, 0x73, 0xec, 0x72, 0x16, 0x46, 0x30, + 0xd6, 0xaf, 0xa0, 0xf9, 0x57, 0x6d, 0x1c, 0xb3, 0x9b, 0x03, 0xac, 0x4b, 0x28, 0x26, 0xac, 0x80, + 0xb9, 0xeb, 0xc9, 0x84, 0xe4, 0x9f, 0x76, 0x6d, 0x20, 0x39, 0xe3, 0xf5, 0x3a, 0xa9, 0x13, 0x39, + 0x2c, 0x88, 0x51, 0xf4, 0xf5, 0xfe, 0x04, 0x43, 0x49, 0xd4, 0x3a, 0x27, 0x85, 0x76, 0xb3, 0x53, + 0xf7, 0x83, 0xe8, 0x5f, 0x28, 0x34, 0x5f, 0xaa, 0x00, 0xb6, 0x73, 0xc2, 0x1f, 0xe3, 0x56, 0x0d, + 0x53, 0x74, 0x0f, 0x66, 0x84, 0xd7, 0xb1, 0xef, 0xe9, 0x6a, 0x4e, 0xcd, 0x6b, 0x25, 0xe8, 0x5d, + 0xac, 0x66, 0x04, 0x50, 0xd9, 0xb1, 0x33, 0x62, 0xaa, 0xe2, 0x09, 0x28, 0x20, 0x1e, 0x16, 0x50, + 0x2a, 0xa7, 0xe6, 0x67, 0x43, 0xa8, 0x4a, 0x3c, 0x2c, 0x20, 0x31, 0x55, 0xf1, 0x10, 0x02, 0xcd, + 0xf1, 0x3c, 0xaa, 0xa7, 0x05, 0x61, 0xcb, 0x31, 0x2a, 0x41, 0x86, 0x71, 0x87, 0x77, 0x98, 0xae, + 0xe5, 0xd4, 0xfc, 0x5c, 0xf1, 0x2d, 0x6b, 0x74, 0xa7, 0xad, 0xcb, 0x6c, 0x0e, 0x24, 0x5b, 0xd2, + 0xce, 0x2f, 0x56, 0x15, 0x3b, 0x52, 0x9a, 0x77, 0x61, 0xee, 0x63, 0xe2, 0x07, 0x36, 0xfe, 0xa2, + 0x83, 0x19, 0xef, 0xdb, 0xa8, 0x97, 0x36, 0xe6, 0x0f, 0x2a, 0xdc, 0x08, 0x19, 0xd6, 0x26, 0x01, + 0xc3, 0xd3, 0xad, 0xea, 0x03, 0x98, 0x69, 0x49, 0x5b, 0xa6, 0xa7, 0x72, 0xe9, 0xfc, 0x5c, 0x31, + 0x3b, 0x39, 0x3b, 0x3b, 0xc6, 0xd1, 0x3b, 0xb0, 0x40, 0x71, 0x8b, 0x74, 0xb1, 0x77, 0x1c, 0x47, + 0x48, 0xe7, 0xd2, 0x79, 0xad, 0x94, 0x5a, 0x54, 0xec, 0xf9, 0x68, 0x2a, 0x14, 0x31, 0xb3, 0x04, + 0x37, 0x1e, 0x61, 0xa7, 0x8b, 0xe3, 0x05, 0x14, 0x41, 0x13, 0x3b, 0x26, 0x13, 0xbb, 0xda, 0x53, + 0xb2, 0xe6, 0x02, 0xdc, 0x8c, 0x62, 0x84, 0x0b, 0x34, 0x1f, 0xc1, 0xca, 0x13, 0x4a, 0x5c, 0xcc, + 0x58, 0xc8, 0x32, 0xe6, 0xd4, 0xfb, 0x0e, 0x6b, 0x62, 0x61, 0xf2, 0x4b, 0x64, 0xb2, 0x60, 0x85, + 0x65, 0x65, 0xc5, 0x60, 0x3c, 0xff, 0x40, 0x7b, 0xfe, 0x9d, 0xa9, 0x98, 0x77, 0xc0, 0x48, 0x8a, + 0x16, 0x79, 0xed, 0x81, 0x7e, 0xc0, 0x29, 0x76, 0x5a, 0xff, 0x87, 0xd5, 0x6d, 0x58, 0x49, 0x08, + 0x16, 0x39, 0x7d, 0x04, 0x4b, 0x36, 0x66, 0xa4, 0xd9, 0xc5, 0x5b, 0x9e, 0x47, 0x45, 0x3a, 0x91, + 0xcd, 0x34, 0xe7, 0x69, 0x6e, 0xc0, 0xf2, 0xb0, 0x3a, 0x2a, 0x87, 0xa4, 0x9a, 0x69, 0xc2, 0xad, + 0x4a, 0xc0, 0x31, 0x0d, 0x9c, 0xa6, 0x88, 0x13, 0x3b, 0x2d, 0x43, 0xaa, 0x6f, 0x92, 0xe9, 0x5d, + 0xac, 0xa6, 0x2a, 0x3b, 0x76, 0xca, 0xf7, 0xd0, 0x43, 0xc8, 0x38, 0x2e, 0xf7, 0x49, 0x10, 0xd5, + 0xca, 0x6a, 0xd2, 0xb9, 0x1d, 0x70, 0x42, 0xf1, 0x96, 0xc4, 0xe2, 0x22, 0x0e, 0x45, 0xe6, 0xaf, + 0x1a, 0xcc, 0x0d, 0xcc, 0xa2, 0x0f, 0xfb, 0xe1, 0x84, 0xd5, 0x7c, 0xf1, 0xde, 0x15, 0xe1, 0xf6, + 0xfc, 0xc0, 0x8b, 0x83, 0x21, 0x2b, 0xaa, 0xa0, 0x94, 0xdc, 0x71, 0x3d, 0x49, 0x2a, 0xee, 0xe6, + 0xae, 0x12, 0x56, 0x0f, 0xba, 0x0f, 0x33, 0x0c, 0xd3, 0xae, 0xef, 0x62, 0x79, 0x39, 0xe7, 0x8a, + 0xb7, 0x13, 0xdd, 0x42, 0x64, 0x57, 0xb1, 0x63, 0x5a, 0x18, 0x71, 0x87, 0x35, 0xa2, 0xcb, 0x9b, + 0x68, 0x74, 0xe8, 0xb0, 0x86, 0x30, 0x12, 0x9c, 0x30, 0x0a, 0x30, 0x3f, 0x25, 0xb4, 0xa1, 0x5f, + 0x1b, 0x6f, 0x54, 0x0d, 0x11, 0x61, 0x14, 0xd1, 0x42, 0xe8, 0x36, 0x3b, 0x8c, 0x63, 0xaa, 0x67, + 0xc6, 0x0b, 0xb7, 0x43, 0x44, 0x08, 0x23, 0x1a, 0xbd, 0x0f, 0x19, 0x86, 0x5d, 0x8a, 0xb9, 0x3e, + 0x23, 0x75, 0x46, 0xf2, 0xca, 0x04, 0xb1, 0x2b, 0x5a, 0x8a, 0x1c, 0xa1, 0x07, 0x70, 0x9d, 0x62, + 0x46, 0x3a, 0xd4, 0xc5, 0xfa, 0x75, 0xa9, 0xbb, 0x93, 0x78, 0x0d, 0x23, 0x66, 0x57, 0xb1, 0xfb, + 0x3c, 0x7a, 0x08, 0xb3, 
0xf8, 0x4b, 0x8e, 0x03, 0x26, 0x0e, 0x6f, 0x56, 0x8a, 0xdf, 0x4c, 0x12, + 0x97, 0x63, 0x68, 0x57, 0xb1, 0x2f, 0x15, 0x22, 0x61, 0x97, 0x04, 0x27, 0x7e, 0x5d, 0x87, 0xf1, + 0x09, 0x6f, 0x4b, 0x42, 0x24, 0x1c, 0xb2, 0xa5, 0xeb, 0x90, 0xe1, 0x0e, 0xad, 0x63, 0xbe, 0xfe, + 0x8f, 0x0a, 0x0b, 0x43, 0x75, 0x81, 0xde, 0x86, 0x99, 0xa3, 0xea, 0x5e, 0x75, 0xff, 0xd3, 0xea, + 0xa2, 0x62, 0x18, 0x67, 0x2f, 0x72, 0xcb, 0x43, 0xc4, 0x51, 0xd0, 0x08, 0xc8, 0x69, 0x80, 0x8a, + 0x70, 0xeb, 0xe0, 0x70, 0xdf, 0x2e, 0x1f, 0x6f, 0x6d, 0x1f, 0x56, 0xf6, 0xab, 0xc7, 0xdb, 0x76, + 0x79, 0xeb, 0xb0, 0xbc, 0xa8, 0x1a, 0x2b, 0x67, 0x2f, 0x72, 0x4b, 0x43, 0xa2, 0x6d, 0x8a, 0x1d, + 0x8e, 0x47, 0x34, 0x47, 0x4f, 0x76, 0x84, 0x26, 0x95, 0xa8, 0x39, 0x6a, 0x7b, 0x49, 0x1a, 0xbb, + 0xfc, 0x78, 0xff, 0x93, 0xf2, 0x62, 0x3a, 0x51, 0x63, 0xcb, 0x76, 0x69, 0xbc, 0xf1, 0xcd, 0x4f, + 0x59, 0xe5, 0x97, 0x9f, 0xb3, 0xc3, 0xab, 0x2b, 0xfe, 0x98, 0x06, 0x4d, 0xdc, 0x50, 0x74, 0xa6, + 0x02, 0x1a, 0x6d, 0x53, 0x68, 0x33, 0x69, 0x07, 0xc7, 0x36, 0x47, 0xc3, 0x9a, 0x16, 0x8f, 0x7a, + 0xd2, 0xd2, 0x6f, 0x2f, 0xff, 0xfe, 0x3e, 0xb5, 0x00, 0x37, 0x25, 0xbf, 0xd9, 0x72, 0x02, 0xa7, + 0x8e, 0x29, 0xfa, 0x56, 0x85, 0xd7, 0x46, 0x1a, 0x19, 0xda, 0x48, 0xbe, 0xc6, 0xc9, 0xcd, 0xd3, + 0xd8, 0x9c, 0x92, 0x9e, 0x98, 0x49, 0x5e, 0x45, 0x5f, 0xc3, 0xfc, 0x7f, 0x1b, 0x1f, 0x5a, 0x1b, + 0x57, 0xce, 0x23, 0xad, 0xd5, 0x58, 0x9f, 0x06, 0x9d, 0x98, 0x41, 0xf1, 0x0f, 0x15, 0xe6, 0x2f, + 0x9f, 0x2c, 0xf6, 0xcc, 0x6f, 0xa3, 0xcf, 0x40, 0x13, 0x0f, 0x32, 0x4a, 0x6c, 0x93, 0x03, 0xcf, + 0xb9, 0x91, 0x1b, 0x0f, 0x4c, 0x3e, 0x00, 0x17, 0xae, 0xc9, 0x27, 0x11, 0x25, 0x46, 0x18, 0x7c, + 0x71, 0x8d, 0xbb, 0x13, 0x88, 0x89, 0x26, 0x25, 0xfd, 0xfc, 0x55, 0x56, 0xf9, 0xf3, 0x55, 0x56, + 0x79, 0xde, 0xcb, 0xaa, 0xe7, 0xbd, 0xac, 0xfa, 0x7b, 0x2f, 0xab, 0xfe, 0xd5, 0xcb, 0xaa, 0x4f, + 0xd3, 0x4f, 0xb5, 0x5a, 0x46, 0xfe, 0xa2, 0x7a, 0xef, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3e, + 0x7a, 0x8b, 0xe7, 0x6a, 0x0a, 0x00, 0x00, +} diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/raft.proto b/extender/code/vendor/github.com/docker/swarmkit/api/raft.proto new file mode 100644 index 000000000..b351c15bd --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/api/raft.proto @@ -0,0 +1,150 @@ +syntax = "proto3"; + +package docker.swarmkit.v1; + +import "github.com/docker/swarmkit/api/objects.proto"; +import "github.com/docker/swarmkit/api/types.proto"; +import "github.com/coreos/etcd/raft/raftpb/raft.proto"; +import weak "gogoproto/gogo.proto"; +import weak "github.com/docker/swarmkit/protobuf/plugin/plugin.proto"; + +// Raft defines the RPC communication between raft nodes. +service Raft { + // ProcessRaftMessage sends a raft message to be processed on a raft member, it is + // called from the RaftMember willing to send a message to its destination ('To' field) + rpc ProcessRaftMessage(ProcessRaftMessageRequest) returns (ProcessRaftMessageResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + + // StreamRaftMessage accepts a stream of raft messages of type StreamRaftMessageRequest + // to be processed on a raft member, returning a StreamRaftMessageResponse + // when processing of the streamed messages is complete. A single stream corresponds + // to a single raft message, which may be disassembled and streamed as individual messages. + // It is called from the Raft leader, which uses it to stream messages to a raft member. 
+ rpc StreamRaftMessage(stream StreamRaftMessageRequest) returns (StreamRaftMessageResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + + // ResolveAddress returns the address where the node with the given ID can be reached. + rpc ResolveAddress(ResolveAddressRequest) returns (ResolveAddressResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; +} + +// RaftMembership defines RPCs for adding and removing members from the +// cluster. These RPCs must always run on the leader, so they are in a separate +// service to support the raft proxy. +service RaftMembership { + // Join adds a RaftMember to the raft cluster. + rpc Join(JoinRequest) returns (JoinResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + + // Leave removes a RaftMember from the raft cluster. + rpc Leave(LeaveRequest) returns (LeaveResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; +} + +message RaftMember { + // RaftID specifies the internal ID used by the manager in a raft context, it can never be modified + // and is used only for information purposes + uint64 raft_id = 1; + + // NodeID is the node's ID. + string node_id = 2; + + // Addr specifies the address of the member + string addr = 3; + + // Status provides the current status of the manager from the perspective of another manager. + RaftMemberStatus status = 4 [(gogoproto.nullable) = false]; +} + +message JoinRequest { + // Addr specifies the address of the member + string addr = 1; +} + +message JoinResponse { + // RaftID is the ID assigned to the new member. + uint64 raft_id = 1; + + // Members is the membership set of the cluster. + repeated RaftMember members = 2; + + // RemovedMembers is a list of members that have been removed from + // the cluster, so the new node can avoid communicating with them. + repeated uint64 removed_members = 3 [packed=false]; +} + +message LeaveRequest { + RaftMember node = 1; +} + +message LeaveResponse {} + +message ProcessRaftMessageRequest { + option (docker.protobuf.plugin.deepcopy) = false; + raftpb.Message message = 1; +} + +message ProcessRaftMessageResponse {} + +// Raft message streaming request. +message StreamRaftMessageRequest { + option (docker.protobuf.plugin.deepcopy) = false; + raftpb.Message message = 1; +} + +// Raft message streaming response. +message StreamRaftMessageResponse {} + +message ResolveAddressRequest { + // raft_id is the ID to resolve to an address. + uint64 raft_id = 1; +} + +message ResolveAddressResponse { + // Addr specifies the address of the member + string addr = 1; +} + +// Contains one of many protobuf encoded objects to replicate +// over the raft backend with a request ID to track when the +// action is effectively applied +message InternalRaftRequest { + uint64 id = 1; + + repeated StoreAction action = 2 [(gogoproto.nullable) = false]; +} + +// TODO(stevvooe): Storage actions may belong in another protobuf file. They +// aren't necessarily first-class "types" in the cluster schema. + +// StoreActionKind defines the operation to take on the store for the target of +// a storage action. 
+enum StoreActionKind { + option (gogoproto.goproto_enum_prefix) = false; + option (gogoproto.enum_customname) = "StoreActionKind"; + UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "StoreActionKindUnknown"]; // default value, invalid + STORE_ACTION_CREATE = 1 [(gogoproto.enumvalue_customname) = "StoreActionKindCreate"]; + STORE_ACTION_UPDATE = 2 [(gogoproto.enumvalue_customname) = "StoreActionKindUpdate"]; + STORE_ACTION_REMOVE = 3 [(gogoproto.enumvalue_customname) = "StoreActionKindRemove"]; +} + +// StoreAction defines a target and operation to apply on the storage system. +message StoreAction { + StoreActionKind action = 1; + oneof target { + Node node = 2; + Service service = 3; + Task task = 4; + Network network = 5; + Cluster cluster = 6; + Secret secret = 7; + Resource resource = 8; + Extension extension = 9; + Config config = 10; + } +} diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/resource.pb.go b/extender/code/vendor/github.com/docker/swarmkit/api/resource.pb.go new file mode 100644 index 000000000..2d4741993 --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/api/resource.pb.go @@ -0,0 +1,1075 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/docker/swarmkit/api/resource.proto + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/docker/swarmkit/protobuf/plugin" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" + +import raftselector "github.com/docker/swarmkit/manager/raftselector" +import codes "google.golang.org/grpc/codes" +import status "google.golang.org/grpc/status" +import metadata "google.golang.org/grpc/metadata" +import peer "google.golang.org/grpc/peer" +import rafttime "time" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type AttachNetworkRequest struct { + Config *NetworkAttachmentConfig `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"` + ContainerID string `protobuf:"bytes,2,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` +} + +func (m *AttachNetworkRequest) Reset() { *m = AttachNetworkRequest{} } +func (*AttachNetworkRequest) ProtoMessage() {} +func (*AttachNetworkRequest) Descriptor() ([]byte, []int) { return fileDescriptorResource, []int{0} } + +type AttachNetworkResponse struct { + AttachmentID string `protobuf:"bytes,1,opt,name=attachment_id,json=attachmentId,proto3" json:"attachment_id,omitempty"` +} + +func (m *AttachNetworkResponse) Reset() { *m = AttachNetworkResponse{} } +func (*AttachNetworkResponse) ProtoMessage() {} +func (*AttachNetworkResponse) Descriptor() ([]byte, []int) { return fileDescriptorResource, []int{1} } + +type DetachNetworkRequest struct { + AttachmentID string `protobuf:"bytes,1,opt,name=attachment_id,json=attachmentId,proto3" json:"attachment_id,omitempty"` +} + +func (m *DetachNetworkRequest) Reset() { *m = DetachNetworkRequest{} } +func (*DetachNetworkRequest) ProtoMessage() {} +func (*DetachNetworkRequest) Descriptor() ([]byte, []int) { return fileDescriptorResource, []int{2} } + +type DetachNetworkResponse struct { +} + +func (m *DetachNetworkResponse) Reset() { *m = DetachNetworkResponse{} } +func (*DetachNetworkResponse) ProtoMessage() {} +func (*DetachNetworkResponse) Descriptor() ([]byte, []int) { return fileDescriptorResource, []int{3} } + +func init() { + proto.RegisterType((*AttachNetworkRequest)(nil), "docker.swarmkit.v1.AttachNetworkRequest") + proto.RegisterType((*AttachNetworkResponse)(nil), "docker.swarmkit.v1.AttachNetworkResponse") + proto.RegisterType((*DetachNetworkRequest)(nil), "docker.swarmkit.v1.DetachNetworkRequest") + proto.RegisterType((*DetachNetworkResponse)(nil), "docker.swarmkit.v1.DetachNetworkResponse") +} + +type authenticatedWrapperResourceAllocatorServer struct { + local ResourceAllocatorServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperResourceAllocatorServer(local ResourceAllocatorServer, authorize func(context.Context, []string) error) ResourceAllocatorServer { + return &authenticatedWrapperResourceAllocatorServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperResourceAllocatorServer) AttachNetwork(ctx context.Context, r *AttachNetworkRequest) (*AttachNetworkResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-worker", "swarm-manager"}); err != nil { + return nil, err + } + return p.local.AttachNetwork(ctx, r) +} + +func (p *authenticatedWrapperResourceAllocatorServer) DetachNetwork(ctx context.Context, r *DetachNetworkRequest) (*DetachNetworkResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-worker", "swarm-manager"}); err != nil { + return nil, err + } + return p.local.DetachNetwork(ctx, r) +} + +func (m *AttachNetworkRequest) Copy() *AttachNetworkRequest { + if m == nil { + return nil + } + o := &AttachNetworkRequest{} + o.CopyFrom(m) + return o +} + +func (m *AttachNetworkRequest) CopyFrom(src interface{}) { + + o := src.(*AttachNetworkRequest) + *m = *o + if o.Config != nil { + m.Config = &NetworkAttachmentConfig{} + deepcopy.Copy(m.Config, o.Config) + } +} + +func (m *AttachNetworkResponse) Copy() *AttachNetworkResponse { + if m == nil { + return nil + } + o := &AttachNetworkResponse{} + o.CopyFrom(m) + return o +} + +func (m 
*AttachNetworkResponse) CopyFrom(src interface{}) { + + o := src.(*AttachNetworkResponse) + *m = *o +} + +func (m *DetachNetworkRequest) Copy() *DetachNetworkRequest { + if m == nil { + return nil + } + o := &DetachNetworkRequest{} + o.CopyFrom(m) + return o +} + +func (m *DetachNetworkRequest) CopyFrom(src interface{}) { + + o := src.(*DetachNetworkRequest) + *m = *o +} + +func (m *DetachNetworkResponse) Copy() *DetachNetworkResponse { + if m == nil { + return nil + } + o := &DetachNetworkResponse{} + o.CopyFrom(m) + return o +} + +func (m *DetachNetworkResponse) CopyFrom(src interface{}) {} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for ResourceAllocator service + +type ResourceAllocatorClient interface { + AttachNetwork(ctx context.Context, in *AttachNetworkRequest, opts ...grpc.CallOption) (*AttachNetworkResponse, error) + DetachNetwork(ctx context.Context, in *DetachNetworkRequest, opts ...grpc.CallOption) (*DetachNetworkResponse, error) +} + +type resourceAllocatorClient struct { + cc *grpc.ClientConn +} + +func NewResourceAllocatorClient(cc *grpc.ClientConn) ResourceAllocatorClient { + return &resourceAllocatorClient{cc} +} + +func (c *resourceAllocatorClient) AttachNetwork(ctx context.Context, in *AttachNetworkRequest, opts ...grpc.CallOption) (*AttachNetworkResponse, error) { + out := new(AttachNetworkResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.ResourceAllocator/AttachNetwork", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *resourceAllocatorClient) DetachNetwork(ctx context.Context, in *DetachNetworkRequest, opts ...grpc.CallOption) (*DetachNetworkResponse, error) { + out := new(DetachNetworkResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.ResourceAllocator/DetachNetwork", in, out, c.cc, opts...) 
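+ // grpc.Invoke sends a blocking unary RPC: it marshals `in`, invokes the named method over the shared *grpc.ClientConn, and unmarshals the reply into `out`. (Newer grpc-go code generation calls cc.Invoke instead.)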
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for ResourceAllocator service + +type ResourceAllocatorServer interface { + AttachNetwork(context.Context, *AttachNetworkRequest) (*AttachNetworkResponse, error) + DetachNetwork(context.Context, *DetachNetworkRequest) (*DetachNetworkResponse, error) +} + +func RegisterResourceAllocatorServer(s *grpc.Server, srv ResourceAllocatorServer) { + s.RegisterService(&_ResourceAllocator_serviceDesc, srv) +} + +func _ResourceAllocator_AttachNetwork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AttachNetworkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourceAllocatorServer).AttachNetwork(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.ResourceAllocator/AttachNetwork", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourceAllocatorServer).AttachNetwork(ctx, req.(*AttachNetworkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ResourceAllocator_DetachNetwork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DetachNetworkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourceAllocatorServer).DetachNetwork(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.ResourceAllocator/DetachNetwork", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourceAllocatorServer).DetachNetwork(ctx, req.(*DetachNetworkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ResourceAllocator_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.ResourceAllocator", + HandlerType: (*ResourceAllocatorServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AttachNetwork", + Handler: _ResourceAllocator_AttachNetwork_Handler, + }, + { + MethodName: "DetachNetwork", + Handler: _ResourceAllocator_DetachNetwork_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/docker/swarmkit/api/resource.proto", +} + +func (m *AttachNetworkRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AttachNetworkRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Config != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintResource(dAtA, i, uint64(m.Config.Size())) + n1, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if len(m.ContainerID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintResource(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + return i, nil +} + +func (m *AttachNetworkResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AttachNetworkResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.AttachmentID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintResource(dAtA, i, uint64(len(m.AttachmentID))) + i += copy(dAtA[i:], m.AttachmentID) + } + 
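+ // The 0xa written above is the protobuf key for field 1 with wire type 2 (length-delimited): (1<<3)|2. A varint length and the raw string bytes follow it.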
return i, nil +} + +func (m *DetachNetworkRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DetachNetworkRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.AttachmentID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintResource(dAtA, i, uint64(len(m.AttachmentID))) + i += copy(dAtA[i:], m.AttachmentID) + } + return i, nil +} + +func (m *DetachNetworkResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DetachNetworkResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func encodeVarintResource(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyResourceAllocatorServer struct { + local ResourceAllocatorServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyResourceAllocatorServer(local ResourceAllocatorServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) ResourceAllocatorServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyResourceAllocatorServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyResourceAllocatorServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyResourceAllocatorServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyResourceAllocatorServer) AttachNetwork(ctx context.Context, r *AttachNetworkRequest) (*AttachNetworkResponse, 
error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.AttachNetwork(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewResourceAllocatorClient(conn).AttachNetwork(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.AttachNetwork(ctx, r) + } + return nil, err + } + return NewResourceAllocatorClient(conn).AttachNetwork(modCtx, r) + } + return resp, err +} + +func (p *raftProxyResourceAllocatorServer) DetachNetwork(ctx context.Context, r *DetachNetworkRequest) (*DetachNetworkResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.DetachNetwork(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewResourceAllocatorClient(conn).DetachNetwork(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.DetachNetwork(ctx, r) + } + return nil, err + } + return NewResourceAllocatorClient(conn).DetachNetwork(modCtx, r) + } + return resp, err +} + +func (m *AttachNetworkRequest) Size() (n int) { + var l int + _ = l + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovResource(uint64(l)) + } + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovResource(uint64(l)) + } + return n +} + +func (m *AttachNetworkResponse) Size() (n int) { + var l int + _ = l + l = len(m.AttachmentID) + if l > 0 { + n += 1 + l + sovResource(uint64(l)) + } + return n +} + +func (m *DetachNetworkRequest) Size() (n int) { + var l int + _ = l + l = len(m.AttachmentID) + if l > 0 { + n += 1 + l + sovResource(uint64(l)) + } + return n +} + +func (m *DetachNetworkResponse) Size() (n int) { + var l int + _ = l + return n +} + +func sovResource(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozResource(x uint64) (n int) { + return sovResource(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AttachNetworkRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AttachNetworkRequest{`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "NetworkAttachmentConfig", "NetworkAttachmentConfig", 1) + `,`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `}`, + }, "") + return s +} +func (this *AttachNetworkResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AttachNetworkResponse{`, + `AttachmentID:` + fmt.Sprintf("%v", this.AttachmentID) + `,`, + `}`, + }, "") + return s +} +func (this *DetachNetworkRequest) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&DetachNetworkRequest{`, + `AttachmentID:` + fmt.Sprintf("%v", this.AttachmentID) + `,`, + `}`, + }, "") + return s +} +func (this *DetachNetworkResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DetachNetworkResponse{`, + `}`, + }, "") + return s +} +func valueToStringResource(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AttachNetworkRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttachNetworkRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttachNetworkRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthResource + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &NetworkAttachmentConfig{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthResource + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipResource(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthResource + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AttachNetworkResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttachNetworkResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttachNetworkResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } 
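+ // The varint key decoded above splits into a field number (wire >> 3) and a wire type (wire & 0x7); known fields are handled in the switch below and anything else is skipped with skipResource.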
+ switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttachmentID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthResource + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AttachmentID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipResource(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthResource + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DetachNetworkRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DetachNetworkRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DetachNetworkRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttachmentID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthResource + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AttachmentID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipResource(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthResource + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DetachNetworkResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DetachNetworkResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DetachNetworkResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipResource(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthResource + } + if (iNdEx + skippy) > 
l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipResource(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResource + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResource + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResource + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthResource + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResource + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipResource(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthResource = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowResource = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/docker/swarmkit/api/resource.proto", fileDescriptorResource) +} + +var fileDescriptorResource = []byte{ + // 397 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0xcf, 0x4e, 0xf2, 0x40, + 0x14, 0xc5, 0x19, 0x16, 0x24, 0xdf, 0x50, 0xf2, 0x69, 0x03, 0x91, 0x90, 0x58, 0x48, 0xdd, 0xa0, + 0x86, 0x36, 0x62, 0x8c, 0x6b, 0xfe, 0x6c, 0xba, 0x90, 0x45, 0x5f, 0xc0, 0x0c, 0xed, 0x50, 0x1a, + 0x68, 0xa7, 0x4e, 0xa7, 0x12, 0x77, 0x6e, 0x5d, 0xb9, 0xf5, 0x1d, 0x4c, 0x7c, 0x0e, 0xe2, 0xca, + 0xa5, 0x2b, 0x22, 0x7d, 0x00, 0x9f, 0xc1, 0xd0, 0x29, 0x10, 0x70, 0xa2, 0xc4, 0x55, 0xa7, 0xd3, + 0x73, 0xce, 0xfd, 0xdd, 0x7b, 0x0b, 0x1b, 0x8e, 0xcb, 0x86, 0x51, 0x5f, 0xb3, 0x88, 0xa7, 0xdb, + 0xc4, 0x1a, 0x61, 0xaa, 0x87, 0x13, 0x44, 0xbd, 0x91, 0xcb, 0x74, 0x14, 0xb8, 0x3a, 0xc5, 0x21, + 0x89, 0xa8, 0x85, 0xb5, 0x80, 0x12, 0x46, 0x64, 0x99, 0x6b, 0xb4, 0xa5, 0x46, 0xbb, 0x3d, 0xab, + 0x9c, 0xfc, 0x12, 0xc1, 0xee, 0x02, 0x1c, 0x72, 0x7f, 0xa5, 0xe8, 0x10, 0x87, 0x24, 0x47, 0x7d, + 0x71, 0x4a, 0x6f, 0x2f, 0x7f, 0x48, 0x48, 0x14, 0xfd, 0x68, 0xa0, 0x07, 0xe3, 0xc8, 0x71, 0xfd, + 0xf4, 0xc1, 0x8d, 0xea, 0x23, 0x80, 0xc5, 0x16, 0x63, 0xc8, 0x1a, 0xf6, 0x30, 0x9b, 0x10, 0x3a, + 0x32, 0xf1, 0x4d, 0x84, 0x43, 0x26, 0x77, 0x60, 0xce, 0x22, 0xfe, 0xc0, 0x75, 0xca, 0xa0, 0x06, + 0xea, 0xf9, 0xe6, 0xa9, 0xf6, 0x1d, 0x5c, 0x4b, 0x3d, 0x3c, 0xc0, 0xc3, 0x3e, 0xeb, 0x24, 0x16, + 
0x33, 0xb5, 0xca, 0x4d, 0x28, 0x59, 0xc4, 0x67, 0xc8, 0xf5, 0x31, 0xbd, 0x76, 0xed, 0x72, 0xb6, + 0x06, 0xea, 0xff, 0xda, 0xff, 0xe3, 0x59, 0x35, 0xdf, 0x59, 0xde, 0x1b, 0x5d, 0x33, 0xbf, 0x12, + 0x19, 0xb6, 0xda, 0x83, 0xa5, 0x2d, 0xa0, 0x30, 0x20, 0x7e, 0x88, 0xe5, 0x0b, 0x58, 0x40, 0xab, + 0x42, 0x8b, 0x34, 0x90, 0xa4, 0xed, 0xc5, 0xb3, 0xaa, 0xb4, 0x26, 0x30, 0xba, 0xa6, 0xb4, 0x96, + 0x19, 0xb6, 0x7a, 0x05, 0x8b, 0x5d, 0x2c, 0x68, 0xf0, 0x8f, 0x71, 0x07, 0xb0, 0xb4, 0x15, 0xc7, + 0xf1, 0x9a, 0xcf, 0x59, 0xb8, 0x6f, 0xa6, 0xbb, 0x6e, 0x8d, 0xc7, 0xc4, 0x42, 0x8c, 0x50, 0xf9, + 0x01, 0xc0, 0xc2, 0x46, 0x3b, 0x72, 0x5d, 0x34, 0x48, 0xd1, 0x0a, 0x2a, 0xc7, 0x3b, 0x28, 0x79, + 0x71, 0xf5, 0xe8, 0xf5, 0xe5, 0xf3, 0x29, 0x7b, 0x08, 0xa5, 0x44, 0xda, 0x58, 0x7c, 0xc3, 0x14, + 0x16, 0xf8, 0x9b, 0x87, 0x7c, 0xe4, 0x60, 0xce, 0xb2, 0xc1, 0x2e, 0x66, 0x11, 0x4d, 0x4b, 0xcc, + 0x22, 0x1c, 0xc4, 0x4e, 0x2c, 0xed, 0xf2, 0x74, 0xae, 0x64, 0xde, 0xe7, 0x4a, 0xe6, 0x3e, 0x56, + 0xc0, 0x34, 0x56, 0xc0, 0x5b, 0xac, 0x80, 0x8f, 0x58, 0x01, 0xfd, 0x5c, 0xf2, 0x63, 0x9e, 0x7f, + 0x05, 0x00, 0x00, 0xff, 0xff, 0xc1, 0x7a, 0x29, 0xfc, 0x58, 0x03, 0x00, 0x00, +} diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/resource.proto b/extender/code/vendor/github.com/docker/swarmkit/api/resource.proto new file mode 100644 index 000000000..ecaa749ee --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/api/resource.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; + +package docker.swarmkit.v1; + +import "github.com/docker/swarmkit/api/types.proto"; +import "gogoproto/gogo.proto"; +import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto"; + +// Allocator is the API provided by a manager group for agents to control the allocation of certain entities. +// +// API methods on this service are used only by agent nodes. +service ResourceAllocator { + rpc AttachNetwork(AttachNetworkRequest) returns (AttachNetworkResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" }; + }; + rpc DetachNetwork(DetachNetworkRequest) returns (DetachNetworkResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" }; + }; +} + +message AttachNetworkRequest { + NetworkAttachmentConfig config = 1; + string container_id = 2; +} + +message AttachNetworkResponse { + string attachment_id = 1; +} + +message DetachNetworkRequest { + string attachment_id = 1; +} + +message DetachNetworkResponse {} diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/snapshot.pb.go b/extender/code/vendor/github.com/docker/swarmkit/api/snapshot.pb.go new file mode 100644 index 000000000..4d6893a90 --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/api/snapshot.pb.go @@ -0,0 +1,1326 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/docker/swarmkit/api/snapshot.proto + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type Snapshot_Version int32 + +const ( + // V0 is the initial version of the StoreSnapshot message. 
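+ // (Per the TODO in snapshot.proto, a new value here could signal a different snapshot encoding.)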
+ Snapshot_V0 Snapshot_Version = 0 +) + +var Snapshot_Version_name = map[int32]string{ + 0: "V0", +} +var Snapshot_Version_value = map[string]int32{ + "V0": 0, +} + +func (x Snapshot_Version) String() string { + return proto.EnumName(Snapshot_Version_name, int32(x)) +} +func (Snapshot_Version) EnumDescriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{2, 0} } + +// StoreSnapshot is used to store snapshots of the store. +type StoreSnapshot struct { + Nodes []*Node `protobuf:"bytes,1,rep,name=nodes" json:"nodes,omitempty"` + Services []*Service `protobuf:"bytes,2,rep,name=services" json:"services,omitempty"` + Networks []*Network `protobuf:"bytes,3,rep,name=networks" json:"networks,omitempty"` + Tasks []*Task `protobuf:"bytes,4,rep,name=tasks" json:"tasks,omitempty"` + Clusters []*Cluster `protobuf:"bytes,5,rep,name=clusters" json:"clusters,omitempty"` + Secrets []*Secret `protobuf:"bytes,6,rep,name=secrets" json:"secrets,omitempty"` + Resources []*Resource `protobuf:"bytes,7,rep,name=resources" json:"resources,omitempty"` + Extensions []*Extension `protobuf:"bytes,8,rep,name=extensions" json:"extensions,omitempty"` + Configs []*Config `protobuf:"bytes,9,rep,name=configs" json:"configs,omitempty"` +} + +func (m *StoreSnapshot) Reset() { *m = StoreSnapshot{} } +func (*StoreSnapshot) ProtoMessage() {} +func (*StoreSnapshot) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{0} } + +// ClusterSnapshot stores cluster membership information in snapshots. +type ClusterSnapshot struct { + Members []*RaftMember `protobuf:"bytes,1,rep,name=members" json:"members,omitempty"` + Removed []uint64 `protobuf:"varint,2,rep,name=removed" json:"removed,omitempty"` +} + +func (m *ClusterSnapshot) Reset() { *m = ClusterSnapshot{} } +func (*ClusterSnapshot) ProtoMessage() {} +func (*ClusterSnapshot) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{1} } + +type Snapshot struct { + Version Snapshot_Version `protobuf:"varint,1,opt,name=version,proto3,enum=docker.swarmkit.v1.Snapshot_Version" json:"version,omitempty"` + Membership ClusterSnapshot `protobuf:"bytes,2,opt,name=membership" json:"membership"` + Store StoreSnapshot `protobuf:"bytes,3,opt,name=store" json:"store"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{2} } + +func init() { + proto.RegisterType((*StoreSnapshot)(nil), "docker.swarmkit.v1.StoreSnapshot") + proto.RegisterType((*ClusterSnapshot)(nil), "docker.swarmkit.v1.ClusterSnapshot") + proto.RegisterType((*Snapshot)(nil), "docker.swarmkit.v1.Snapshot") + proto.RegisterEnum("docker.swarmkit.v1.Snapshot_Version", Snapshot_Version_name, Snapshot_Version_value) +} + +func (m *StoreSnapshot) Copy() *StoreSnapshot { + if m == nil { + return nil + } + o := &StoreSnapshot{} + o.CopyFrom(m) + return o +} + +func (m *StoreSnapshot) CopyFrom(src interface{}) { + + o := src.(*StoreSnapshot) + *m = *o + if o.Nodes != nil { + m.Nodes = make([]*Node, len(o.Nodes)) + for i := range m.Nodes { + m.Nodes[i] = &Node{} + deepcopy.Copy(m.Nodes[i], o.Nodes[i]) + } + } + + if o.Services != nil { + m.Services = make([]*Service, len(o.Services)) + for i := range m.Services { + m.Services[i] = &Service{} + deepcopy.Copy(m.Services[i], o.Services[i]) + } + } + + if o.Networks != nil { + m.Networks = make([]*Network, len(o.Networks)) + for i := range m.Networks { + m.Networks[i] = &Network{} + deepcopy.Copy(m.Networks[i], o.Networks[i]) + } + } + + if 
o.Tasks != nil { + m.Tasks = make([]*Task, len(o.Tasks)) + for i := range m.Tasks { + m.Tasks[i] = &Task{} + deepcopy.Copy(m.Tasks[i], o.Tasks[i]) + } + } + + if o.Clusters != nil { + m.Clusters = make([]*Cluster, len(o.Clusters)) + for i := range m.Clusters { + m.Clusters[i] = &Cluster{} + deepcopy.Copy(m.Clusters[i], o.Clusters[i]) + } + } + + if o.Secrets != nil { + m.Secrets = make([]*Secret, len(o.Secrets)) + for i := range m.Secrets { + m.Secrets[i] = &Secret{} + deepcopy.Copy(m.Secrets[i], o.Secrets[i]) + } + } + + if o.Resources != nil { + m.Resources = make([]*Resource, len(o.Resources)) + for i := range m.Resources { + m.Resources[i] = &Resource{} + deepcopy.Copy(m.Resources[i], o.Resources[i]) + } + } + + if o.Extensions != nil { + m.Extensions = make([]*Extension, len(o.Extensions)) + for i := range m.Extensions { + m.Extensions[i] = &Extension{} + deepcopy.Copy(m.Extensions[i], o.Extensions[i]) + } + } + + if o.Configs != nil { + m.Configs = make([]*Config, len(o.Configs)) + for i := range m.Configs { + m.Configs[i] = &Config{} + deepcopy.Copy(m.Configs[i], o.Configs[i]) + } + } + +} + +func (m *ClusterSnapshot) Copy() *ClusterSnapshot { + if m == nil { + return nil + } + o := &ClusterSnapshot{} + o.CopyFrom(m) + return o +} + +func (m *ClusterSnapshot) CopyFrom(src interface{}) { + + o := src.(*ClusterSnapshot) + *m = *o + if o.Members != nil { + m.Members = make([]*RaftMember, len(o.Members)) + for i := range m.Members { + m.Members[i] = &RaftMember{} + deepcopy.Copy(m.Members[i], o.Members[i]) + } + } + + if o.Removed != nil { + m.Removed = make([]uint64, len(o.Removed)) + copy(m.Removed, o.Removed) + } + +} + +func (m *Snapshot) Copy() *Snapshot { + if m == nil { + return nil + } + o := &Snapshot{} + o.CopyFrom(m) + return o +} + +func (m *Snapshot) CopyFrom(src interface{}) { + + o := src.(*Snapshot) + *m = *o + deepcopy.Copy(&m.Membership, &o.Membership) + deepcopy.Copy(&m.Store, &o.Store) +} + +func (m *StoreSnapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StoreSnapshot) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Nodes) > 0 { + for _, msg := range m.Nodes { + dAtA[i] = 0xa + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Services) > 0 { + for _, msg := range m.Services { + dAtA[i] = 0x12 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Networks) > 0 { + for _, msg := range m.Networks { + dAtA[i] = 0x1a + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Tasks) > 0 { + for _, msg := range m.Tasks { + dAtA[i] = 0x22 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Clusters) > 0 { + for _, msg := range m.Clusters { + dAtA[i] = 0x2a + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Secrets) > 0 { + for _, msg := range m.Secrets { + dAtA[i] = 0x32 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := 
msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Resources) > 0 { + for _, msg := range m.Resources { + dAtA[i] = 0x3a + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Extensions) > 0 { + for _, msg := range m.Extensions { + dAtA[i] = 0x42 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Configs) > 0 { + for _, msg := range m.Configs { + dAtA[i] = 0x4a + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ClusterSnapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterSnapshot) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0xa + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Removed) > 0 { + for _, num := range m.Removed { + dAtA[i] = 0x10 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(num)) + } + } + return i, nil +} + +func (m *Snapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Version != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(m.Version)) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(m.Membership.Size())) + n1, err := m.Membership.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x1a + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(m.Store.Size())) + n2, err := m.Store.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + return i, nil +} + +func encodeVarintSnapshot(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +func (m *StoreSnapshot) Size() (n int) { + var l int + _ = l + if len(m.Nodes) > 0 { + for _, e := range m.Nodes { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Services) > 0 { + for _, e := range m.Services { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Networks) > 0 { + for _, e := range m.Networks { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Tasks) > 0 { + for _, e := range m.Tasks { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Clusters) > 0 { + for _, e := range m.Clusters { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Extensions) > 0 { + for _, e := range m.Extensions { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Configs) > 0 { + for _, e := range 
m.Configs { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + return n +} + +func (m *ClusterSnapshot) Size() (n int) { + var l int + _ = l + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Removed) > 0 { + for _, e := range m.Removed { + n += 1 + sovSnapshot(uint64(e)) + } + } + return n +} + +func (m *Snapshot) Size() (n int) { + var l int + _ = l + if m.Version != 0 { + n += 1 + sovSnapshot(uint64(m.Version)) + } + l = m.Membership.Size() + n += 1 + l + sovSnapshot(uint64(l)) + l = m.Store.Size() + n += 1 + l + sovSnapshot(uint64(l)) + return n +} + +func sovSnapshot(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozSnapshot(x uint64) (n int) { + return sovSnapshot(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *StoreSnapshot) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreSnapshot{`, + `Nodes:` + strings.Replace(fmt.Sprintf("%v", this.Nodes), "Node", "Node", 1) + `,`, + `Services:` + strings.Replace(fmt.Sprintf("%v", this.Services), "Service", "Service", 1) + `,`, + `Networks:` + strings.Replace(fmt.Sprintf("%v", this.Networks), "Network", "Network", 1) + `,`, + `Tasks:` + strings.Replace(fmt.Sprintf("%v", this.Tasks), "Task", "Task", 1) + `,`, + `Clusters:` + strings.Replace(fmt.Sprintf("%v", this.Clusters), "Cluster", "Cluster", 1) + `,`, + `Secrets:` + strings.Replace(fmt.Sprintf("%v", this.Secrets), "Secret", "Secret", 1) + `,`, + `Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "Resource", "Resource", 1) + `,`, + `Extensions:` + strings.Replace(fmt.Sprintf("%v", this.Extensions), "Extension", "Extension", 1) + `,`, + `Configs:` + strings.Replace(fmt.Sprintf("%v", this.Configs), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterSnapshot) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterSnapshot{`, + `Members:` + strings.Replace(fmt.Sprintf("%v", this.Members), "RaftMember", "RaftMember", 1) + `,`, + `Removed:` + fmt.Sprintf("%v", this.Removed) + `,`, + `}`, + }, "") + return s +} +func (this *Snapshot) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Snapshot{`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `Membership:` + strings.Replace(strings.Replace(this.Membership.String(), "ClusterSnapshot", "ClusterSnapshot", 1), `&`, ``, 1) + `,`, + `Store:` + strings.Replace(strings.Replace(this.Store.String(), "StoreSnapshot", "StoreSnapshot", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringSnapshot(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *StoreSnapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StoreSnapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StoreSnapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { 
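+ // Fields 1-9 are all repeated messages: each occurrence appends a fresh element and delegates the length-delimited payload to that element's own Unmarshal.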
+ case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Nodes = append(m.Nodes, &Node{}) + if err := m.Nodes[len(m.Nodes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Services = append(m.Services, &Service{}) + if err := m.Services[len(m.Services)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Networks = append(m.Networks, &Network{}) + if err := m.Networks[len(m.Networks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tasks = append(m.Tasks, &Task{}) + if err := m.Tasks[len(m.Tasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Clusters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Clusters = append(m.Clusters, &Cluster{}) + if err := m.Clusters[len(m.Clusters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secrets = append(m.Secrets, &Secret{}) + if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, &Resource{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extensions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Extensions = append(m.Extensions, &Extension{}) + if err := m.Extensions[len(m.Extensions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Configs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Configs = append(m.Configs, &Config{}) + if err := m.Configs[len(m.Configs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSnapshot(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSnapshot + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterSnapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterSnapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterSnapshot: illegal tag %d (wire 
type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &RaftMember{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Removed = append(m.Removed, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Removed = append(m.Removed, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Removed", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipSnapshot(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSnapshot + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Snapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= (Snapshot_Version(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Membership", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Membership.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Store", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Store.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSnapshot(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSnapshot + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSnapshot(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnapshot + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnapshot + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnapshot + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthSnapshot + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnapshot + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipSnapshot(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthSnapshot = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSnapshot = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/docker/swarmkit/api/snapshot.proto", fileDescriptorSnapshot) +} + +var fileDescriptorSnapshot = []byte{ + // 493 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xbf, 0x8f, 0xd3, 0x30, + 
0x14, 0xc7, 0x93, 0xf4, 0x47, 0x7a, 0x3e, 0x01, 0x87, 0xc5, 0x60, 0x95, 0x23, 0x94, 0xc2, 0x50, + 0x24, 0x48, 0xa0, 0x20, 0x81, 0x90, 0x8e, 0xa1, 0x27, 0x06, 0x06, 0x6e, 0x70, 0xd1, 0x89, 0x35, + 0x4d, 0xdd, 0x36, 0x84, 0xc4, 0x95, 0x9f, 0xdb, 0x63, 0x84, 0xff, 0xae, 0x23, 0x23, 0x13, 0xe2, + 0xba, 0xf0, 0x6f, 0x20, 0xdb, 0x71, 0xa8, 0x44, 0x7a, 0xb7, 0x45, 0xd6, 0xe7, 0xf3, 0xde, 0xd7, + 0xce, 0x7b, 0xe8, 0xe9, 0x3c, 0x95, 0x8b, 0xd5, 0x24, 0x4c, 0x78, 0x1e, 0x4d, 0x79, 0x92, 0x31, + 0x11, 0xc1, 0x45, 0x2c, 0xf2, 0x2c, 0x95, 0x51, 0xbc, 0x4c, 0x23, 0x28, 0xe2, 0x25, 0x2c, 0xb8, + 0x0c, 0x97, 0x82, 0x4b, 0x8e, 0xb1, 0x61, 0x42, 0xcb, 0x84, 0xeb, 0xe7, 0xdd, 0x27, 0xd7, 0x94, + 0xe0, 0x93, 0xcf, 0x2c, 0x91, 0x60, 0x2a, 0x74, 0x1f, 0x5f, 0x43, 0x8b, 0x78, 0x56, 0x36, 0xeb, + 0xde, 0x99, 0xf3, 0x39, 0xd7, 0x9f, 0x91, 0xfa, 0x32, 0xa7, 0xfd, 0xef, 0x4d, 0x74, 0x63, 0x2c, + 0xb9, 0x60, 0xe3, 0x32, 0x1a, 0x0e, 0x51, 0xab, 0xe0, 0x53, 0x06, 0xc4, 0xed, 0x35, 0x06, 0x87, + 0x43, 0x12, 0xfe, 0x1f, 0x32, 0x3c, 0xe3, 0x53, 0x46, 0x0d, 0x86, 0x5f, 0xa1, 0x0e, 0x30, 0xb1, + 0x4e, 0x13, 0x06, 0xc4, 0xd3, 0xca, 0xdd, 0x3a, 0x65, 0x6c, 0x18, 0x5a, 0xc1, 0x4a, 0x2c, 0x98, + 0xbc, 0xe0, 0x22, 0x03, 0xd2, 0xd8, 0x2f, 0x9e, 0x19, 0x86, 0x56, 0xb0, 0x4a, 0x28, 0x63, 0xc8, + 0x80, 0x34, 0xf7, 0x27, 0xfc, 0x18, 0x43, 0x46, 0x0d, 0xa6, 0x1a, 0x25, 0x5f, 0x56, 0x20, 0x99, + 0x00, 0xd2, 0xda, 0xdf, 0xe8, 0xd4, 0x30, 0xb4, 0x82, 0xf1, 0x4b, 0xe4, 0x03, 0x4b, 0x04, 0x93, + 0x40, 0xda, 0xda, 0xeb, 0xd6, 0xdf, 0x4c, 0x21, 0xd4, 0xa2, 0xf8, 0x0d, 0x3a, 0x10, 0x0c, 0xf8, + 0x4a, 0xa8, 0x17, 0xf1, 0xb5, 0x77, 0x5c, 0xe7, 0xd1, 0x12, 0xa2, 0xff, 0x70, 0x7c, 0x82, 0x10, + 0xfb, 0x2a, 0x59, 0x01, 0x29, 0x2f, 0x80, 0x74, 0xb4, 0x7c, 0xaf, 0x4e, 0x7e, 0x67, 0x29, 0xba, + 0x23, 0xa8, 0xc0, 0x09, 0x2f, 0x66, 0xe9, 0x1c, 0xc8, 0xc1, 0xfe, 0xc0, 0xa7, 0x1a, 0xa1, 0x16, + 0xed, 0xa7, 0xe8, 0x56, 0x79, 0xf7, 0x6a, 0x08, 0x5e, 0x23, 0x3f, 0x67, 0xf9, 0x44, 0xbd, 0x98, + 0x19, 0x83, 0xa0, 0xf6, 0x06, 0xf1, 0x4c, 0x7e, 0xd0, 0x18, 0xb5, 0x38, 0x3e, 0x46, 0xbe, 0x60, + 0x39, 0x5f, 0xb3, 0xa9, 0x9e, 0x86, 0xe6, 0xc8, 0x3b, 0x72, 0xa8, 0x3d, 0xea, 0xff, 0x71, 0x51, + 0xa7, 0x6a, 0xf2, 0x16, 0xf9, 0x6b, 0x26, 0x54, 0x72, 0xe2, 0xf6, 0xdc, 0xc1, 0xcd, 0xe1, 0xa3, + 0xda, 0xe7, 0xb5, 0x3b, 0x73, 0x6e, 0x58, 0x6a, 0x25, 0xfc, 0x1e, 0xa1, 0xb2, 0xeb, 0x22, 0x5d, + 0x12, 0xaf, 0xe7, 0x0e, 0x0e, 0x87, 0x0f, 0xaf, 0xf8, 0xb3, 0xb6, 0xd2, 0xa8, 0xb9, 0xf9, 0x75, + 0xdf, 0xa1, 0x3b, 0x32, 0x3e, 0x41, 0x2d, 0x50, 0x5b, 0x40, 0x1a, 0xba, 0xca, 0x83, 0xda, 0x20, + 0xbb, 0x6b, 0x52, 0xd6, 0x30, 0x56, 0xff, 0x36, 0xf2, 0xcb, 0x74, 0xb8, 0x8d, 0xbc, 0xf3, 0x67, + 0x47, 0xce, 0x88, 0x6c, 0x2e, 0x03, 0xe7, 0xe7, 0x65, 0xe0, 0x7c, 0xdb, 0x06, 0xee, 0x66, 0x1b, + 0xb8, 0x3f, 0xb6, 0x81, 0xfb, 0x7b, 0x1b, 0xb8, 0x9f, 0xbc, 0x49, 0x5b, 0xef, 0xde, 0x8b, 0xbf, + 0x01, 0x00, 0x00, 0xff, 0xff, 0xfd, 0xbe, 0x47, 0xec, 0x2f, 0x04, 0x00, 0x00, +} diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/snapshot.proto b/extender/code/vendor/github.com/docker/swarmkit/api/snapshot.proto new file mode 100644 index 000000000..91e9592d4 --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/api/snapshot.proto @@ -0,0 +1,44 @@ +syntax = "proto3"; + +package docker.swarmkit.v1; + +import "github.com/docker/swarmkit/api/objects.proto"; +import "github.com/docker/swarmkit/api/raft.proto"; +import weak "gogoproto/gogo.proto"; + +// StoreSnapshot is used to store snapshots of the store. 
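+// Field numbers are wire-format identifiers: they must stay stable across releases so that previously written snapshots remain decodable.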
+message StoreSnapshot { + // TODO(aaronl): The current method of assembling a StoreSnapshot + // structure and marshalling it is not optimal. It may be better to + // write out nodes, networks, tasks, etc. one at a time to an io.Writer + // using gogo-protobuf's io.DelimitedWriter. A new value of the version + // field could support this approach. + + repeated Node nodes = 1; + repeated Service services = 2; + repeated Network networks = 3; + repeated Task tasks = 4; + repeated Cluster clusters = 5; + repeated Secret secrets = 6; + repeated Resource resources = 7; + repeated Extension extensions = 8; + repeated Config configs = 9; +} + +// ClusterSnapshot stores cluster membership information in snapshots. +message ClusterSnapshot { + repeated RaftMember members = 1; + repeated uint64 removed = 2 [packed=false]; +} + +message Snapshot { + enum Version { + // V0 is the initial version of the StoreSnapshot message. + V0 = 0; + } + + Version version = 1; + + ClusterSnapshot membership = 2 [(gogoproto.nullable) = false]; + StoreSnapshot store = 3 [(gogoproto.nullable) = false]; +} diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/specs.pb.go b/extender/code/vendor/github.com/docker/swarmkit/api/specs.pb.go new file mode 100644 index 000000000..2930afc6c --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/api/specs.pb.go @@ -0,0 +1,6726 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/docker/swarmkit/api/specs.proto + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import google_protobuf1 "github.com/gogo/protobuf/types" +import google_protobuf3 "github.com/gogo/protobuf/types" +import google_protobuf4 "github.com/gogo/protobuf/types" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import strings "strings" +import reflect "reflect" +import sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type NodeSpec_Membership int32 + +const ( + NodeMembershipPending NodeSpec_Membership = 0 + NodeMembershipAccepted NodeSpec_Membership = 1 +) + +var NodeSpec_Membership_name = map[int32]string{ + 0: "PENDING", + 1: "ACCEPTED", +} +var NodeSpec_Membership_value = map[string]int32{ + "PENDING": 0, + "ACCEPTED": 1, +} + +func (x NodeSpec_Membership) String() string { + return proto.EnumName(NodeSpec_Membership_name, int32(x)) +} +func (NodeSpec_Membership) EnumDescriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{0, 0} } + +type NodeSpec_Availability int32 + +const ( + // Active nodes. + NodeAvailabilityActive NodeSpec_Availability = 0 + // Paused nodes won't be considered by the scheduler, preventing any + // further tasks from running on them. + NodeAvailabilityPause NodeSpec_Availability = 1 + // Drained nodes are paused and any task already running on them will + // be evicted.
+ NodeAvailabilityDrain NodeSpec_Availability = 2 +) + +var NodeSpec_Availability_name = map[int32]string{ + 0: "ACTIVE", + 1: "PAUSE", + 2: "DRAIN", +} +var NodeSpec_Availability_value = map[string]int32{ + "ACTIVE": 0, + "PAUSE": 1, + "DRAIN": 2, +} + +func (x NodeSpec_Availability) String() string { + return proto.EnumName(NodeSpec_Availability_name, int32(x)) +} +func (NodeSpec_Availability) EnumDescriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{0, 1} } + +type ContainerSpec_Isolation int32 + +const ( + // ISOLATION_DEFAULT uses whatever default value the container runtime provides + ContainerIsolationDefault ContainerSpec_Isolation = 0 + // ISOLATION_PROCESS forces Windows container isolation + ContainerIsolationProcess ContainerSpec_Isolation = 1 + // ISOLATION_HYPERV forces Hyper-V isolation + ContainerIsolationHyperV ContainerSpec_Isolation = 2 +) + +var ContainerSpec_Isolation_name = map[int32]string{ + 0: "ISOLATION_DEFAULT", + 1: "ISOLATION_PROCESS", + 2: "ISOLATION_HYPERV", +} +var ContainerSpec_Isolation_value = map[string]int32{ + "ISOLATION_DEFAULT": 0, + "ISOLATION_PROCESS": 1, + "ISOLATION_HYPERV": 2, +} + +func (x ContainerSpec_Isolation) String() string { + return proto.EnumName(ContainerSpec_Isolation_name, int32(x)) +} +func (ContainerSpec_Isolation) EnumDescriptor() ([]byte, []int) { + return fileDescriptorSpecs, []int{8, 0} +} + +// ResolutionMode specifies the mode of resolution to use for +// internal load balancing between tasks which are all within +// the cluster. This is sometimes called the east-west data path. +type EndpointSpec_ResolutionMode int32 + +const ( + // VIP resolution mode specifies that the + // service resolves to a logical IP and the requests + // are sent to that logical IP. Packets hitting that + // logical IP are load balanced to a chosen backend. + ResolutionModeVirtualIP EndpointSpec_ResolutionMode = 0 + // DNSRR resolution mode specifies that the + // service directly gets resolved to one of the + // backend IPs and the client directly initiates a + // request towards the actual backend. This requires + // that the client does not cache the DNS responses + // when the DNS response TTL is 0. + ResolutionModeDNSRoundRobin EndpointSpec_ResolutionMode = 1 +) + +var EndpointSpec_ResolutionMode_name = map[int32]string{ + 0: "VIP", + 1: "DNSRR", +} +var EndpointSpec_ResolutionMode_value = map[string]int32{ + "VIP": 0, + "DNSRR": 1, +} + +func (x EndpointSpec_ResolutionMode) String() string { + return proto.EnumName(EndpointSpec_ResolutionMode_name, int32(x)) +} +func (EndpointSpec_ResolutionMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptorSpecs, []int{9, 0} +} + +type NodeSpec struct { + Annotations Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations"` + // DesiredRole defines the role the node should have. + DesiredRole NodeRole `protobuf:"varint,2,opt,name=desired_role,json=desiredRole,proto3,enum=docker.swarmkit.v1.NodeRole" json:"desired_role,omitempty"` + // Membership controls the admission of the node into the cluster. + Membership NodeSpec_Membership `protobuf:"varint,3,opt,name=membership,proto3,enum=docker.swarmkit.v1.NodeSpec_Membership" json:"membership,omitempty"` + // Availability allows a user to control the current scheduling status of a + // node.
+ Availability NodeSpec_Availability `protobuf:"varint,4,opt,name=availability,proto3,enum=docker.swarmkit.v1.NodeSpec_Availability" json:"availability,omitempty"` +} + +func (m *NodeSpec) Reset() { *m = NodeSpec{} } +func (*NodeSpec) ProtoMessage() {} +func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{0} } + +// ServiceSpec defines the properties of a service. +// +// A service instructs the cluster in orchestrating repeated instances of a +// template, implemented as tasks. Based on the number of instances, scheduling +// strategy and restart policy, a number of application-level behaviors can be +// defined. +type ServiceSpec struct { + Annotations Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations"` + // Task defines the task template this service will spawn. + Task TaskSpec `protobuf:"bytes,2,opt,name=task" json:"task"` + // Types that are valid to be assigned to Mode: + // *ServiceSpec_Replicated + // *ServiceSpec_Global + Mode isServiceSpec_Mode `protobuf_oneof:"mode"` + // Update contains settings which affect updates. + Update *UpdateConfig `protobuf:"bytes,6,opt,name=update" json:"update,omitempty"` + // Rollback contains settings which affect rollbacks of updates. + Rollback *UpdateConfig `protobuf:"bytes,9,opt,name=rollback" json:"rollback,omitempty"` + // ServiceSpec.Networks has been deprecated and is replaced by + // Networks field in Task (TaskSpec.Networks). + // This field (ServiceSpec.Networks) is kept for compatibility. + // In case TaskSpec.Networks does not exist, ServiceSpec.Networks + // is still honored if it exists. + Networks []*NetworkAttachmentConfig `protobuf:"bytes,7,rep,name=networks" json:"networks,omitempty"` + // Service endpoint specifies the user provided configuration + // to properly discover and load balance a service. + Endpoint *EndpointSpec `protobuf:"bytes,8,opt,name=endpoint" json:"endpoint,omitempty"` +} + +func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } +func (*ServiceSpec) ProtoMessage() {} +func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{1} } + +type isServiceSpec_Mode interface { + isServiceSpec_Mode() + MarshalTo([]byte) (int, error) + Size() int +} + +type ServiceSpec_Replicated struct { + Replicated *ReplicatedService `protobuf:"bytes,3,opt,name=replicated,oneof"` +} +type ServiceSpec_Global struct { + Global *GlobalService `protobuf:"bytes,4,opt,name=global,oneof"` +} + +func (*ServiceSpec_Replicated) isServiceSpec_Mode() {} +func (*ServiceSpec_Global) isServiceSpec_Mode() {} + +func (m *ServiceSpec) GetMode() isServiceSpec_Mode { + if m != nil { + return m.Mode + } + return nil +} + +func (m *ServiceSpec) GetReplicated() *ReplicatedService { + if x, ok := m.GetMode().(*ServiceSpec_Replicated); ok { + return x.Replicated + } + return nil +} + +func (m *ServiceSpec) GetGlobal() *GlobalService { + if x, ok := m.GetMode().(*ServiceSpec_Global); ok { + return x.Global + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
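+// For ServiceSpec the oneof is `mode`: field 3 carries ReplicatedService and +// field 4 carries GlobalService, both encoded as length-delimited messages by +// the marshaler below.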
+func (*ServiceSpec) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ServiceSpec_OneofMarshaler, _ServiceSpec_OneofUnmarshaler, _ServiceSpec_OneofSizer, []interface{}{ + (*ServiceSpec_Replicated)(nil), + (*ServiceSpec_Global)(nil), + } +} + +func _ServiceSpec_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ServiceSpec) + // mode + switch x := m.Mode.(type) { + case *ServiceSpec_Replicated: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Replicated); err != nil { + return err + } + case *ServiceSpec_Global: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Global); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ServiceSpec.Mode has unexpected type %T", x) + } + return nil +} + +func _ServiceSpec_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ServiceSpec) + switch tag { + case 3: // mode.replicated + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ReplicatedService) + err := b.DecodeMessage(msg) + m.Mode = &ServiceSpec_Replicated{msg} + return true, err + case 4: // mode.global + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GlobalService) + err := b.DecodeMessage(msg) + m.Mode = &ServiceSpec_Global{msg} + return true, err + default: + return false, nil + } +} + +func _ServiceSpec_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ServiceSpec) + // mode + switch x := m.Mode.(type) { + case *ServiceSpec_Replicated: + s := proto.Size(x.Replicated) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ServiceSpec_Global: + s := proto.Size(x.Global) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// ReplicatedService sets the reconciliation target to a certain number of replicas. +type ReplicatedService struct { + Replicas uint64 `protobuf:"varint,1,opt,name=replicas,proto3" json:"replicas,omitempty"` +} + +func (m *ReplicatedService) Reset() { *m = ReplicatedService{} } +func (*ReplicatedService) ProtoMessage() {} +func (*ReplicatedService) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{2} } + +// GlobalService represents a global service. +type GlobalService struct { +} + +func (m *GlobalService) Reset() { *m = GlobalService{} } +func (*GlobalService) ProtoMessage() {} +func (*GlobalService) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{3} } + +type TaskSpec struct { + // Types that are valid to be assigned to Runtime: + // *TaskSpec_Attachment + // *TaskSpec_Container + // *TaskSpec_Generic + Runtime isTaskSpec_Runtime `protobuf_oneof:"runtime"` + // Resource requirements for the container. + Resources *ResourceRequirements `protobuf:"bytes,2,opt,name=resources" json:"resources,omitempty"` + // RestartPolicy specifies what to do when a task fails or finishes. + Restart *RestartPolicy `protobuf:"bytes,4,opt,name=restart" json:"restart,omitempty"` + // Placement specifies node selection constraints. + Placement *Placement `protobuf:"bytes,5,opt,name=placement" json:"placement,omitempty"` + // LogDriver specifies the log driver to use for the task.
Any runtime will + // direct logs into the specified driver for the duration of the task. + LogDriver *Driver `protobuf:"bytes,6,opt,name=log_driver,json=logDriver" json:"log_driver,omitempty"` + // Networks specifies the list of network attachment + // configurations (which specify the network and per-network + // aliases) that this task spec is bound to. + Networks []*NetworkAttachmentConfig `protobuf:"bytes,7,rep,name=networks" json:"networks,omitempty"` + // ForceUpdate is a counter that triggers an update even if no relevant + // parameters have been changed. We do this to allow forced restarts + // using the same reconciliation-based mechanism that performs rolling + // updates. + ForceUpdate uint64 `protobuf:"varint,9,opt,name=force_update,json=forceUpdate,proto3" json:"force_update,omitempty"` + // ResourceReferences provides a generic way to specify resources that + // are used by this task, and should be sent down to agents along with + // the task. Inside the runtime field there may be more specific + // information about how to use the resource, but ResourceReferences + // establishes the relationship at the store level, and instructs the + // dispatcher to send the related objects. + // + // ResourceReferences is a list of ResourceReferences used by the task. + ResourceReferences []ResourceReference `protobuf:"bytes,11,rep,name=resource_references,json=resourceReferences" json:"resource_references"` +} + +func (m *TaskSpec) Reset() { *m = TaskSpec{} } +func (*TaskSpec) ProtoMessage() {} +func (*TaskSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{4} } + +type isTaskSpec_Runtime interface { + isTaskSpec_Runtime() + MarshalTo([]byte) (int, error) + Size() int +} + +type TaskSpec_Attachment struct { + Attachment *NetworkAttachmentSpec `protobuf:"bytes,8,opt,name=attachment,oneof"` +} +type TaskSpec_Container struct { + Container *ContainerSpec `protobuf:"bytes,1,opt,name=container,oneof"` +} +type TaskSpec_Generic struct { + Generic *GenericRuntimeSpec `protobuf:"bytes,10,opt,name=generic,oneof"` +} + +func (*TaskSpec_Attachment) isTaskSpec_Runtime() {} +func (*TaskSpec_Container) isTaskSpec_Runtime() {} +func (*TaskSpec_Generic) isTaskSpec_Runtime() {} + +func (m *TaskSpec) GetRuntime() isTaskSpec_Runtime { + if m != nil { + return m.Runtime + } + return nil +} + +func (m *TaskSpec) GetAttachment() *NetworkAttachmentSpec { + if x, ok := m.GetRuntime().(*TaskSpec_Attachment); ok { + return x.Attachment + } + return nil +} + +func (m *TaskSpec) GetContainer() *ContainerSpec { + if x, ok := m.GetRuntime().(*TaskSpec_Container); ok { + return x.Container + } + return nil +} + +func (m *TaskSpec) GetGeneric() *GenericRuntimeSpec { + if x, ok := m.GetRuntime().(*TaskSpec_Generic); ok { + return x.Generic + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
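+// For TaskSpec the oneof is `runtime`: field 1 carries ContainerSpec, field 8 +// carries NetworkAttachmentSpec, and field 10 carries GenericRuntimeSpec, each +// encoded as a length-delimited message by the marshaler below.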
+func (*TaskSpec) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _TaskSpec_OneofMarshaler, _TaskSpec_OneofUnmarshaler, _TaskSpec_OneofSizer, []interface{}{ + (*TaskSpec_Attachment)(nil), + (*TaskSpec_Container)(nil), + (*TaskSpec_Generic)(nil), + } +} + +func _TaskSpec_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*TaskSpec) + // runtime + switch x := m.Runtime.(type) { + case *TaskSpec_Attachment: + _ = b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Attachment); err != nil { + return err + } + case *TaskSpec_Container: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Container); err != nil { + return err + } + case *TaskSpec_Generic: + _ = b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Generic); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("TaskSpec.Runtime has unexpected type %T", x) + } + return nil +} + +func _TaskSpec_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*TaskSpec) + switch tag { + case 8: // runtime.attachment + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(NetworkAttachmentSpec) + err := b.DecodeMessage(msg) + m.Runtime = &TaskSpec_Attachment{msg} + return true, err + case 1: // runtime.container + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ContainerSpec) + err := b.DecodeMessage(msg) + m.Runtime = &TaskSpec_Container{msg} + return true, err + case 10: // runtime.generic + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GenericRuntimeSpec) + err := b.DecodeMessage(msg) + m.Runtime = &TaskSpec_Generic{msg} + return true, err + default: + return false, nil + } +} + +func _TaskSpec_OneofSizer(msg proto.Message) (n int) { + m := msg.(*TaskSpec) + // runtime + switch x := m.Runtime.(type) { + case *TaskSpec_Attachment: + s := proto.Size(x.Attachment) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *TaskSpec_Container: + s := proto.Size(x.Container) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *TaskSpec_Generic: + s := proto.Size(x.Generic) + n += proto.SizeVarint(10<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type ResourceReference struct { + ResourceID string `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` + ResourceType ResourceType `protobuf:"varint,2,opt,name=resource_type,json=resourceType,proto3,enum=docker.swarmkit.v1.ResourceType" json:"resource_type,omitempty"` +} + +func (m *ResourceReference) Reset() { *m = ResourceReference{} } +func (*ResourceReference) ProtoMessage() {} +func (*ResourceReference) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{5} } + +type GenericRuntimeSpec struct { + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + Payload *google_protobuf3.Any `protobuf:"bytes,2,opt,name=payload" json:"payload,omitempty"` +} + +func (m *GenericRuntimeSpec) Reset() { *m = GenericRuntimeSpec{} } +func (*GenericRuntimeSpec) ProtoMessage() {} +func (*GenericRuntimeSpec) Descriptor() ([]byte, []int) { 
return fileDescriptorSpecs, []int{6} } + +// NetworkAttachmentSpec specifies runtime parameters required to attach +// a container to a network. +type NetworkAttachmentSpec struct { + // ContainerID specifies a unique ID of the container that + // this attachment is for. + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` +} + +func (m *NetworkAttachmentSpec) Reset() { *m = NetworkAttachmentSpec{} } +func (*NetworkAttachmentSpec) ProtoMessage() {} +func (*NetworkAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{7} } + +// Container specifies runtime parameters for a container. +type ContainerSpec struct { + // image defines the image reference, as specified in the + // distribution/reference package. This may include a registry host, name, + // tag or digest. + // + // The field will be directly passed to the engine pulling. Well-behaved + // service definitions will use immutable references, either through tags + // that don't change or verifiable digests. + Image string `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"` + // Labels defines labels to be added to the container at creation time. If + // collisions with system labels occur, these labels will be overridden. + // + // This field *must* remain compatible with the Labels field of + // Annotations. + Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Command to run in the container. The first element is a path to the + // executable and the following elements are treated as arguments. + // + // If command is empty, execution will fall back to the image's entrypoint. + // + // Command should only be used when overriding entrypoint. + Command []string `protobuf:"bytes,3,rep,name=command" json:"command,omitempty"` + // Args specifies arguments provided to the image's entrypoint. + // + // If Command and Args are provided, Args will be appended to Command. + Args []string `protobuf:"bytes,4,rep,name=args" json:"args,omitempty"` + // Hostname specifies the hostname that will be set on containers created by docker swarm. + // All containers for a given service will have the same hostname. + Hostname string `protobuf:"bytes,14,opt,name=hostname,proto3" json:"hostname,omitempty"` + // Env specifies the environment variables for the container in NAME=VALUE + // format. These must be compliant with [IEEE Std + // 1003.1-2001](http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html). + Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` + // Dir defines the working directory to set for the container process. + Dir string `protobuf:"bytes,6,opt,name=dir,proto3" json:"dir,omitempty"` + // User specifies the user that should be employed to run the container. + // + // Note that the primary group may be specified by appending the group name + // or id to the user name, separated by a `:`. This syntax is + // `<user>:<group>`. + User string `protobuf:"bytes,7,opt,name=user,proto3" json:"user,omitempty"` + // Groups specifies supplementary groups available to the user. + Groups []string `protobuf:"bytes,11,rep,name=groups" json:"groups,omitempty"` + // Privileges specifies security configuration/permissions.
+ Privileges *Privileges `protobuf:"bytes,22,opt,name=privileges" json:"privileges,omitempty"` + // Init declares that a custom init will be running inside the container; if null, the daemon's configured settings are used + Init *google_protobuf4.BoolValue `protobuf:"bytes,23,opt,name=init" json:"init,omitempty"` + // TTY declares that a TTY should be attached to the standard streams, + // including stdin if it is still open. + TTY bool `protobuf:"varint,13,opt,name=tty,proto3" json:"tty,omitempty"` + // OpenStdin declares that the standard input (stdin) should be open. + OpenStdin bool `protobuf:"varint,18,opt,name=open_stdin,json=openStdin,proto3" json:"open_stdin,omitempty"` + // ReadOnly declares that the container root filesystem is read-only. + // This only impacts the root filesystem, not additional mounts (including + // tmpfs). For additional mounts that are not part of the initial rootfs, + // they will be decided by the modes passed in the mount definition. + ReadOnly bool `protobuf:"varint,19,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + // StopSignal defines the signal to stop the container. + StopSignal string `protobuf:"bytes,20,opt,name=stop_signal,json=stopSignal,proto3" json:"stop_signal,omitempty"` + Mounts []Mount `protobuf:"bytes,8,rep,name=mounts" json:"mounts"` + // StopGracePeriod is the grace period for stopping the container before + // forcefully killing the container. + // Note: Can't use stdduration here because this needs to be nullable. + StopGracePeriod *google_protobuf1.Duration `protobuf:"bytes,9,opt,name=stop_grace_period,json=stopGracePeriod" json:"stop_grace_period,omitempty"` + // PullOptions parameterize the behavior of image pulls. + PullOptions *ContainerSpec_PullOptions `protobuf:"bytes,10,opt,name=pull_options,json=pullOptions" json:"pull_options,omitempty"` + // SecretReference contains references to zero or more secrets that + // will be exposed to the container. + Secrets []*SecretReference `protobuf:"bytes,12,rep,name=secrets" json:"secrets,omitempty"` + // ConfigReference contains references to zero or more configs that + // will be exposed to the container. + Configs []*ConfigReference `protobuf:"bytes,21,rep,name=configs" json:"configs,omitempty"` + // Hosts allows additional entries to be specified in /etc/hosts + // that associate IP addresses with hostnames. + // Detailed documentation is available in: + // http://man7.org/linux/man-pages/man5/hosts.5.html + // IP_address canonical_hostname [aliases...] + // + // The format of the Hosts in swarmkit follows the same as + // above. + // This is different from `docker run --add-host <hostname>:<IP>` + // where the format is `<hostname>:<IP>` + Hosts []string `protobuf:"bytes,17,rep,name=hosts" json:"hosts,omitempty"` + // DNSConfig allows one to specify DNS-related configuration in resolv.conf + DNSConfig *ContainerSpec_DNSConfig `protobuf:"bytes,15,opt,name=dns_config,json=dnsConfig" json:"dns_config,omitempty"` + // Healthcheck describes how to check that the container is healthy. If the + // container is considered unhealthy, it will be destroyed, its creating + // task will exit and a new task will be rescheduled elsewhere. A container + // is considered unhealthy after `Retries` number of consecutive failures. + Healthcheck *HealthConfig `protobuf:"bytes,16,opt,name=healthcheck" json:"healthcheck,omitempty"` + // Isolation defines the isolation level for Windows containers (default, process, hyperv).
+ // Runtimes that don't support it ignore this field. + Isolation ContainerSpec_Isolation `protobuf:"varint,24,opt,name=isolation,proto3,enum=docker.swarmkit.v1.ContainerSpec_Isolation" json:"isolation,omitempty"` + // PidsLimit prevents OS resource damage by applications inside the container + // using a fork bomb attack. + PidsLimit int64 `protobuf:"varint,25,opt,name=pidsLimit,proto3" json:"pidsLimit,omitempty"` +} + +func (m *ContainerSpec) Reset() { *m = ContainerSpec{} } +func (*ContainerSpec) ProtoMessage() {} +func (*ContainerSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{8} } + +// PullOptions allows one to parameterize an image pull. +type ContainerSpec_PullOptions struct { + // RegistryAuth is the registry auth token obtained from the client, required + // to pull private images. This is the unmodified JSON used as part of + // the `X-Registry-Auth` header. + // TODO(nishanttotla): This field will later be deprecated + RegistryAuth string `protobuf:"bytes,64,opt,name=registry_auth,json=registryAuth,proto3" json:"registry_auth,omitempty"` +} + +func (m *ContainerSpec_PullOptions) Reset() { *m = ContainerSpec_PullOptions{} } +func (*ContainerSpec_PullOptions) ProtoMessage() {} +func (*ContainerSpec_PullOptions) Descriptor() ([]byte, []int) { + return fileDescriptorSpecs, []int{8, 1} +} + +// DNSConfig specifies DNS-related configuration in the resolver configuration file (resolv.conf) +// Detailed documentation is available in: +// http://man7.org/linux/man-pages/man5/resolv.conf.5.html +// TODO: domain is not supported yet +type ContainerSpec_DNSConfig struct { + // Nameservers specifies the IP addresses of the name servers + Nameservers []string `protobuf:"bytes,1,rep,name=nameservers" json:"nameservers,omitempty"` + // Search specifies the search list for host-name lookup + Search []string `protobuf:"bytes,2,rep,name=search" json:"search,omitempty"` + // Options allows certain internal resolver variables to be modified + Options []string `protobuf:"bytes,3,rep,name=options" json:"options,omitempty"` +} + +func (m *ContainerSpec_DNSConfig) Reset() { *m = ContainerSpec_DNSConfig{} } +func (*ContainerSpec_DNSConfig) ProtoMessage() {} +func (*ContainerSpec_DNSConfig) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{8, 2} } + +// EndpointSpec defines the properties that can be configured to +// access and load balance the service. +type EndpointSpec struct { + Mode EndpointSpec_ResolutionMode `protobuf:"varint,1,opt,name=mode,proto3,enum=docker.swarmkit.v1.EndpointSpec_ResolutionMode" json:"mode,omitempty"` + // List of exposed ports through which this service is accessible from + // outside the cluster. + Ports []*PortConfig `protobuf:"bytes,2,rep,name=ports" json:"ports,omitempty"` +} + +func (m *EndpointSpec) Reset() { *m = EndpointSpec{} } +func (*EndpointSpec) ProtoMessage() {} +func (*EndpointSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{9} } + +// NetworkSpec specifies user-defined network parameters. +type NetworkSpec struct { + Annotations Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations"` + // DriverConfig is the driver-specific configuration consumed by the network driver. + DriverConfig *Driver `protobuf:"bytes,2,opt,name=driver_config,json=driverConfig" json:"driver_config,omitempty"` + // IPv6Enabled enables support for IPv6 on the network.
+ Ipv6Enabled bool `protobuf:"varint,3,opt,name=ipv6_enabled,json=ipv6Enabled,proto3" json:"ipv6_enabled,omitempty"` + // internal restricts external access to the network. This may be + // accomplished by disabling the default gateway or through other means. + Internal bool `protobuf:"varint,4,opt,name=internal,proto3" json:"internal,omitempty"` + IPAM *IPAMOptions `protobuf:"bytes,5,opt,name=ipam" json:"ipam,omitempty"` + // Attachable allows external (to swarm) entities to manually + // attach to this network. With this flag enabled, external + // entities such as containers running on a worker node in + // the cluster can manually attach to this network and access + // the services attached to this network. If this flag is not + // enabled (the default case), no manual attachment to this network + // can happen. + Attachable bool `protobuf:"varint,6,opt,name=attachable,proto3" json:"attachable,omitempty"` + // Ingress indicates this network will provide the routing-mesh. + // In older versions, the network providing the routing mesh was + // created internally by swarm only, and it was identified by the name + // "ingress" and the label "com.docker.swarm.internal": "true". + Ingress bool `protobuf:"varint,7,opt,name=ingress,proto3" json:"ingress,omitempty"` + // ConfigFrom is the source of the configuration for this network. + // + // Types that are valid to be assigned to ConfigFrom: + // *NetworkSpec_Network + ConfigFrom isNetworkSpec_ConfigFrom `protobuf_oneof:"config_from"` +} + +func (m *NetworkSpec) Reset() { *m = NetworkSpec{} } +func (*NetworkSpec) ProtoMessage() {} +func (*NetworkSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{10} } + +type isNetworkSpec_ConfigFrom interface { + isNetworkSpec_ConfigFrom() + MarshalTo([]byte) (int, error) + Size() int +} + +type NetworkSpec_Network struct { + Network string `protobuf:"bytes,8,opt,name=network,proto3,oneof"` +} + +func (*NetworkSpec_Network) isNetworkSpec_ConfigFrom() {} + +func (m *NetworkSpec) GetConfigFrom() isNetworkSpec_ConfigFrom { + if m != nil { + return m.ConfigFrom + } + return nil +} + +func (m *NetworkSpec) GetNetwork() string { + if x, ok := m.GetConfigFrom().(*NetworkSpec_Network); ok { + return x.Network + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package.
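+// For NetworkSpec the oneof is `config_from`; its only member, `network` (field 8), +// is a plain string rather than a message, so the marshaler below uses +// EncodeStringBytes instead of EncodeMessage.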
+func (*NetworkSpec) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _NetworkSpec_OneofMarshaler, _NetworkSpec_OneofUnmarshaler, _NetworkSpec_OneofSizer, []interface{}{ + (*NetworkSpec_Network)(nil), + } +} + +func _NetworkSpec_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*NetworkSpec) + // config_from + switch x := m.ConfigFrom.(type) { + case *NetworkSpec_Network: + _ = b.EncodeVarint(8<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.Network) + case nil: + default: + return fmt.Errorf("NetworkSpec.ConfigFrom has unexpected type %T", x) + } + return nil +} + +func _NetworkSpec_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*NetworkSpec) + switch tag { + case 8: // config_from.network + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.ConfigFrom = &NetworkSpec_Network{x} + return true, err + default: + return false, nil + } +} + +func _NetworkSpec_OneofSizer(msg proto.Message) (n int) { + m := msg.(*NetworkSpec) + // config_from + switch x := m.ConfigFrom.(type) { + case *NetworkSpec_Network: + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Network))) + n += len(x.Network) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// ClusterSpec specifies global cluster settings. +type ClusterSpec struct { + Annotations Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations"` + // DEPRECATED: AcceptancePolicy defines the certificate issuance policy. + // Acceptance policy is no longer customizable, and secrets have been + // replaced with join tokens. + AcceptancePolicy AcceptancePolicy `protobuf:"bytes,2,opt,name=acceptance_policy,json=acceptancePolicy" json:"acceptance_policy"` + // Orchestration defines cluster-level orchestration settings. + Orchestration OrchestrationConfig `protobuf:"bytes,3,opt,name=orchestration" json:"orchestration"` + // Raft defines the cluster's raft settings. + Raft RaftConfig `protobuf:"bytes,4,opt,name=raft" json:"raft"` + // Dispatcher defines cluster-level dispatcher settings. + Dispatcher DispatcherConfig `protobuf:"bytes,5,opt,name=dispatcher" json:"dispatcher"` + // CAConfig defines cluster-level certificate authority settings. + CAConfig CAConfig `protobuf:"bytes,6,opt,name=ca_config,json=caConfig" json:"ca_config"` + // TaskDefaults specifies the default values to use for task creation. + TaskDefaults TaskDefaults `protobuf:"bytes,7,opt,name=task_defaults,json=taskDefaults" json:"task_defaults"` + // EncryptionConfig defines the cluster's encryption settings. + EncryptionConfig EncryptionConfig `protobuf:"bytes,8,opt,name=encryption_config,json=encryptionConfig" json:"encryption_config"` +} + +func (m *ClusterSpec) Reset() { *m = ClusterSpec{} } +func (*ClusterSpec) ProtoMessage() {} +func (*ClusterSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{11} } + +// SecretSpec specifies a user-provided secret. 
+type SecretSpec struct { + Annotations Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations"` + // Data is the secret payload - the maximum size is 500KB (that is, 500*1024 bytes) + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + // Templating controls whether and how to evaluate the secret payload as + // a template. If it is not set, no templating is used. + // + // The currently recognized values are: + // - golang: Go templating + Templating *Driver `protobuf:"bytes,3,opt,name=templating" json:"templating,omitempty"` + // Driver is the secret driver that is used to store the specified secret + Driver *Driver `protobuf:"bytes,4,opt,name=driver" json:"driver,omitempty"` +} + +func (m *SecretSpec) Reset() { *m = SecretSpec{} } +func (*SecretSpec) ProtoMessage() {} +func (*SecretSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{12} } + +// ConfigSpec specifies user-provided configuration files. +type ConfigSpec struct { + Annotations Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations"` + // Data is the config payload - the maximum size is 500KB (that is, 500*1024 bytes) + // TODO(aaronl): Do we want to revise this to include multiple payloads in a single + // ConfigSpec? Define this to be a tar? etc... + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + // Templating controls whether and how to evaluate the config payload as + // a template. If it is not set, no templating is used. + // + // The currently recognized values are: + // - golang: Go templating + Templating *Driver `protobuf:"bytes,3,opt,name=templating" json:"templating,omitempty"` +} + +func (m *ConfigSpec) Reset() { *m = ConfigSpec{} } +func (*ConfigSpec) ProtoMessage() {} +func (*ConfigSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{13} } + +func init() { + proto.RegisterType((*NodeSpec)(nil), "docker.swarmkit.v1.NodeSpec") + proto.RegisterType((*ServiceSpec)(nil), "docker.swarmkit.v1.ServiceSpec") + proto.RegisterType((*ReplicatedService)(nil), "docker.swarmkit.v1.ReplicatedService") + proto.RegisterType((*GlobalService)(nil), "docker.swarmkit.v1.GlobalService") + proto.RegisterType((*TaskSpec)(nil), "docker.swarmkit.v1.TaskSpec") + proto.RegisterType((*ResourceReference)(nil), "docker.swarmkit.v1.ResourceReference") + proto.RegisterType((*GenericRuntimeSpec)(nil), "docker.swarmkit.v1.GenericRuntimeSpec") + proto.RegisterType((*NetworkAttachmentSpec)(nil), "docker.swarmkit.v1.NetworkAttachmentSpec") + proto.RegisterType((*ContainerSpec)(nil), "docker.swarmkit.v1.ContainerSpec") + proto.RegisterType((*ContainerSpec_PullOptions)(nil), "docker.swarmkit.v1.ContainerSpec.PullOptions") + proto.RegisterType((*ContainerSpec_DNSConfig)(nil), "docker.swarmkit.v1.ContainerSpec.DNSConfig") + proto.RegisterType((*EndpointSpec)(nil), "docker.swarmkit.v1.EndpointSpec") + proto.RegisterType((*NetworkSpec)(nil), "docker.swarmkit.v1.NetworkSpec") + proto.RegisterType((*ClusterSpec)(nil), "docker.swarmkit.v1.ClusterSpec") + proto.RegisterType((*SecretSpec)(nil), "docker.swarmkit.v1.SecretSpec") + proto.RegisterType((*ConfigSpec)(nil), "docker.swarmkit.v1.ConfigSpec") + proto.RegisterEnum("docker.swarmkit.v1.NodeSpec_Membership", NodeSpec_Membership_name, NodeSpec_Membership_value) + proto.RegisterEnum("docker.swarmkit.v1.NodeSpec_Availability", NodeSpec_Availability_name, NodeSpec_Availability_value) + proto.RegisterEnum("docker.swarmkit.v1.ContainerSpec_Isolation", ContainerSpec_Isolation_name, 
ContainerSpec_Isolation_value) + proto.RegisterEnum("docker.swarmkit.v1.EndpointSpec_ResolutionMode", EndpointSpec_ResolutionMode_name, EndpointSpec_ResolutionMode_value) +} + +func (m *NodeSpec) Copy() *NodeSpec { + if m == nil { + return nil + } + o := &NodeSpec{} + o.CopyFrom(m) + return o +} + +func (m *NodeSpec) CopyFrom(src interface{}) { + + o := src.(*NodeSpec) + *m = *o + deepcopy.Copy(&m.Annotations, &o.Annotations) +} + +func (m *ServiceSpec) Copy() *ServiceSpec { + if m == nil { + return nil + } + o := &ServiceSpec{} + o.CopyFrom(m) + return o +} + +func (m *ServiceSpec) CopyFrom(src interface{}) { + + o := src.(*ServiceSpec) + *m = *o + deepcopy.Copy(&m.Annotations, &o.Annotations) + deepcopy.Copy(&m.Task, &o.Task) + if o.Update != nil { + m.Update = &UpdateConfig{} + deepcopy.Copy(m.Update, o.Update) + } + if o.Rollback != nil { + m.Rollback = &UpdateConfig{} + deepcopy.Copy(m.Rollback, o.Rollback) + } + if o.Networks != nil { + m.Networks = make([]*NetworkAttachmentConfig, len(o.Networks)) + for i := range m.Networks { + m.Networks[i] = &NetworkAttachmentConfig{} + deepcopy.Copy(m.Networks[i], o.Networks[i]) + } + } + + if o.Endpoint != nil { + m.Endpoint = &EndpointSpec{} + deepcopy.Copy(m.Endpoint, o.Endpoint) + } + if o.Mode != nil { + switch o.Mode.(type) { + case *ServiceSpec_Replicated: + v := ServiceSpec_Replicated{ + Replicated: &ReplicatedService{}, + } + deepcopy.Copy(v.Replicated, o.GetReplicated()) + m.Mode = &v + case *ServiceSpec_Global: + v := ServiceSpec_Global{ + Global: &GlobalService{}, + } + deepcopy.Copy(v.Global, o.GetGlobal()) + m.Mode = &v + } + } + +} + +func (m *ReplicatedService) Copy() *ReplicatedService { + if m == nil { + return nil + } + o := &ReplicatedService{} + o.CopyFrom(m) + return o +} + +func (m *ReplicatedService) CopyFrom(src interface{}) { + + o := src.(*ReplicatedService) + *m = *o +} + +func (m *GlobalService) Copy() *GlobalService { + if m == nil { + return nil + } + o := &GlobalService{} + o.CopyFrom(m) + return o +} + +func (m *GlobalService) CopyFrom(src interface{}) {} +func (m *TaskSpec) Copy() *TaskSpec { + if m == nil { + return nil + } + o := &TaskSpec{} + o.CopyFrom(m) + return o +} + +func (m *TaskSpec) CopyFrom(src interface{}) { + + o := src.(*TaskSpec) + *m = *o + if o.Resources != nil { + m.Resources = &ResourceRequirements{} + deepcopy.Copy(m.Resources, o.Resources) + } + if o.Restart != nil { + m.Restart = &RestartPolicy{} + deepcopy.Copy(m.Restart, o.Restart) + } + if o.Placement != nil { + m.Placement = &Placement{} + deepcopy.Copy(m.Placement, o.Placement) + } + if o.LogDriver != nil { + m.LogDriver = &Driver{} + deepcopy.Copy(m.LogDriver, o.LogDriver) + } + if o.Networks != nil { + m.Networks = make([]*NetworkAttachmentConfig, len(o.Networks)) + for i := range m.Networks { + m.Networks[i] = &NetworkAttachmentConfig{} + deepcopy.Copy(m.Networks[i], o.Networks[i]) + } + } + + if o.ResourceReferences != nil { + m.ResourceReferences = make([]ResourceReference, len(o.ResourceReferences)) + for i := range m.ResourceReferences { + deepcopy.Copy(&m.ResourceReferences[i], &o.ResourceReferences[i]) + } + } + + if o.Runtime != nil { + switch o.Runtime.(type) { + case *TaskSpec_Attachment: + v := TaskSpec_Attachment{ + Attachment: &NetworkAttachmentSpec{}, + } + deepcopy.Copy(v.Attachment, o.GetAttachment()) + m.Runtime = &v + case *TaskSpec_Container: + v := TaskSpec_Container{ + Container: &ContainerSpec{}, + } + deepcopy.Copy(v.Container, o.GetContainer()) + m.Runtime = &v + case *TaskSpec_Generic: + v := 
TaskSpec_Generic{ + Generic: &GenericRuntimeSpec{}, + } + deepcopy.Copy(v.Generic, o.GetGeneric()) + m.Runtime = &v + } + } + +} + +func (m *ResourceReference) Copy() *ResourceReference { + if m == nil { + return nil + } + o := &ResourceReference{} + o.CopyFrom(m) + return o +} + +func (m *ResourceReference) CopyFrom(src interface{}) { + + o := src.(*ResourceReference) + *m = *o +} + +func (m *GenericRuntimeSpec) Copy() *GenericRuntimeSpec { + if m == nil { + return nil + } + o := &GenericRuntimeSpec{} + o.CopyFrom(m) + return o +} + +func (m *GenericRuntimeSpec) CopyFrom(src interface{}) { + + o := src.(*GenericRuntimeSpec) + *m = *o + if o.Payload != nil { + m.Payload = &google_protobuf3.Any{} + deepcopy.Copy(m.Payload, o.Payload) + } +} + +func (m *NetworkAttachmentSpec) Copy() *NetworkAttachmentSpec { + if m == nil { + return nil + } + o := &NetworkAttachmentSpec{} + o.CopyFrom(m) + return o +} + +func (m *NetworkAttachmentSpec) CopyFrom(src interface{}) { + + o := src.(*NetworkAttachmentSpec) + *m = *o +} + +func (m *ContainerSpec) Copy() *ContainerSpec { + if m == nil { + return nil + } + o := &ContainerSpec{} + o.CopyFrom(m) + return o +} + +func (m *ContainerSpec) CopyFrom(src interface{}) { + + o := src.(*ContainerSpec) + *m = *o + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.Command != nil { + m.Command = make([]string, len(o.Command)) + copy(m.Command, o.Command) + } + + if o.Args != nil { + m.Args = make([]string, len(o.Args)) + copy(m.Args, o.Args) + } + + if o.Env != nil { + m.Env = make([]string, len(o.Env)) + copy(m.Env, o.Env) + } + + if o.Groups != nil { + m.Groups = make([]string, len(o.Groups)) + copy(m.Groups, o.Groups) + } + + if o.Privileges != nil { + m.Privileges = &Privileges{} + deepcopy.Copy(m.Privileges, o.Privileges) + } + if o.Init != nil { + m.Init = &google_protobuf4.BoolValue{} + deepcopy.Copy(m.Init, o.Init) + } + if o.Mounts != nil { + m.Mounts = make([]Mount, len(o.Mounts)) + for i := range m.Mounts { + deepcopy.Copy(&m.Mounts[i], &o.Mounts[i]) + } + } + + if o.StopGracePeriod != nil { + m.StopGracePeriod = &google_protobuf1.Duration{} + deepcopy.Copy(m.StopGracePeriod, o.StopGracePeriod) + } + if o.PullOptions != nil { + m.PullOptions = &ContainerSpec_PullOptions{} + deepcopy.Copy(m.PullOptions, o.PullOptions) + } + if o.Secrets != nil { + m.Secrets = make([]*SecretReference, len(o.Secrets)) + for i := range m.Secrets { + m.Secrets[i] = &SecretReference{} + deepcopy.Copy(m.Secrets[i], o.Secrets[i]) + } + } + + if o.Configs != nil { + m.Configs = make([]*ConfigReference, len(o.Configs)) + for i := range m.Configs { + m.Configs[i] = &ConfigReference{} + deepcopy.Copy(m.Configs[i], o.Configs[i]) + } + } + + if o.Hosts != nil { + m.Hosts = make([]string, len(o.Hosts)) + copy(m.Hosts, o.Hosts) + } + + if o.DNSConfig != nil { + m.DNSConfig = &ContainerSpec_DNSConfig{} + deepcopy.Copy(m.DNSConfig, o.DNSConfig) + } + if o.Healthcheck != nil { + m.Healthcheck = &HealthConfig{} + deepcopy.Copy(m.Healthcheck, o.Healthcheck) + } +} + +func (m *ContainerSpec_PullOptions) Copy() *ContainerSpec_PullOptions { + if m == nil { + return nil + } + o := &ContainerSpec_PullOptions{} + o.CopyFrom(m) + return o +} + +func (m *ContainerSpec_PullOptions) CopyFrom(src interface{}) { + + o := src.(*ContainerSpec_PullOptions) + *m = *o +} + +func (m *ContainerSpec_DNSConfig) Copy() *ContainerSpec_DNSConfig { + if m == nil { + return nil + } + o := &ContainerSpec_DNSConfig{} + 
o.CopyFrom(m) + return o +} + +func (m *ContainerSpec_DNSConfig) CopyFrom(src interface{}) { + + o := src.(*ContainerSpec_DNSConfig) + *m = *o + if o.Nameservers != nil { + m.Nameservers = make([]string, len(o.Nameservers)) + copy(m.Nameservers, o.Nameservers) + } + + if o.Search != nil { + m.Search = make([]string, len(o.Search)) + copy(m.Search, o.Search) + } + + if o.Options != nil { + m.Options = make([]string, len(o.Options)) + copy(m.Options, o.Options) + } + +} + +func (m *EndpointSpec) Copy() *EndpointSpec { + if m == nil { + return nil + } + o := &EndpointSpec{} + o.CopyFrom(m) + return o +} + +func (m *EndpointSpec) CopyFrom(src interface{}) { + + o := src.(*EndpointSpec) + *m = *o + if o.Ports != nil { + m.Ports = make([]*PortConfig, len(o.Ports)) + for i := range m.Ports { + m.Ports[i] = &PortConfig{} + deepcopy.Copy(m.Ports[i], o.Ports[i]) + } + } + +} + +func (m *NetworkSpec) Copy() *NetworkSpec { + if m == nil { + return nil + } + o := &NetworkSpec{} + o.CopyFrom(m) + return o +} + +func (m *NetworkSpec) CopyFrom(src interface{}) { + + o := src.(*NetworkSpec) + *m = *o + deepcopy.Copy(&m.Annotations, &o.Annotations) + if o.DriverConfig != nil { + m.DriverConfig = &Driver{} + deepcopy.Copy(m.DriverConfig, o.DriverConfig) + } + if o.IPAM != nil { + m.IPAM = &IPAMOptions{} + deepcopy.Copy(m.IPAM, o.IPAM) + } + if o.ConfigFrom != nil { + switch o.ConfigFrom.(type) { + case *NetworkSpec_Network: + v := NetworkSpec_Network{ + Network: o.GetNetwork(), + } + m.ConfigFrom = &v + } + } + +} + +func (m *ClusterSpec) Copy() *ClusterSpec { + if m == nil { + return nil + } + o := &ClusterSpec{} + o.CopyFrom(m) + return o +} + +func (m *ClusterSpec) CopyFrom(src interface{}) { + + o := src.(*ClusterSpec) + *m = *o + deepcopy.Copy(&m.Annotations, &o.Annotations) + deepcopy.Copy(&m.AcceptancePolicy, &o.AcceptancePolicy) + deepcopy.Copy(&m.Orchestration, &o.Orchestration) + deepcopy.Copy(&m.Raft, &o.Raft) + deepcopy.Copy(&m.Dispatcher, &o.Dispatcher) + deepcopy.Copy(&m.CAConfig, &o.CAConfig) + deepcopy.Copy(&m.TaskDefaults, &o.TaskDefaults) + deepcopy.Copy(&m.EncryptionConfig, &o.EncryptionConfig) +} + +func (m *SecretSpec) Copy() *SecretSpec { + if m == nil { + return nil + } + o := &SecretSpec{} + o.CopyFrom(m) + return o +} + +func (m *SecretSpec) CopyFrom(src interface{}) { + + o := src.(*SecretSpec) + *m = *o + deepcopy.Copy(&m.Annotations, &o.Annotations) + if o.Data != nil { + m.Data = make([]byte, len(o.Data)) + copy(m.Data, o.Data) + } + if o.Templating != nil { + m.Templating = &Driver{} + deepcopy.Copy(m.Templating, o.Templating) + } + if o.Driver != nil { + m.Driver = &Driver{} + deepcopy.Copy(m.Driver, o.Driver) + } +} + +func (m *ConfigSpec) Copy() *ConfigSpec { + if m == nil { + return nil + } + o := &ConfigSpec{} + o.CopyFrom(m) + return o +} + +func (m *ConfigSpec) CopyFrom(src interface{}) { + + o := src.(*ConfigSpec) + *m = *o + deepcopy.Copy(&m.Annotations, &o.Annotations) + if o.Data != nil { + m.Data = make([]byte, len(o.Data)) + copy(m.Data, o.Data) + } + if o.Templating != nil { + m.Templating = &Driver{} + deepcopy.Copy(m.Templating, o.Templating) + } +} + +func (m *NodeSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Annotations.Size())) + n1, err := 
m.Annotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + if m.DesiredRole != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.DesiredRole)) + } + if m.Membership != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Membership)) + } + if m.Availability != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Availability)) + } + return i, nil +} + +func (m *ServiceSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Annotations.Size())) + n2, err := m.Annotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + dAtA[i] = 0x12 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Task.Size())) + n3, err := m.Task.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + if m.Mode != nil { + nn4, err := m.Mode.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn4 + } + if m.Update != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Update.Size())) + n5, err := m.Update.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + if len(m.Networks) > 0 { + for _, msg := range m.Networks { + dAtA[i] = 0x3a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Endpoint != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Endpoint.Size())) + n6, err := m.Endpoint.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.Rollback != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Rollback.Size())) + n7, err := m.Rollback.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} + +func (m *ServiceSpec_Replicated) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Replicated != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Replicated.Size())) + n8, err := m.Replicated.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} +func (m *ServiceSpec_Global) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Global != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Global.Size())) + n9, err := m.Global.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} +func (m *ReplicatedService) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicatedService) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Replicas)) + } + return i, nil +} + +func (m *GlobalService) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GlobalService) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *TaskSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Runtime != nil { + nn10, err := m.Runtime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn10 + } + if m.Resources != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Resources.Size())) + n11, err := m.Resources.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.Restart != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Restart.Size())) + n12, err := m.Restart.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + if m.Placement != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Placement.Size())) + n13, err := m.Placement.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + if m.LogDriver != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.LogDriver.Size())) + n14, err := m.LogDriver.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + if len(m.Networks) > 0 { + for _, msg := range m.Networks { + dAtA[i] = 0x3a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.ForceUpdate != 0 { + dAtA[i] = 0x48 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.ForceUpdate)) + } + if len(m.ResourceReferences) > 0 { + for _, msg := range m.ResourceReferences { + dAtA[i] = 0x5a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *TaskSpec_Container) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Container != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Container.Size())) + n15, err := m.Container.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + return i, nil +} +func (m *TaskSpec_Attachment) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Attachment != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Attachment.Size())) + n16, err := m.Attachment.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + return i, nil +} +func (m *TaskSpec_Generic) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Generic != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Generic.Size())) + n17, err := m.Generic.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + return i, nil +} +func (m *ResourceReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceReference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ResourceID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(m.ResourceID))) + i += copy(dAtA[i:], m.ResourceID) + } + if m.ResourceType != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.ResourceType)) + } + return i, nil +} + +func (m *GenericRuntimeSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenericRuntimeSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = 
[vendored generated code, flattened in extraction — evidently gogo/protobuf output for swarmkit's specs.pb.go. This stretch of the hunk adds the Marshal()/MarshalTo() pairs for GenericRuntimeSpec (tail), NetworkAttachmentSpec, ContainerSpec and its nested PullOptions and DNSConfig, EndpointSpec, NetworkSpec with its NetworkSpec_Network oneof wrapper, and the opening of ClusterSpec. The pairs are mechanical: Marshal() allocates Size() bytes and delegates to MarshalTo(), which writes each set field as a key byte (or two), an optional varint length, and the payload. The shortest pair, with its formatting restored, stands in for the rest:]

+func (m *NetworkAttachmentSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NetworkAttachmentSpec) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	return i, nil
+}
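[Reviewer sketch, not part of the diff: the framing every MarshalTo above repeats — key byte, varint(payload size), payload bytes. Self-contained Go with hypothetical byte values; it uses the standard library rather than the generated helpers.]

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Pretend payload: an already-marshaled embedded message.
	payload := []byte{0x0a, 0x03, 'w', 'e', 'b'}
	buf := []byte{0x0a} // key: field 1, wire type 2 (length-delimited)
	var tmp [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(tmp[:], uint64(len(payload)))
	buf = append(buf, tmp[:n]...) // varint length prefix
	buf = append(buf, payload...) // payload bytes
	fmt.Printf("% x\n", buf)      // 0a 05 0a 03 77 65 62
}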
[The same pattern continues: the rest of ClusterSpec.MarshalTo (AcceptancePolicy, Orchestration, Raft, Dispatcher, CAConfig, TaskDefaults, EncryptionConfig — all embedded messages, written unconditionally), then the SecretSpec and ConfigSpec pairs (Annotations, Data, Templating, and for secrets a Driver). The shared writer helper closes the marshal section, after which the Size() methods begin with NodeSpec:]

+func encodeVarintSpecs(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
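[Reviewer sketch, not part of the diff: round-tripping a uvarint with the same scheme as encodeVarintSpecs and the sovSpecs size helper further down — 7 payload bits per byte, high bit set on every byte but the last. putUvarint and uvarintLen are local stand-ins, not names from the file.]

package main

import "fmt"

// putUvarint mirrors encodeVarintSpecs.
func putUvarint(buf []byte, off int, v uint64) int {
	for v >= 1<<7 {
		buf[off] = uint8(v&0x7f | 0x80)
		v >>= 7
		off++
	}
	buf[off] = uint8(v)
	return off + 1
}

// uvarintLen mirrors sovSpecs: the number of bytes putUvarint will write.
func uvarintLen(v uint64) (n int) {
	for {
		n++
		v >>= 7
		if v == 0 {
			return n
		}
	}
}

func main() {
	buf := make([]byte, 10)
	end := putUvarint(buf, 0, 300)
	fmt.Printf("% x (wrote %d, predicted %d)\n", buf[:end], end, uvarintLen(300))
	// Output: ac 02 (wrote 2, predicted 2)
}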
[Size() methods for every message above (NodeSpec through ConfigSpec) mirror their MarshalTo byte-for-byte: key width, plus varint length, plus payload — the `n += 2` terms cover fields numbered 16 and up, whose keys need two bytes. They are followed by the two varint-size helpers, a String() debug formatter per message (strings.Join over `Field:value,` fragments, with label maps pre-sorted via sortkeys.Strings), and the nil-safe pointer formatter. The representative pieces, reformatted from the flattened diff:]

+func (m *NetworkAttachmentSpec) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	return n
+}
+
+func sovSpecs(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozSpecs(x uint64) (n int) {
+	return sovSpecs(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+func (this *NetworkAttachmentSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&NetworkAttachmentSpec{`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+
+func valueToStringSpecs(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
[The Unmarshal() decoders follow: the rest of NodeSpec (DesiredRole, Membership, Availability — enum varints), ServiceSpec (Annotations, Task, the Replicated/Global oneof decoded into m.Mode, Update, Networks, Endpoint, Rollback), ReplicatedService (Replicas), GlobalService (no fields), and the start of TaskSpec. Every decoder shares one skeleton: loop while bytes remain, read a varint key, split it into fieldNum and wireType, reject end-group markers (wire type 4) and tags <= 0, switch on the field number, and route unknown fields through skipSpecs.]
[The remaining decoders in this stretch: TaskSpec (the Container/Attachment/Generic oneof decoded into m.Runtime, plus Resources, Restart, Placement, LogDriver, Networks, ForceUpdate, ResourceReferences), ResourceReference, GenericRuntimeSpec, NetworkAttachmentSpec, and ContainerSpec through its first fourteen fields (Image, the inline Labels map-entry decoder, Command, Args, Env, Dir, User, Mounts, StopGracePeriod, PullOptions, Groups, Secrets, TTY, and Hostname — the flattened hunk is cut mid-assignment to m.Hostname and continues below). The shortest complete decoder, reformatted:]

+func (m *NetworkAttachmentSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSpecs
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NetworkAttachmentSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NetworkAttachmentSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSpecs(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DNSConfig == nil { + m.DNSConfig = &ContainerSpec_DNSConfig{} + } + if err := m.DNSConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Healthcheck", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Healthcheck == nil { + m.Healthcheck = &HealthConfig{} + } + if err := m.Healthcheck.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OpenStdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.OpenStdin = bool(v != 0) + case 19: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StopSignal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StopSignal = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Configs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Configs = append(m.Configs, &ConfigReference{}) + if err := m.Configs[len(m.Configs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Privileges == nil { + m.Privileges = &Privileges{} + } + if err := m.Privileges.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Init", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Init == nil { + m.Init = &google_protobuf4.BoolValue{} + } + if err := m.Init.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 24: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Isolation", wireType) + } + m.Isolation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Isolation |= (ContainerSpec_Isolation(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 25: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PidsLimit", wireType) + } + m.PidsLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PidsLimit |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerSpec_PullOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: PullOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PullOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 64: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegistryAuth", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RegistryAuth = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerSpec_DNSConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DNSConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DNSConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nameservers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Nameservers = append(m.Nameservers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Search", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Search = append(m.Search, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + m.Mode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Mode |= (EndpointSpec_ResolutionMode(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, &PortConfig{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DriverConfig == nil { + m.DriverConfig = &Driver{} + } + if err := m.DriverConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ipv6Enabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Ipv6Enabled = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Internal", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Internal = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPAM", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IPAM == nil { + m.IPAM = &IPAMOptions{} + } + if err := m.IPAM.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Attachable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Attachable = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Ingress = bool(v != 0) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } 
+ b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConfigFrom = &NetworkSpec_Network{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AcceptancePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.AcceptancePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Orchestration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Orchestration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Raft", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
(int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Raft.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dispatcher", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Dispatcher.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CAConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CAConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskDefaults", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TaskDefaults.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EncryptionConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.EncryptionConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType 
== 4 { + return fmt.Errorf("proto: SecretSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Templating", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Templating == nil { + m.Templating = &Driver{} + } + if err := m.Templating.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Driver == nil { + m.Driver = &Driver{} + } + if err := m.Driver.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire 
& 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Templating", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Templating == nil { + m.Templating = &Driver{} + } + if err := m.Templating.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSpecs(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSpecs + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSpecs + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSpecs + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthSpecs + } + return iNdEx, nil + case 3: + for { 
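+ // (skipSpecs: wire type 3 marks a deprecated start-group; skip nested
+ // fields recursively until the matching end-group tag, wire type 4)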
+ var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSpecs + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipSpecs(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthSpecs = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSpecs = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/specs.proto", fileDescriptorSpecs) } + +var fileDescriptorSpecs = []byte{ + // 2131 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0x4f, 0x6f, 0x1b, 0xc7, + 0x15, 0x17, 0x25, 0x8a, 0x22, 0xdf, 0x52, 0x36, 0x35, 0x71, 0x9c, 0x15, 0x6d, 0x4b, 0x34, 0xe3, + 0xb8, 0x4a, 0x82, 0x52, 0xa8, 0x1a, 0xa4, 0x4e, 0xdc, 0xb4, 0x25, 0x45, 0x46, 0x66, 0x6d, 0x4b, + 0xc4, 0x50, 0x56, 0x6b, 0xa0, 0x00, 0x31, 0xda, 0x1d, 0x91, 0x03, 0x2d, 0x77, 0xb6, 0xb3, 0x43, + 0x19, 0xbc, 0xf5, 0x18, 0xa8, 0x9f, 0x41, 0xe8, 0xa1, 0xe8, 0xbd, 0xfd, 0x16, 0x3e, 0xf6, 0xd8, + 0x5e, 0x84, 0x44, 0x5f, 0xa1, 0xb7, 0x5e, 0x5a, 0xcc, 0xec, 0xec, 0x92, 0x94, 0x57, 0x96, 0x81, + 0xfa, 0xd0, 0xdb, 0xcc, 0xdb, 0xdf, 0xef, 0xcd, 0xbf, 0xdf, 0xbc, 0xf7, 0x66, 0xe1, 0xb3, 0x3e, + 0x93, 0x83, 0xd1, 0x61, 0xcd, 0xe1, 0xc3, 0x4d, 0x97, 0x3b, 0xc7, 0x54, 0x6c, 0x86, 0xaf, 0x88, + 0x18, 0x1e, 0x33, 0xb9, 0x49, 0x02, 0xb6, 0x19, 0x06, 0xd4, 0x09, 0x6b, 0x81, 0xe0, 0x92, 0x23, + 0x14, 0x01, 0x6a, 0x31, 0xa0, 0x76, 0xf2, 0x93, 0xf2, 0x75, 0x7c, 0x39, 0x0e, 0xa8, 0xe1, 0x97, + 0x6f, 0xf5, 0x79, 0x9f, 0xeb, 0xe6, 0xa6, 0x6a, 0x19, 0xeb, 0x5a, 0x9f, 0xf3, 0xbe, 0x47, 0x37, + 0x75, 0xef, 0x70, 0x74, 0xb4, 0xe9, 0x8e, 0x04, 0x91, 0x8c, 0xfb, 0xe6, 0xfb, 0xea, 0xe5, 0xef, + 0xc4, 0x1f, 0x5f, 0x45, 0x7d, 0x25, 0x48, 0x10, 0x50, 0x61, 0x06, 0xac, 0x9e, 0x65, 0x21, 0xbf, + 0xcb, 0x5d, 0xda, 0x0d, 0xa8, 0x83, 0x76, 0xc0, 0x22, 0xbe, 0xcf, 0xa5, 0xf6, 0x1d, 0xda, 0x99, + 0x4a, 0x66, 0xc3, 0xda, 0x5a, 0xaf, 0xbd, 0xb9, 0xa6, 0x5a, 0x7d, 0x02, 0x6b, 0x64, 0x5f, 0x9f, + 0xaf, 0xcf, 0xe1, 0x69, 0x26, 0xfa, 0x25, 0x14, 0x5d, 0x1a, 0x32, 0x41, 0xdd, 0x9e, 0xe0, 0x1e, + 0xb5, 0xe7, 0x2b, 0x99, 0x8d, 0x1b, 0x5b, 0x77, 0xd3, 0x3c, 0xa9, 0xc1, 0x31, 0xf7, 0x28, 0xb6, + 0x0c, 0x43, 0x75, 0xd0, 0x0e, 0xc0, 0x90, 0x0e, 0x0f, 0xa9, 0x08, 0x07, 0x2c, 0xb0, 0x17, 0x34, + 0xfd, 0x47, 0x57, 0xd1, 0xd5, 0xdc, 0x6b, 0xcf, 0x13, 0x38, 0x9e, 0xa2, 0xa2, 0xe7, 0x50, 0x24, + 0x27, 0x84, 0x79, 0xe4, 0x90, 0x79, 0x4c, 0x8e, 0xed, 0xac, 0x76, 0xf5, 0xe9, 0x5b, 0x5d, 0xd5, + 0xa7, 0x08, 0x78, 0x86, 0x5e, 0x75, 0x01, 0x26, 0x03, 0xa1, 0x87, 0xb0, 0xd4, 0x69, 0xed, 0x36, + 0xdb, 0xbb, 0x3b, 0xa5, 0xb9, 0xf2, 0xea, 0xe9, 0x59, 0xe5, 0x43, 0xe5, 0x63, 0x02, 0xe8, 0x50, + 0xdf, 0x65, 0x7e, 0x1f, 0x6d, 0x40, 0xbe, 0xbe, 0xbd, 0xdd, 0xea, 0xec, 0xb7, 0x9a, 0xa5, 0x4c, + 0xb9, 0x7c, 0x7a, 0x56, 0xb9, 0x3d, 0x0b, 0xac, 0x3b, 0x0e, 0x0d, 0x24, 0x75, 0xcb, 0xd9, 0xef, + 0xfe, 0xbc, 0x36, 0x57, 0xfd, 0x2e, 0x03, 0xc5, 0xe9, 0x49, 0xa0, 0x87, 0x90, 0xab, 0x6f, 0xef, + 0xb7, 0x0f, 0x5a, 0xa5, 0xb9, 0x09, 0x7d, 0x1a, 0x51, 0x77, 0x24, 
0x3b, 0xa1, 0xe8, 0x01, 0x2c, + 0x76, 0xea, 0x2f, 0xba, 0xad, 0x52, 0x66, 0x32, 0x9d, 0x69, 0x58, 0x87, 0x8c, 0x42, 0x8d, 0x6a, + 0xe2, 0x7a, 0x7b, 0xb7, 0x34, 0x9f, 0x8e, 0x6a, 0x0a, 0xc2, 0x7c, 0x33, 0x95, 0x3f, 0x65, 0xc1, + 0xea, 0x52, 0x71, 0xc2, 0x9c, 0xf7, 0x2c, 0x91, 0x2f, 0x21, 0x2b, 0x49, 0x78, 0xac, 0xa5, 0x61, + 0xa5, 0x4b, 0x63, 0x9f, 0x84, 0xc7, 0x6a, 0x50, 0x43, 0xd7, 0x78, 0xa5, 0x0c, 0x41, 0x03, 0x8f, + 0x39, 0x44, 0x52, 0x57, 0x2b, 0xc3, 0xda, 0xfa, 0x24, 0x8d, 0x8d, 0x13, 0x94, 0x99, 0xff, 0x93, + 0x39, 0x3c, 0x45, 0x45, 0x8f, 0x21, 0xd7, 0xf7, 0xf8, 0x21, 0xf1, 0xb4, 0x26, 0xac, 0xad, 0xfb, + 0x69, 0x4e, 0x76, 0x34, 0x62, 0xe2, 0xc0, 0x50, 0xd0, 0x23, 0xc8, 0x8d, 0x02, 0x97, 0x48, 0x6a, + 0xe7, 0x34, 0xb9, 0x92, 0x46, 0x7e, 0xa1, 0x11, 0xdb, 0xdc, 0x3f, 0x62, 0x7d, 0x6c, 0xf0, 0xe8, + 0x29, 0xe4, 0x7d, 0x2a, 0x5f, 0x71, 0x71, 0x1c, 0xda, 0x4b, 0x95, 0x85, 0x0d, 0x6b, 0xeb, 0xf3, + 0x54, 0x31, 0x46, 0x98, 0xba, 0x94, 0xc4, 0x19, 0x0c, 0xa9, 0x2f, 0x23, 0x37, 0x8d, 0x79, 0x3b, + 0x83, 0x13, 0x07, 0xe8, 0xe7, 0x90, 0xa7, 0xbe, 0x1b, 0x70, 0xe6, 0x4b, 0x3b, 0x7f, 0xf5, 0x44, + 0x5a, 0x06, 0xa3, 0x36, 0x13, 0x27, 0x0c, 0xc5, 0x16, 0xdc, 0xf3, 0x0e, 0x89, 0x73, 0x6c, 0x17, + 0xde, 0x71, 0x19, 0x09, 0xa3, 0x91, 0x83, 0xec, 0x90, 0xbb, 0xb4, 0xba, 0x09, 0x2b, 0x6f, 0x6c, + 0x35, 0x2a, 0x43, 0xde, 0x6c, 0x75, 0xa4, 0x91, 0x2c, 0x4e, 0xfa, 0xd5, 0x9b, 0xb0, 0x3c, 0xb3, + 0xad, 0xd5, 0xbf, 0x2e, 0x42, 0x3e, 0x3e, 0x6b, 0x54, 0x87, 0x82, 0xc3, 0x7d, 0x49, 0x98, 0x4f, + 0x85, 0x91, 0x57, 0xea, 0xc9, 0x6c, 0xc7, 0x20, 0xc5, 0x7a, 0x32, 0x87, 0x27, 0x2c, 0xf4, 0x2d, + 0x14, 0x04, 0x0d, 0xf9, 0x48, 0x38, 0x34, 0x34, 0xfa, 0xda, 0x48, 0x57, 0x48, 0x04, 0xc2, 0xf4, + 0xf7, 0x23, 0x26, 0xa8, 0xda, 0xe5, 0x10, 0x4f, 0xa8, 0xe8, 0x31, 0x2c, 0x09, 0x1a, 0x4a, 0x22, + 0xe4, 0xdb, 0x24, 0x82, 0x23, 0x48, 0x87, 0x7b, 0xcc, 0x19, 0xe3, 0x98, 0x81, 0x1e, 0x43, 0x21, + 0xf0, 0x88, 0xa3, 0xbd, 0xda, 0x8b, 0x9a, 0x7e, 0x2f, 0x8d, 0xde, 0x89, 0x41, 0x78, 0x82, 0x47, + 0x5f, 0x01, 0x78, 0xbc, 0xdf, 0x73, 0x05, 0x3b, 0xa1, 0xc2, 0x48, 0xac, 0x9c, 0xc6, 0x6e, 0x6a, + 0x04, 0x2e, 0x78, 0xbc, 0x1f, 0x35, 0xd1, 0xce, 0xff, 0xa4, 0xaf, 0x29, 0x6d, 0x3d, 0x05, 0x20, + 0xc9, 0x57, 0xa3, 0xae, 0x4f, 0xdf, 0xc9, 0x95, 0x39, 0x91, 0x29, 0x3a, 0xba, 0x0f, 0xc5, 0x23, + 0x2e, 0x1c, 0xda, 0x33, 0xb7, 0xa6, 0xa0, 0x35, 0x61, 0x69, 0x5b, 0xa4, 0x2f, 0xd4, 0x80, 0xa5, + 0x3e, 0xf5, 0xa9, 0x60, 0x8e, 0x0d, 0x7a, 0xb0, 0x87, 0xa9, 0x17, 0x32, 0x82, 0xe0, 0x91, 0x2f, + 0xd9, 0x90, 0x9a, 0x91, 0x62, 0x22, 0xfa, 0x1d, 0x7c, 0x10, 0x1f, 0x5f, 0x4f, 0xd0, 0x23, 0x2a, + 0xa8, 0xaf, 0x34, 0x60, 0xe9, 0x7d, 0xf8, 0xe4, 0xed, 0x1a, 0x30, 0x68, 0x13, 0x6c, 0x90, 0xb8, + 0xfc, 0x21, 0x6c, 0x14, 0x60, 0x49, 0x44, 0xe3, 0x56, 0xff, 0x98, 0x51, 0xaa, 0xbf, 0x84, 0x40, + 0x9b, 0x60, 0x25, 0xc3, 0x33, 0x57, 0xab, 0xb7, 0xd0, 0xb8, 0x71, 0x71, 0xbe, 0x0e, 0x31, 0xb6, + 0xdd, 0x54, 0x31, 0xc8, 0xb4, 0x5d, 0xd4, 0x82, 0xe5, 0x84, 0xa0, 0xca, 0x00, 0x93, 0x28, 0x2b, + 0x6f, 0x9b, 0xe9, 0xfe, 0x38, 0xa0, 0xb8, 0x28, 0xa6, 0x7a, 0xd5, 0xdf, 0x02, 0x7a, 0x73, 0x5f, + 0x10, 0x82, 0xec, 0x31, 0xf3, 0xcd, 0x34, 0xb0, 0x6e, 0xa3, 0x1a, 0x2c, 0x05, 0x64, 0xec, 0x71, + 0xe2, 0x9a, 0x8b, 0x71, 0xab, 0x16, 0x15, 0x08, 0xb5, 0xb8, 0x40, 0xa8, 0xd5, 0xfd, 0x31, 0x8e, + 0x41, 0xd5, 0xa7, 0xf0, 0x61, 0xea, 0xf1, 0xa2, 0x2d, 0x28, 0x26, 0x17, 0x6e, 0xb2, 0xd6, 0x9b, + 0x17, 0xe7, 0xeb, 0x56, 0x72, 0x33, 0xdb, 0x4d, 0x6c, 0x25, 0xa0, 0xb6, 0x5b, 0xfd, 0xde, 0x82, + 0xe5, 0x99, 0x6b, 0x8b, 0x6e, 0xc1, 0x22, 0x1b, 0x92, 0x3e, 0x35, 0x73, 0x8c, 0x3a, 0xa8, 
0x05, + 0x39, 0x8f, 0x1c, 0x52, 0x4f, 0x5d, 0x5e, 0x75, 0x70, 0x3f, 0xbe, 0xf6, 0xfe, 0xd7, 0x9e, 0x69, + 0x7c, 0xcb, 0x97, 0x62, 0x8c, 0x0d, 0x19, 0xd9, 0xb0, 0xe4, 0xf0, 0xe1, 0x90, 0xf8, 0x2a, 0x4d, + 0x2c, 0x6c, 0x14, 0x70, 0xdc, 0x55, 0x3b, 0x43, 0x44, 0x3f, 0xb4, 0xb3, 0xda, 0xac, 0xdb, 0xa8, + 0x04, 0x0b, 0xd4, 0x3f, 0xb1, 0x17, 0xb5, 0x49, 0x35, 0x95, 0xc5, 0x65, 0xd1, 0xed, 0x2b, 0x60, + 0xd5, 0x54, 0xbc, 0x51, 0x48, 0x85, 0xbd, 0x14, 0xed, 0xa8, 0x6a, 0xa3, 0x9f, 0x41, 0x6e, 0xc8, + 0x47, 0xbe, 0x0c, 0xed, 0xbc, 0x9e, 0xec, 0x6a, 0xda, 0x64, 0x9f, 0x2b, 0x84, 0x51, 0x96, 0x81, + 0xa3, 0x16, 0xac, 0x84, 0x92, 0x07, 0xbd, 0xbe, 0x20, 0x0e, 0xed, 0x05, 0x54, 0x30, 0xee, 0x9a, + 0x30, 0xbc, 0xfa, 0xc6, 0xa1, 0x34, 0x4d, 0xc1, 0x87, 0x6f, 0x2a, 0xce, 0x8e, 0xa2, 0x74, 0x34, + 0x03, 0x75, 0xa0, 0x18, 0x8c, 0x3c, 0xaf, 0xc7, 0x83, 0x28, 0x23, 0x47, 0x77, 0xe7, 0x1d, 0xb6, + 0xac, 0x33, 0xf2, 0xbc, 0xbd, 0x88, 0x84, 0xad, 0x60, 0xd2, 0x41, 0xb7, 0x21, 0xd7, 0x17, 0x7c, + 0x14, 0x44, 0xf7, 0xa6, 0x80, 0x4d, 0x0f, 0x7d, 0x03, 0x4b, 0x21, 0x75, 0x04, 0x95, 0xa1, 0x5d, + 0xd4, 0x4b, 0xfd, 0x38, 0x6d, 0x90, 0xae, 0x86, 0x24, 0x77, 0x02, 0xc7, 0x1c, 0xb4, 0x0a, 0x0b, + 0x52, 0x8e, 0xed, 0xe5, 0x4a, 0x66, 0x23, 0xdf, 0x58, 0xba, 0x38, 0x5f, 0x5f, 0xd8, 0xdf, 0x7f, + 0x89, 0x95, 0x4d, 0x65, 0x8b, 0x01, 0x0f, 0xa5, 0x4f, 0x86, 0xd4, 0xbe, 0xa1, 0xf7, 0x36, 0xe9, + 0xa3, 0x97, 0x00, 0xae, 0x1f, 0xf6, 0x1c, 0x1d, 0x9e, 0xec, 0x9b, 0x7a, 0x75, 0x9f, 0x5f, 0xbf, + 0xba, 0xe6, 0x6e, 0xd7, 0x64, 0xcc, 0xe5, 0x8b, 0xf3, 0xf5, 0x42, 0xd2, 0xc5, 0x05, 0xd7, 0x0f, + 0xa3, 0x26, 0x6a, 0x80, 0x35, 0xa0, 0xc4, 0x93, 0x03, 0x67, 0x40, 0x9d, 0x63, 0xbb, 0x74, 0x75, + 0x0a, 0x7c, 0xa2, 0x61, 0xc6, 0xc3, 0x34, 0x49, 0x29, 0x58, 0x4d, 0x35, 0xb4, 0x57, 0xf4, 0x5e, + 0x45, 0x1d, 0x74, 0x0f, 0x80, 0x07, 0xd4, 0xef, 0x85, 0xd2, 0x65, 0xbe, 0x8d, 0xd4, 0x92, 0x71, + 0x41, 0x59, 0xba, 0xca, 0x80, 0xee, 0xa8, 0x04, 0x45, 0xdc, 0x1e, 0xf7, 0xbd, 0xb1, 0xfd, 0x81, + 0xfe, 0x9a, 0x57, 0x86, 0x3d, 0xdf, 0x1b, 0xa3, 0x75, 0xb0, 0xb4, 0x2e, 0x42, 0xd6, 0xf7, 0x89, + 0x67, 0xdf, 0xd2, 0xfb, 0x01, 0xca, 0xd4, 0xd5, 0x16, 0x75, 0x0e, 0xd1, 0x6e, 0x84, 0xf6, 0x87, + 0x57, 0x9f, 0x83, 0x99, 0xec, 0xe4, 0x1c, 0x0c, 0x07, 0xfd, 0x02, 0x20, 0x10, 0xec, 0x84, 0x79, + 0xb4, 0x4f, 0x43, 0xfb, 0xb6, 0x5e, 0xf4, 0x5a, 0x6a, 0x66, 0x4a, 0x50, 0x78, 0x8a, 0x81, 0x6a, + 0x90, 0x65, 0x3e, 0x93, 0xf6, 0x47, 0x26, 0x2b, 0x5d, 0x96, 0x6a, 0x83, 0x73, 0xef, 0x80, 0x78, + 0x23, 0x8a, 0x35, 0x0e, 0xb5, 0xa1, 0xc0, 0x42, 0xee, 0x69, 0xf9, 0xda, 0xb6, 0x8e, 0x6f, 0xef, + 0x70, 0x7e, 0xed, 0x98, 0x82, 0x27, 0x6c, 0x74, 0x17, 0x0a, 0x01, 0x73, 0xc3, 0x67, 0x6c, 0xc8, + 0xa4, 0xbd, 0x5a, 0xc9, 0x6c, 0x2c, 0xe0, 0x89, 0xa1, 0xfc, 0x15, 0x58, 0x53, 0x61, 0x40, 0x5d, + 0xdf, 0x63, 0x3a, 0x36, 0x91, 0x45, 0x35, 0xd5, 0x59, 0x9d, 0xa8, 0x89, 0xe9, 0xd0, 0x57, 0xc0, + 0x51, 0xe7, 0xeb, 0xf9, 0x47, 0x99, 0xf2, 0x16, 0x58, 0x53, 0xd7, 0x01, 0x7d, 0xac, 0xc2, 0x72, + 0x9f, 0x85, 0x52, 0x8c, 0x7b, 0x64, 0x24, 0x07, 0xf6, 0xaf, 0x34, 0xa1, 0x18, 0x1b, 0xeb, 0x23, + 0x39, 0x28, 0xf7, 0x60, 0xa2, 0x2a, 0x54, 0x01, 0x4b, 0xa9, 0x35, 0xa4, 0xe2, 0x84, 0x0a, 0x55, + 0xf2, 0x28, 0x31, 0x4c, 0x9b, 0xd4, 0xad, 0x0a, 0x29, 0x11, 0xce, 0x40, 0x07, 0xb5, 0x02, 0x36, + 0x3d, 0x15, 0xa5, 0xe2, 0xab, 0x6b, 0xa2, 0x94, 0xe9, 0x56, 0xff, 0x96, 0x81, 0x42, 0xb2, 0x0d, + 0xe8, 0x0b, 0x58, 0x69, 0x77, 0xf7, 0x9e, 0xd5, 0xf7, 0xdb, 0x7b, 0xbb, 0xbd, 0x66, 0xeb, 0xdb, + 0xfa, 0x8b, 0x67, 0xfb, 0xa5, 0xb9, 0xf2, 0xbd, 0xd3, 0xb3, 0xca, 0xea, 0x24, 0xe2, 0xc6, 0xf0, + 0x26, 0x3d, 0x22, 
0x23, 0x4f, 0xce, 0xb2, 0x3a, 0x78, 0x6f, 0xbb, 0xd5, 0xed, 0x96, 0x32, 0x57, + 0xb1, 0x3a, 0x82, 0x3b, 0x34, 0x0c, 0xd1, 0x16, 0x94, 0x26, 0xac, 0x27, 0x2f, 0x3b, 0x2d, 0x7c, + 0x50, 0x9a, 0x2f, 0xdf, 0x3d, 0x3d, 0xab, 0xd8, 0x6f, 0x92, 0x9e, 0x8c, 0x03, 0x2a, 0x0e, 0xcc, + 0x73, 0xe1, 0x5f, 0x19, 0x28, 0x4e, 0x57, 0x9b, 0x68, 0x3b, 0xaa, 0x12, 0xf5, 0x31, 0xdc, 0xd8, + 0xda, 0xbc, 0xae, 0x3a, 0xd5, 0x59, 0xce, 0x1b, 0x29, 0xbf, 0xcf, 0xd5, 0xc3, 0x50, 0x93, 0xd1, + 0x17, 0xb0, 0x18, 0x70, 0x21, 0xe3, 0x7c, 0x90, 0xae, 0x56, 0x2e, 0xe2, 0x1a, 0x26, 0x02, 0x57, + 0x07, 0x70, 0x63, 0xd6, 0x1b, 0x7a, 0x00, 0x0b, 0x07, 0xed, 0x4e, 0x69, 0xae, 0x7c, 0xe7, 0xf4, + 0xac, 0xf2, 0xd1, 0xec, 0xc7, 0x03, 0x26, 0xe4, 0x88, 0x78, 0xed, 0x0e, 0xfa, 0x0c, 0x16, 0x9b, + 0xbb, 0x5d, 0x8c, 0x4b, 0x99, 0xf2, 0xfa, 0xe9, 0x59, 0xe5, 0xce, 0x2c, 0x4e, 0x7d, 0xe2, 0x23, + 0xdf, 0xc5, 0xfc, 0x30, 0x79, 0x24, 0xfd, 0x7b, 0x1e, 0x2c, 0x93, 0x26, 0xdf, 0xf7, 0x3b, 0x7a, + 0x39, 0xaa, 0x01, 0xe3, 0xf8, 0x37, 0x7f, 0x6d, 0x29, 0x58, 0x8c, 0x08, 0x46, 0x97, 0xf7, 0xa1, + 0xc8, 0x82, 0x93, 0x2f, 0x7b, 0xd4, 0x27, 0x87, 0x9e, 0x79, 0x2f, 0xe5, 0xb1, 0xa5, 0x6c, 0xad, + 0xc8, 0xa4, 0x82, 0x2f, 0xf3, 0x25, 0x15, 0xbe, 0x79, 0x09, 0xe5, 0x71, 0xd2, 0x47, 0xdf, 0x40, + 0x96, 0x05, 0x64, 0x68, 0xea, 0xd7, 0xd4, 0x15, 0xb4, 0x3b, 0xf5, 0xe7, 0xe6, 0xde, 0x34, 0xf2, + 0x17, 0xe7, 0xeb, 0x59, 0x65, 0xc0, 0x9a, 0x86, 0xd6, 0xe2, 0x12, 0x52, 0x8d, 0xa4, 0x13, 0x69, + 0x1e, 0x4f, 0x59, 0x94, 0xf6, 0x99, 0xdf, 0x17, 0x34, 0x0c, 0x75, 0x4a, 0xcd, 0xe3, 0xb8, 0x8b, + 0xca, 0xb0, 0x64, 0x0a, 0x51, 0x5d, 0x79, 0x16, 0x54, 0x91, 0x67, 0x0c, 0x8d, 0x65, 0xb0, 0xa2, + 0xdd, 0xe8, 0x1d, 0x09, 0x3e, 0xac, 0xfe, 0x27, 0x0b, 0xd6, 0xb6, 0x37, 0x0a, 0xa5, 0xa9, 0x29, + 0xde, 0xdb, 0xe6, 0xbf, 0x84, 0x15, 0xa2, 0xdf, 0xe5, 0xc4, 0x57, 0x09, 0x5a, 0xd7, 0xf7, 0xe6, + 0x00, 0x1e, 0xa4, 0xba, 0x4b, 0xc0, 0xd1, 0x5b, 0xa0, 0x91, 0x53, 0x3e, 0xed, 0x0c, 0x2e, 0x91, + 0x4b, 0x5f, 0x50, 0x17, 0x96, 0xb9, 0x70, 0x06, 0x34, 0x94, 0x51, 0x5a, 0x37, 0xef, 0xd8, 0xd4, + 0x3f, 0x1c, 0x7b, 0xd3, 0x40, 0x93, 0xd3, 0xa2, 0xd9, 0xce, 0xfa, 0x40, 0x8f, 0x20, 0x2b, 0xc8, + 0x51, 0xfc, 0x56, 0x49, 0xbd, 0x24, 0x98, 0x1c, 0xc9, 0x19, 0x17, 0x9a, 0x81, 0x7e, 0x0d, 0xe0, + 0xb2, 0x30, 0x20, 0xd2, 0x19, 0x50, 0x61, 0x0e, 0x3b, 0x75, 0x89, 0xcd, 0x04, 0x35, 0xe3, 0x65, + 0x8a, 0x8d, 0x9e, 0x42, 0xc1, 0x21, 0xb1, 0x5c, 0x73, 0x57, 0x3f, 0xee, 0xb7, 0xeb, 0xc6, 0x45, + 0x49, 0xb9, 0xb8, 0x38, 0x5f, 0xcf, 0xc7, 0x16, 0x9c, 0x77, 0x88, 0x91, 0xef, 0x53, 0x58, 0x56, + 0x8f, 0xfe, 0x9e, 0x1b, 0x85, 0xb3, 0x48, 0x26, 0x57, 0xe4, 0x68, 0xf5, 0x82, 0x34, 0x61, 0x2f, + 0x3e, 0xce, 0xa2, 0x9c, 0xb2, 0xa1, 0xdf, 0xc0, 0x0a, 0xf5, 0x1d, 0x31, 0xd6, 0x62, 0x8d, 0x67, + 0x98, 0xbf, 0x7a, 0xb1, 0xad, 0x04, 0x3c, 0xb3, 0xd8, 0x12, 0xbd, 0x64, 0xaf, 0xfe, 0x33, 0x03, + 0x10, 0x95, 0x3d, 0xef, 0x57, 0x80, 0x08, 0xb2, 0x2e, 0x91, 0x44, 0x6b, 0xae, 0x88, 0x75, 0x1b, + 0x7d, 0x0d, 0x20, 0xe9, 0x30, 0x50, 0xa1, 0xd7, 0xef, 0x1b, 0xd9, 0xbc, 0x2d, 0x1c, 0x4c, 0xa1, + 0xd1, 0x16, 0xe4, 0xcc, 0x8b, 0x32, 0x7b, 0x2d, 0xcf, 0x20, 0xab, 0x7f, 0xc9, 0x00, 0x44, 0xcb, + 0xfc, 0xbf, 0x5e, 0x5b, 0xc3, 0x7e, 0xfd, 0xc3, 0xda, 0xdc, 0x3f, 0x7e, 0x58, 0x9b, 0xfb, 0xc3, + 0xc5, 0x5a, 0xe6, 0xf5, 0xc5, 0x5a, 0xe6, 0xef, 0x17, 0x6b, 0x99, 0xef, 0x2f, 0xd6, 0x32, 0x87, + 0x39, 0x5d, 0x99, 0xfc, 0xf4, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xb8, 0xa3, 0x85, 0xdc, 0xc7, + 0x15, 0x00, 0x00, +} diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/specs.proto 
b/extender/code/vendor/github.com/docker/swarmkit/api/specs.proto
new file mode 100644
index 000000000..14448d040
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/swarmkit/api/specs.proto
@@ -0,0 +1,456 @@
+syntax = "proto3";
+
+package docker.swarmkit.v1;
+
+import "github.com/docker/swarmkit/api/types.proto";
+import "gogoproto/gogo.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/any.proto";
+import "google/protobuf/wrappers.proto";
+
+// Specs are container objects for user provided input. All creations and
+// updates are done through spec types. As a convention, user input from a spec
+// is never touched in created objects. This allows one to verify that the
+// user's intent has not been modified.
+//
+// Put differently, spec types can be said to represent the desired state of
+// the system. In situations where modifications need to be made to a
+// particular component, API objects will either contain a copy of the spec
+// component or a different representation to reflect allocation or resolution.
+
+message NodeSpec {
+  Annotations annotations = 1 [(gogoproto.nullable) = false];
+
+  enum Membership {
+    option (gogoproto.goproto_enum_prefix) = false;
+
+    PENDING = 0 [(gogoproto.enumvalue_customname) = "NodeMembershipPending"];
+    ACCEPTED = 1 [(gogoproto.enumvalue_customname) = "NodeMembershipAccepted"];
+  }
+
+  enum Availability {
+    option (gogoproto.goproto_enum_prefix) = false;
+
+    // Active nodes.
+    ACTIVE = 0 [(gogoproto.enumvalue_customname) = "NodeAvailabilityActive"];
+
+    // Paused nodes won't be considered by the scheduler, preventing any
+    // further tasks from running on them.
+    PAUSE = 1 [(gogoproto.enumvalue_customname) = "NodeAvailabilityPause"];
+
+    // Drained nodes are paused and any task already running on them will
+    // be evicted.
+    DRAIN = 2 [(gogoproto.enumvalue_customname) = "NodeAvailabilityDrain"];
+  }
+
+  // DesiredRole defines the role the node should have.
+  NodeRole desired_role = 2;
+
+  // Membership controls the admission of the node into the cluster.
+  Membership membership = 3;
+
+  // Availability allows a user to control the current scheduling status of a
+  // node.
+  Availability availability = 4;
+}
+
+// ServiceSpec defines the properties of a service.
+//
+// A service instructs the cluster in orchestrating repeated instances of a
+// template, implemented as tasks. Based on the number of instances, scheduling
+// strategy and restart policy, a number of application-level behaviors can be
+// defined.
+message ServiceSpec {
+  Annotations annotations = 1 [(gogoproto.nullable) = false];
+
+  // Task defines the task template this service will spawn.
+  TaskSpec task = 2 [(gogoproto.nullable) = false];
+
+  oneof mode {
+    ReplicatedService replicated = 3;
+    GlobalService global = 4;
+  }
+
+  // Update contains settings which affect updates.
+  UpdateConfig update = 6;
+
+  // Rollback contains settings which affect rollbacks of updates.
+  UpdateConfig rollback = 9;
+
+  // ServiceSpec.Networks has been deprecated and is replaced by
+  // Networks field in Task (TaskSpec.Networks).
+  // This field (ServiceSpec.Networks) is kept for compatibility.
+  // In case TaskSpec.Networks does not exist, ServiceSpec.Networks
+  // is still honored if it exists.
+  repeated NetworkAttachmentConfig networks = 7 [deprecated=true];
+
+  // Service endpoint specifies the user-provided configuration
+  // to properly discover and load balance a service.
+  EndpointSpec endpoint = 8;
+}
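A minimal sketch of how one of these specs is built and round-tripped from Go, assuming the gogo-generated types in this vendored api package (the service name, image, and replica count below are illustrative, not taken from the PR). The `mode` oneof surfaces in Go as one wrapper struct per case, e.g. `ServiceSpec_Replicated`, and the generated `Marshal`/`Unmarshal` methods seen earlier in specs.pb.go handle the wire format:

package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
)

func main() {
	// A replicated service: the oneof "mode" field is represented in the
	// generated Go as a wrapper struct per case (ServiceSpec_Replicated here).
	spec := api.ServiceSpec{
		Annotations: api.Annotations{Name: "web"},
		Task: api.TaskSpec{
			Runtime: &api.TaskSpec_Container{
				Container: &api.ContainerSpec{Image: "nginx:1.21"},
			},
		},
		Mode: &api.ServiceSpec_Replicated{
			Replicated: &api.ReplicatedService{Replicas: 3},
		},
	}

	// Round-trip through the generated wire codec.
	raw, err := spec.Marshal()
	if err != nil {
		panic(err)
	}
	var decoded api.ServiceSpec
	if err := decoded.Unmarshal(raw); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Annotations.Name) // web
}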
+
+// ReplicatedService sets the reconciliation target to a certain number of replicas.
+message ReplicatedService {
+  uint64 replicas = 1;
+}
+
+// GlobalService represents a global service.
+message GlobalService {
+  // Empty message for now.
+}
+
+message TaskSpec {
+  oneof runtime {
+    NetworkAttachmentSpec attachment = 8;
+    ContainerSpec container = 1;
+    GenericRuntimeSpec generic = 10;
+  }
+
+  // Resource requirements for the container.
+  ResourceRequirements resources = 2;
+
+  // RestartPolicy specifies what to do when a task fails or finishes.
+  RestartPolicy restart = 4;
+
+  // Placement specifies node selection constraints.
+  Placement placement = 5;
+
+  // LogDriver specifies the log driver to use for the task. Any runtime will
+  // direct logs into the specified driver for the duration of the task.
+  Driver log_driver = 6;
+
+  // Networks specifies the list of network attachment
+  // configurations (which specify the network and per-network
+  // aliases) that this task spec is bound to.
+  repeated NetworkAttachmentConfig networks = 7;
+
+  // ForceUpdate is a counter that triggers an update even if no relevant
+  // parameters have been changed. We do this to allow forced restarts
+  // using the same reconciliation-based mechanism that performs rolling
+  // updates.
+  uint64 force_update = 9;
+
+  // ResourceReferences provides a generic way to specify resources that
+  // are used by this task, and should be sent down to agents along with
+  // the task. Inside the runtime field there may be more specific
+  // information about how to use the resource, but ResourceReferences
+  // establishes the relationship at the store level, and instructs the
+  // dispatcher to send the related objects.
+  //
+  // ResourceReferences is a list of ResourceReferences used by the task.
+  repeated ResourceReference resource_references = 11 [(gogoproto.nullable) = false];
+}
+
+message ResourceReference {
+  string resource_id = 1;
+  ResourceType resource_type = 2;
+}
+
+message GenericRuntimeSpec {
+  string kind = 1;
+  google.protobuf.Any payload = 2;
+}
+
+// NetworkAttachmentSpec specifies runtime parameters required to attach
+// a container to a network.
+message NetworkAttachmentSpec {
+  // ContainerID specifies a unique ID of the container that this
+  // attachment is for.
+  string container_id = 1;
+}
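The `runtime` oneof in TaskSpec is the dispatch point between ordinary containers, network attachments, and generic runtimes. In the generated Go each case arrives as a distinct wrapper type, so a type switch is the idiomatic way to consume it. A short sketch: the `runtimeKind` helper is ours, while the wrapper names follow the gogoproto output in this vendored package (the Unmarshal methods earlier in the diff use the same `ContainerID`/`Kind` field names):

package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
)

// runtimeKind reports which branch of the TaskSpec runtime oneof is set.
// Each oneof case is generated as its own wrapper struct (TaskSpec_Container,
// TaskSpec_Attachment, TaskSpec_Generic), so a type switch covers them all.
func runtimeKind(t *api.TaskSpec) string {
	switch r := t.Runtime.(type) {
	case *api.TaskSpec_Container:
		return "container: " + r.Container.Image
	case *api.TaskSpec_Attachment:
		return "attachment: " + r.Attachment.ContainerID
	case *api.TaskSpec_Generic:
		return "generic: " + r.Generic.Kind
	default:
		return "unset"
	}
}

func main() {
	t := &api.TaskSpec{Runtime: &api.TaskSpec_Container{
		Container: &api.ContainerSpec{Image: "alpine:3.16"},
	}}
	fmt.Println(runtimeKind(t)) // container: alpine:3.16
}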
+ // + // If Command and Args are provided, Args will be appended to Command. + repeated string args = 4; + + // Hostname specifies the hostname that will be set on containers created by docker swarm. + // All containers for a given service will have the same hostname. + string hostname = 14; + + // Env specifies the environment variables for the container in NAME=VALUE + // format. These must be compliant with [IEEE Std + // 1003.1-2001](http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html). + repeated string env = 5; + + // Dir defines the working directory to set for the container process. + string dir = 6; + + // User specifies the user that should be employed to run the container. + // + // Note that the primary group may be specified by appending the group name + // or id to the user name, separated by a `:`. This syntax is + // `<user>:<group>`. + string user = 7; + + // Groups specifies supplementary groups available to the user. + repeated string groups = 11; + + // Privileges specifies security configuration/permissions. + Privileges privileges = 22; + + // Init declares that a custom init will be running inside the container; if null, the daemon's configured settings are used. + google.protobuf.BoolValue init = 23; + + // TTY declares that a TTY should be attached to the standard streams, + // including stdin if it is still open. + bool tty = 13 [(gogoproto.customname) = "TTY"]; + + // OpenStdin declares that the standard input (stdin) should be open. + bool open_stdin = 18; + + // ReadOnly declares that the container root filesystem is read-only. + // This only impacts the root filesystem, not additional mounts (including + // tmpfs). For additional mounts that are not part of the initial rootfs, + // they will be decided by the modes passed in the mount definition. + bool read_only = 19; + + // StopSignal defines the signal to stop the container. + string stop_signal = 20; + + repeated Mount mounts = 8 [(gogoproto.nullable) = false]; + + // StopGracePeriod is the grace period for stopping the container before + // forcefully killing it. + // Note: Can't use stdduration here because this needs to be nullable. + google.protobuf.Duration stop_grace_period = 9; + + // PullOptions allows one to parameterize an image pull. + message PullOptions { + // RegistryAuth is the registry auth token obtained from the client, required + // to pull private images. This is the unmodified JSON used as part of + // the `X-Registry-Auth` header. + // TODO(nishanttotla): This field will later be deprecated + string registry_auth = 64; + } + + // PullOptions parameterize the behavior of image pulls. + PullOptions pull_options = 10; + + // SecretReference contains references to zero or more secrets that + // will be exposed to the container. + repeated SecretReference secrets = 12; + + // ConfigReference contains references to zero or more configs that + // will be exposed to the container. + repeated ConfigReference configs = 21; + + // Hosts allows additional entries to be specified in /etc/hosts + // that associate IP addresses with hostnames. + // Detailed documentation is available in: + // http://man7.org/linux/man-pages/man5/hosts.5.html + // IP_address canonical_hostname [aliases...] + // + // The format of the Hosts in swarmkit follows the same as + // above.
+ // This is different from `docker run --add-host <hostname>:<IP>`, + // where the format is `<hostname>:<IP>`. + repeated string hosts = 17; + + // DNSConfig specifies DNS related configurations in the resolver configuration file (resolv.conf) + // Detailed documentation is available in: + // http://man7.org/linux/man-pages/man5/resolv.conf.5.html + // TODO: domain is not supported yet + message DNSConfig { + // Nameservers specifies the IP addresses of the name servers + repeated string nameservers = 1; + + // Search specifies the search list for host-name lookup + repeated string search = 2; + + // Options allows certain internal resolver variables to be modified + repeated string options = 3; + } + + // DNSConfig allows one to specify DNS related configuration in resolv.conf + DNSConfig dns_config = 15 [(gogoproto.customname) = "DNSConfig"]; + + // Healthcheck describes how to check that the container is healthy. If the + // container is considered unhealthy, it will be destroyed, its creating + // task will exit and a new task will be rescheduled elsewhere. A container + // is considered unhealthy after `Retries` number of consecutive failures. + HealthConfig healthcheck = 16; + + enum Isolation { + option (gogoproto.goproto_enum_prefix) = false; + + // ISOLATION_DEFAULT uses whatever default value from the container runtime + ISOLATION_DEFAULT = 0 [(gogoproto.enumvalue_customname) = "ContainerIsolationDefault"]; + + // ISOLATION_PROCESS forces windows container isolation + ISOLATION_PROCESS = 1 [(gogoproto.enumvalue_customname) = "ContainerIsolationProcess"]; + + // ISOLATION_HYPERV forces Hyper-V isolation + ISOLATION_HYPERV = 2 [(gogoproto.enumvalue_customname) = "ContainerIsolationHyperV"]; + } + + // Isolation defines the isolation level for windows containers (default, process, hyperv). + // Runtimes that don't support it ignore this field. + Isolation isolation = 24; + + // PidsLimit protects the OS from resource damage caused by applications + // inside the container, e.g. via a fork bomb. + int64 pidsLimit = 25; +} + +// EndpointSpec defines the properties that can be configured to +// access and loadbalance the service. +message EndpointSpec { + // ResolutionMode specifies the mode of resolution to use for + // internal loadbalancing between tasks which are all within + // the cluster. This is sometimes called the east-west data path. + enum ResolutionMode { + option (gogoproto.goproto_enum_prefix) = false; + + // VIP resolution mode specifies that the + // service resolves to a logical IP and the requests + // are sent to that logical IP. Packets hitting that + // logical IP are load balanced to a chosen backend. + VIP = 0 [(gogoproto.enumvalue_customname) = "ResolutionModeVirtualIP"]; + + // DNSRR resolution mode specifies that the + // service directly gets resolved to one of the + // backend IPs and the client directly initiates a + // request towards the actual backend. This requires + // that the client does not cache the DNS responses + // when the DNS response TTL is 0. + DNSRR = 1 [(gogoproto.enumvalue_customname) = "ResolutionModeDNSRoundRobin"]; + } + + ResolutionMode mode = 1; + + // List of exposed ports through which this service is accessible from + // outside the cluster. + repeated PortConfig ports = 2; +} + +// NetworkSpec specifies user defined network parameters. +message NetworkSpec { + Annotations annotations = 1 [(gogoproto.nullable) = false]; + + // DriverConfig specific configuration consumed by the network driver. + Driver driver_config = 2; + + // IPv6Enabled enables support for IPv6 on the network.
+ bool ipv6_enabled = 3; + + // internal restricts external access to the network. This may be + // accomplished by disabling the default gateway or through other means. + bool internal = 4; + + IPAMOptions ipam = 5 [(gogoproto.customname) = "IPAM"]; + + // Attachable allows external (to swarm) entities to manually + // attach to this network. With this flag enabled, external + // entities such as containers running on a worker node in + // the cluster can manually attach to this network and access + // the services attached to this network. If this flag is not + // enabled (the default case), no manual attachment to this network + // can happen. + bool attachable = 6; + + // Ingress indicates this network will provide the routing-mesh. + // In older versions, the network providing the routing mesh was + // only created internally by swarm, and it was identified by the name + // "ingress" and the label "com.docker.swarm.internal": "true". + bool ingress = 7; + + // ConfigFrom is the source of the configuration for this network. + oneof config_from { + // Network is the name of a network that provides the network + // specific configuration for this network, locally on the node + // where this network is being plumbed. + string network = 8; + } + +} + +// ClusterSpec specifies global cluster settings. +message ClusterSpec { + Annotations annotations = 1 [(gogoproto.nullable) = false]; + + // DEPRECATED: AcceptancePolicy defines the certificate issuance policy. + // Acceptance policy is no longer customizable, and secrets have been + // replaced with join tokens. + AcceptancePolicy acceptance_policy = 2 [deprecated=true, (gogoproto.nullable) = false]; + + // Orchestration defines cluster-level orchestration settings. + OrchestrationConfig orchestration = 3 [(gogoproto.nullable) = false]; + + // Raft defines the cluster's raft settings. + RaftConfig raft = 4 [(gogoproto.nullable) = false]; + + // Dispatcher defines cluster-level dispatcher settings. + DispatcherConfig dispatcher = 5 [(gogoproto.nullable) = false]; + + // CAConfig defines cluster-level certificate authority settings. + CAConfig ca_config = 6 [(gogoproto.nullable) = false, (gogoproto.customname) = "CAConfig"]; + + // TaskDefaults specifies the default values to use for task creation. + TaskDefaults task_defaults = 7 [(gogoproto.nullable) = false]; + + // EncryptionConfig defines the cluster's encryption settings. + EncryptionConfig encryption_config = 8 [(gogoproto.nullable) = false]; +} + +// SecretSpec specifies a user-provided secret. +message SecretSpec { + Annotations annotations = 1 [(gogoproto.nullable) = false]; + + // Data is the secret payload - the maximum size is 500KB (that is, 500*1024 bytes) + bytes data = 2; + + // Templating controls whether and how to evaluate the secret payload as + // a template. If it is not set, no templating is used. + // + // The currently recognized values are: + // - golang: Go templating + Driver templating = 3; + + // Driver is the secret driver that is used to store the specified secret + Driver driver = 4; +} + +// ConfigSpec specifies user-provided configuration files. +message ConfigSpec { + Annotations annotations = 1 [(gogoproto.nullable) = false]; + + // Data is the config payload - the maximum size is 500KB (that is, 500*1024 bytes) + // TODO(aaronl): Do we want to revise this to include multiple payloads in a single + // ConfigSpec? Define this to be a tar? etc... + bytes data = 2; + + // Templating controls whether and how to evaluate the config payload as + // a template.
If it is not set, no templating is used. + // + // The currently recognized values are: + // - golang: Go templating + Driver templating = 3; +} diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/storeobject.go b/extender/code/vendor/github.com/docker/swarmkit/api/storeobject.go new file mode 100644 index 000000000..d140fa3e0 --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/api/storeobject.go @@ -0,0 +1,123 @@ +package api + +import ( + "errors" + "fmt" + "strings" + + "github.com/docker/go-events" +) + +var ( + errUnknownStoreAction = errors.New("unrecognized action type") + errConflictingFilters = errors.New("conflicting filters specified") + errNoKindSpecified = errors.New("no kind of object specified") + errUnrecognizedAction = errors.New("unrecognized action") +) + +// StoreObject is an abstract object that can be handled by the store. +type StoreObject interface { + GetID() string // Get ID + GetMeta() Meta // Retrieve metadata + SetMeta(Meta) // Set metadata + CopyStoreObject() StoreObject // Return a copy of this object + EventCreate() Event // Return a creation event + EventUpdate(oldObject StoreObject) Event // Return an update event + EventDelete() Event // Return a deletion event +} + +// Event is the type used for events passed over watcher channels, and also +// the type used to specify filtering in calls to Watch. +type Event interface { + // TODO(stevvooe): Consider whether it makes sense to squish both the + // matcher type and the primary type into the same type. It might be better + // to build a matcher from an event prototype. + + // Matches checks if this item in a watch queue Matches the event + // description. + Matches(events.Event) bool +} + +// EventCreate is an interface implemented by every creation event type +type EventCreate interface { + IsEventCreate() bool +} + +// EventUpdate is an interface implemented by every update event type +type EventUpdate interface { + IsEventUpdate() bool +} + +// EventDelete is an interface implemented by every delete event type +type EventDelete interface { + IsEventDelete() bool +} + +func customIndexer(kind string, annotations *Annotations) (bool, [][]byte, error) { + var converted [][]byte + + for _, entry := range annotations.Indices { + index := make([]byte, 0, len(kind)+1+len(entry.Key)+1+len(entry.Val)+1) + if kind != "" { + index = append(index, []byte(kind)...) + index = append(index, '|') + } + index = append(index, []byte(entry.Key)...) + index = append(index, '|') + index = append(index, []byte(entry.Val)...) + // Add the null character as a terminator + index = append(index, '\x00') + converted = append(converted, index) + } + + return len(converted) != 0, converted, nil +} + +func fromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + arg, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + // Add the null character as a terminator + arg += "\x00" + return []byte(arg), nil +}
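The helpers here share one key encoding: customIndexer emits [kind '|'] key '|' val '\x00', fromArgs appends the same NUL terminator for exact lookups, and prefixFromArgs (which follows) strips it so prefix queries can match. A standalone sketch of that scheme, illustrative only and not part of the vendored file:

package main

import (
	"bytes"
	"fmt"
)

// indexKey mirrors customIndexer's encoding: [kind '|'] key '|' val '\x00'.
func indexKey(kind, key, val string) []byte {
	index := make([]byte, 0, len(kind)+1+len(key)+1+len(val)+1)
	if kind != "" {
		index = append(index, kind...)
		index = append(index, '|')
	}
	index = append(index, key...)
	index = append(index, '|')
	index = append(index, val...)
	index = append(index, '\x00') // terminator, as in fromArgs
	return index
}

func main() {
	stored := indexKey("node", "name", "worker-1")

	// Exact lookup: the query also carries the NUL, so "worker-1" cannot
	// accidentally match a longer value such as "worker-10".
	exact := append([]byte("node|name|worker-1"), '\x00')
	fmt.Println(bytes.Equal(stored, exact)) // true

	// Prefix lookup: the terminator is stripped, so shorter queries match.
	prefix := []byte("node|name|worker")
	fmt.Println(bytes.HasPrefix(stored, prefix)) // true
}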
+ +func prefixFromArgs(args ...interface{}) ([]byte, error) { + val, err := fromArgs(args...) + if err != nil { + return nil, err + } + + // Strip the null terminator; the rest is a prefix + n := len(val) + if n > 0 { + return val[:n-1], nil + } + return val, nil +} + +func checkCustom(a1, a2 Annotations) bool { + if len(a1.Indices) == 1 { + for _, ind := range a2.Indices { + if ind.Key == a1.Indices[0].Key && ind.Val == a1.Indices[0].Val { + return true + } + } + } + return false +} + +func checkCustomPrefix(a1, a2 Annotations) bool { + if len(a1.Indices) == 1 { + for _, ind := range a2.Indices { + if ind.Key == a1.Indices[0].Key && strings.HasPrefix(ind.Val, a1.Indices[0].Val) { + return true + } + } + } + return false +} diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/types.pb.go b/extender/code/vendor/github.com/docker/swarmkit/api/types.pb.go new file mode 100644 index 000000000..2ddc00756 --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/api/types.pb.go @@ -0,0 +1,17404 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/docker/swarmkit/api/types.proto + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/gogo/protobuf/types" +import google_protobuf1 "github.com/gogo/protobuf/types" +import _ "github.com/gogo/protobuf/gogoproto" + +import os "os" +import time "time" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import binary "encoding/binary" +import types "github.com/gogo/protobuf/types" + +import strings "strings" +import reflect "reflect" +import sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +type ResourceType int32 + +const ( + ResourceType_TASK ResourceType = 0 + ResourceType_SECRET ResourceType = 1 + ResourceType_CONFIG ResourceType = 2 +) + +var ResourceType_name = map[int32]string{ + 0: "TASK", + 1: "SECRET", + 2: "CONFIG", +} +var ResourceType_value = map[string]int32{ + "TASK": 0, + "SECRET": 1, + "CONFIG": 2, +} + +func (x ResourceType) String() string { + return proto.EnumName(ResourceType_name, int32(x)) +} +func (ResourceType) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} } + +// Only the manager creates a NEW task, and moves the task to PENDING and ASSIGNED. +// Afterward, the manager must rely on the agent to update the task status +// (pre-run: preparing, ready, starting; +// running; +// end-state: complete, shutdown, failed, rejected) +type TaskState int32 + +const ( + TaskStateNew TaskState = 0 + TaskStatePending TaskState = 64 + TaskStateAssigned TaskState = 192 + TaskStateAccepted TaskState = 256 + TaskStatePreparing TaskState = 320 + TaskStateReady TaskState = 384 + TaskStateStarting TaskState = 448 + TaskStateRunning TaskState = 512 + TaskStateCompleted TaskState = 576 + TaskStateShutdown TaskState = 640 + TaskStateFailed TaskState = 704 + // TaskStateRejected means a task never ran, for instance if something about + // the environment failed (e.g. setting up a port on that node failed). + TaskStateRejected TaskState = 768 + // TaskStateRemove is used to correctly handle service deletions and scale + // downs. This allows us to keep track of tasks that have been marked for + // deletion, but can't yet be removed because the agent is in the process of + // shutting them down. Once the agent has shut down tasks with desired state + // REMOVE, the task reaper is responsible for removing them.
+ TaskStateRemove TaskState = 800 + // TaskStateOrphaned is used to free up resources associated with service + // tasks on unresponsive nodes without having to delete those tasks. This + // state is directly assigned to the task by the orchestrator. + TaskStateOrphaned TaskState = 832 +) + +var TaskState_name = map[int32]string{ + 0: "NEW", + 64: "PENDING", + 192: "ASSIGNED", + 256: "ACCEPTED", + 320: "PREPARING", + 384: "READY", + 448: "STARTING", + 512: "RUNNING", + 576: "COMPLETE", + 640: "SHUTDOWN", + 704: "FAILED", + 768: "REJECTED", + 800: "REMOVE", + 832: "ORPHANED", +} +var TaskState_value = map[string]int32{ + "NEW": 0, + "PENDING": 64, + "ASSIGNED": 192, + "ACCEPTED": 256, + "PREPARING": 320, + "READY": 384, + "STARTING": 448, + "RUNNING": 512, + "COMPLETE": 576, + "SHUTDOWN": 640, + "FAILED": 704, + "REJECTED": 768, + "REMOVE": 800, + "ORPHANED": 832, +} + +func (x TaskState) String() string { + return proto.EnumName(TaskState_name, int32(x)) +} +func (TaskState) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} } + +type NodeRole int32 + +const ( + NodeRoleWorker NodeRole = 0 + NodeRoleManager NodeRole = 1 +) + +var NodeRole_name = map[int32]string{ + 0: "WORKER", + 1: "MANAGER", +} +var NodeRole_value = map[string]int32{ + "WORKER": 0, + "MANAGER": 1, +} + +func (x NodeRole) String() string { + return proto.EnumName(NodeRole_name, int32(x)) +} +func (NodeRole) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{2} } + +type RaftMemberStatus_Reachability int32 + +const ( + // Unknown indicates that the manager state cannot be resolved + RaftMemberStatus_UNKNOWN RaftMemberStatus_Reachability = 0 + // Unreachable indicates that the node cannot be contacted by other + // raft cluster members. + RaftMemberStatus_UNREACHABLE RaftMemberStatus_Reachability = 1 + // Reachable indicates that the node is healthy and reachable + // by other members. + RaftMemberStatus_REACHABLE RaftMemberStatus_Reachability = 2 +) + +var RaftMemberStatus_Reachability_name = map[int32]string{ + 0: "UNKNOWN", + 1: "UNREACHABLE", + 2: "REACHABLE", +} +var RaftMemberStatus_Reachability_value = map[string]int32{ + "UNKNOWN": 0, + "UNREACHABLE": 1, + "REACHABLE": 2, +} + +func (x RaftMemberStatus_Reachability) String() string { + return proto.EnumName(RaftMemberStatus_Reachability_name, int32(x)) +} +func (RaftMemberStatus_Reachability) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{13, 0} +} + +// TODO(aluzzardi) These should be using `gogoproto.enumvalue_customname`. +type NodeStatus_State int32 + +const ( + // Unknown indicates the node state cannot be resolved. + NodeStatus_UNKNOWN NodeStatus_State = 0 + // Down indicates the node is down. + NodeStatus_DOWN NodeStatus_State = 1 + // Ready indicates the node is ready to accept tasks. + NodeStatus_READY NodeStatus_State = 2 + // Disconnected indicates the node is currently trying to find a new manager.
+ NodeStatus_DISCONNECTED NodeStatus_State = 3 +) + +var NodeStatus_State_name = map[int32]string{ + 0: "UNKNOWN", + 1: "DOWN", + 2: "READY", + 3: "DISCONNECTED", +} +var NodeStatus_State_value = map[string]int32{ + "UNKNOWN": 0, + "DOWN": 1, + "READY": 2, + "DISCONNECTED": 3, +} + +func (x NodeStatus_State) String() string { + return proto.EnumName(NodeStatus_State_name, int32(x)) +} +func (NodeStatus_State) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{14, 0} } + +type Mount_MountType int32 + +const ( + MountTypeBind Mount_MountType = 0 + MountTypeVolume Mount_MountType = 1 + MountTypeTmpfs Mount_MountType = 2 +) + +var Mount_MountType_name = map[int32]string{ + 0: "BIND", + 1: "VOLUME", + 2: "TMPFS", +} +var Mount_MountType_value = map[string]int32{ + "BIND": 0, + "VOLUME": 1, + "TMPFS": 2, +} + +func (x Mount_MountType) String() string { + return proto.EnumName(Mount_MountType_name, int32(x)) +} +func (Mount_MountType) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{16, 0} } + +// Consistency indicates the tolerable level of file system consistency +type Mount_MountConsistency int32 + +const ( + MountConsistencyDefault Mount_MountConsistency = 0 + MountConsistencyFull Mount_MountConsistency = 1 + MountConsistencyCached Mount_MountConsistency = 2 + MountConsistencyDelegated Mount_MountConsistency = 3 +) + +var Mount_MountConsistency_name = map[int32]string{ + 0: "DEFAULT", + 1: "CONSISTENT", + 2: "CACHED", + 3: "DELEGATED", +} +var Mount_MountConsistency_value = map[string]int32{ + "DEFAULT": 0, + "CONSISTENT": 1, + "CACHED": 2, + "DELEGATED": 3, +} + +func (x Mount_MountConsistency) String() string { + return proto.EnumName(Mount_MountConsistency_name, int32(x)) +} +func (Mount_MountConsistency) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{16, 1} +} + +type Mount_BindOptions_MountPropagation int32 + +const ( + MountPropagationRPrivate Mount_BindOptions_MountPropagation = 0 + MountPropagationPrivate Mount_BindOptions_MountPropagation = 1 + MountPropagationRShared Mount_BindOptions_MountPropagation = 2 + MountPropagationShared Mount_BindOptions_MountPropagation = 3 + MountPropagationRSlave Mount_BindOptions_MountPropagation = 4 + MountPropagationSlave Mount_BindOptions_MountPropagation = 5 +) + +var Mount_BindOptions_MountPropagation_name = map[int32]string{ + 0: "RPRIVATE", + 1: "PRIVATE", + 2: "RSHARED", + 3: "SHARED", + 4: "RSLAVE", + 5: "SLAVE", +} +var Mount_BindOptions_MountPropagation_value = map[string]int32{ + "RPRIVATE": 0, + "PRIVATE": 1, + "RSHARED": 2, + "SHARED": 3, + "RSLAVE": 4, + "SLAVE": 5, +} + +func (x Mount_BindOptions_MountPropagation) String() string { + return proto.EnumName(Mount_BindOptions_MountPropagation_name, int32(x)) +} +func (Mount_BindOptions_MountPropagation) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{16, 0, 0} +} + +type RestartPolicy_RestartCondition int32 + +const ( + RestartOnNone RestartPolicy_RestartCondition = 0 + RestartOnFailure RestartPolicy_RestartCondition = 1 + RestartOnAny RestartPolicy_RestartCondition = 2 +) + +var RestartPolicy_RestartCondition_name = map[int32]string{ + 0: "NONE", + 1: "ON_FAILURE", + 2: "ANY", +} +var RestartPolicy_RestartCondition_value = map[string]int32{ + "NONE": 0, + "ON_FAILURE": 1, + "ANY": 2, +} + +func (x RestartPolicy_RestartCondition) String() string { + return proto.EnumName(RestartPolicy_RestartCondition_name, int32(x)) +} +func (RestartPolicy_RestartCondition) EnumDescriptor() ([]byte, []int) { + return 
fileDescriptorTypes, []int{17, 0} +} + +type UpdateConfig_FailureAction int32 + +const ( + UpdateConfig_PAUSE UpdateConfig_FailureAction = 0 + UpdateConfig_CONTINUE UpdateConfig_FailureAction = 1 + UpdateConfig_ROLLBACK UpdateConfig_FailureAction = 2 +) + +var UpdateConfig_FailureAction_name = map[int32]string{ + 0: "PAUSE", + 1: "CONTINUE", + 2: "ROLLBACK", +} +var UpdateConfig_FailureAction_value = map[string]int32{ + "PAUSE": 0, + "CONTINUE": 1, + "ROLLBACK": 2, +} + +func (x UpdateConfig_FailureAction) String() string { + return proto.EnumName(UpdateConfig_FailureAction_name, int32(x)) +} +func (UpdateConfig_FailureAction) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{18, 0} +} + +// UpdateOrder controls the order of operations when rolling out an +// updated task. Either the old task is shut down before the new task +// is started, or the new task is started before the old task is shut +// down. +type UpdateConfig_UpdateOrder int32 + +const ( + UpdateConfig_STOP_FIRST UpdateConfig_UpdateOrder = 0 + UpdateConfig_START_FIRST UpdateConfig_UpdateOrder = 1 +) + +var UpdateConfig_UpdateOrder_name = map[int32]string{ + 0: "STOP_FIRST", + 1: "START_FIRST", +} +var UpdateConfig_UpdateOrder_value = map[string]int32{ + "STOP_FIRST": 0, + "START_FIRST": 1, +} + +func (x UpdateConfig_UpdateOrder) String() string { + return proto.EnumName(UpdateConfig_UpdateOrder_name, int32(x)) +} +func (UpdateConfig_UpdateOrder) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{18, 1} +} + +type UpdateStatus_UpdateState int32 + +const ( + UpdateStatus_UNKNOWN UpdateStatus_UpdateState = 0 + UpdateStatus_UPDATING UpdateStatus_UpdateState = 1 + UpdateStatus_PAUSED UpdateStatus_UpdateState = 2 + UpdateStatus_COMPLETED UpdateStatus_UpdateState = 3 + UpdateStatus_ROLLBACK_STARTED UpdateStatus_UpdateState = 4 + UpdateStatus_ROLLBACK_PAUSED UpdateStatus_UpdateState = 5 + UpdateStatus_ROLLBACK_COMPLETED UpdateStatus_UpdateState = 6 +) + +var UpdateStatus_UpdateState_name = map[int32]string{ + 0: "UNKNOWN", + 1: "UPDATING", + 2: "PAUSED", + 3: "COMPLETED", + 4: "ROLLBACK_STARTED", + 5: "ROLLBACK_PAUSED", + 6: "ROLLBACK_COMPLETED", +} +var UpdateStatus_UpdateState_value = map[string]int32{ + "UNKNOWN": 0, + "UPDATING": 1, + "PAUSED": 2, + "COMPLETED": 3, + "ROLLBACK_STARTED": 4, + "ROLLBACK_PAUSED": 5, + "ROLLBACK_COMPLETED": 6, +} + +func (x UpdateStatus_UpdateState) String() string { + return proto.EnumName(UpdateStatus_UpdateState_name, int32(x)) +} +func (UpdateStatus_UpdateState) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{19, 0} +} + +// AddressFamily specifies the network address family that +// this IPAMConfig belongs to. 
+type IPAMConfig_AddressFamily int32 + +const ( + IPAMConfig_UNKNOWN IPAMConfig_AddressFamily = 0 + IPAMConfig_IPV4 IPAMConfig_AddressFamily = 4 + IPAMConfig_IPV6 IPAMConfig_AddressFamily = 6 +) + +var IPAMConfig_AddressFamily_name = map[int32]string{ + 0: "UNKNOWN", + 4: "IPV4", + 6: "IPV6", +} +var IPAMConfig_AddressFamily_value = map[string]int32{ + "UNKNOWN": 0, + "IPV4": 4, + "IPV6": 6, +} + +func (x IPAMConfig_AddressFamily) String() string { + return proto.EnumName(IPAMConfig_AddressFamily_name, int32(x)) +} +func (IPAMConfig_AddressFamily) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{24, 0} +} + +type PortConfig_Protocol int32 + +const ( + ProtocolTCP PortConfig_Protocol = 0 + ProtocolUDP PortConfig_Protocol = 1 + ProtocolSCTP PortConfig_Protocol = 2 +) + +var PortConfig_Protocol_name = map[int32]string{ + 0: "TCP", + 1: "UDP", + 2: "SCTP", +} +var PortConfig_Protocol_value = map[string]int32{ + "TCP": 0, + "UDP": 1, + "SCTP": 2, +} + +func (x PortConfig_Protocol) String() string { + return proto.EnumName(PortConfig_Protocol_name, int32(x)) +} +func (PortConfig_Protocol) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{25, 0} } + +// PublishMode controls how ports are published on the swarm. +type PortConfig_PublishMode int32 + +const ( + // PublishModeIngress exposes the port across the cluster on all nodes. + PublishModeIngress PortConfig_PublishMode = 0 + // PublishModeHost exposes the port on just the target host. If the + // published port is undefined, an ephemeral port will be allocated. If + // the published port is defined, the node will attempt to allocate it, + // erroring the task if it fails. + PublishModeHost PortConfig_PublishMode = 1 +) + +var PortConfig_PublishMode_name = map[int32]string{ + 0: "INGRESS", + 1: "HOST", +} +var PortConfig_PublishMode_value = map[string]int32{ + "INGRESS": 0, + "HOST": 1, +} + +func (x PortConfig_PublishMode) String() string { + return proto.EnumName(PortConfig_PublishMode_name, int32(x)) +} +func (PortConfig_PublishMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{25, 1} +} + +type IssuanceStatus_State int32 + +const ( + IssuanceStateUnknown IssuanceStatus_State = 0 + // A new certificate should be issued + IssuanceStateRenew IssuanceStatus_State = 1 + // Certificate is pending acceptance + IssuanceStatePending IssuanceStatus_State = 2 + // successful completion certificate issuance + IssuanceStateIssued IssuanceStatus_State = 3 + // Certificate issuance failed + IssuanceStateFailed IssuanceStatus_State = 4 + // Signals workers to renew their certificate. From the CA's perspective + // this is equivalent to IssuanceStateIssued: a noop. 
+ IssuanceStateRotate IssuanceStatus_State = 5 +) + +var IssuanceStatus_State_name = map[int32]string{ + 0: "UNKNOWN", + 1: "RENEW", + 2: "PENDING", + 3: "ISSUED", + 4: "FAILED", + 5: "ROTATE", +} +var IssuanceStatus_State_value = map[string]int32{ + "UNKNOWN": 0, + "RENEW": 1, + "PENDING": 2, + "ISSUED": 3, + "FAILED": 4, + "ROTATE": 5, +} + +func (x IssuanceStatus_State) String() string { + return proto.EnumName(IssuanceStatus_State_name, int32(x)) +} +func (IssuanceStatus_State) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{30, 0} } + +type ExternalCA_CAProtocol int32 + +const ( + ExternalCA_CAProtocolCFSSL ExternalCA_CAProtocol = 0 +) + +var ExternalCA_CAProtocol_name = map[int32]string{ + 0: "CFSSL", +} +var ExternalCA_CAProtocol_value = map[string]int32{ + "CFSSL": 0, +} + +func (x ExternalCA_CAProtocol) String() string { + return proto.EnumName(ExternalCA_CAProtocol_name, int32(x)) +} +func (ExternalCA_CAProtocol) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{32, 0} +} + +// Encryption algorithm that can be implemented using this key +type EncryptionKey_Algorithm int32 + +const ( + AES_128_GCM EncryptionKey_Algorithm = 0 +) + +var EncryptionKey_Algorithm_name = map[int32]string{ + 0: "AES_128_GCM", +} +var EncryptionKey_Algorithm_value = map[string]int32{ + "AES_128_GCM": 0, +} + +func (x EncryptionKey_Algorithm) String() string { + return proto.EnumName(EncryptionKey_Algorithm_name, int32(x)) +} +func (EncryptionKey_Algorithm) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{45, 0} +} + +type MaybeEncryptedRecord_Algorithm int32 + +const ( + MaybeEncryptedRecord_NotEncrypted MaybeEncryptedRecord_Algorithm = 0 + MaybeEncryptedRecord_NACLSecretboxSalsa20Poly1305 MaybeEncryptedRecord_Algorithm = 1 + MaybeEncryptedRecord_FernetAES128CBC MaybeEncryptedRecord_Algorithm = 2 +) + +var MaybeEncryptedRecord_Algorithm_name = map[int32]string{ + 0: "NONE", + 1: "SECRETBOX_SALSA20_POLY1305", + 2: "FERNET_AES_128_CBC", +} +var MaybeEncryptedRecord_Algorithm_value = map[string]int32{ + "NONE": 0, + "SECRETBOX_SALSA20_POLY1305": 1, + "FERNET_AES_128_CBC": 2, +} + +func (x MaybeEncryptedRecord_Algorithm) String() string { + return proto.EnumName(MaybeEncryptedRecord_Algorithm_name, int32(x)) +} +func (MaybeEncryptedRecord_Algorithm) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{52, 0} +} + +// Version tracks the last time an object in the store was updated. +type Version struct { + Index uint64 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` +} + +func (m *Version) Reset() { *m = Version{} } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} } + +type IndexEntry struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Val string `protobuf:"bytes,2,opt,name=val,proto3" json:"val,omitempty"` +} + +func (m *IndexEntry) Reset() { *m = IndexEntry{} } +func (*IndexEntry) ProtoMessage() {} +func (*IndexEntry) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} } + +// Annotations provide useful information to identify API objects. They are +// common to all API specs.
+type Annotations struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Indices provides keys and values for indexing this object. + // A single key may have multiple values. + Indices []IndexEntry `protobuf:"bytes,4,rep,name=indices" json:"indices"` +} + +func (m *Annotations) Reset() { *m = Annotations{} } +func (*Annotations) ProtoMessage() {} +func (*Annotations) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{2} } + +// NamedGenericResource represents a "user defined" resource which is defined +// as a string. +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...) +type NamedGenericResource struct { + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *NamedGenericResource) Reset() { *m = NamedGenericResource{} } +func (*NamedGenericResource) ProtoMessage() {} +func (*NamedGenericResource) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{3} } + +// DiscreteGenericResource represents a "user defined" resource which is defined +// as an integer +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to count the resource (SSD=5, HDD=3, ...) +type DiscreteGenericResource struct { + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *DiscreteGenericResource) Reset() { *m = DiscreteGenericResource{} } +func (*DiscreteGenericResource) ProtoMessage() {} +func (*DiscreteGenericResource) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{4} } + +// GenericResource represents a "user defined" resource which can +// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1) +type GenericResource struct { + // Types that are valid to be assigned to Resource: + // *GenericResource_NamedResourceSpec + // *GenericResource_DiscreteResourceSpec + Resource isGenericResource_Resource `protobuf_oneof:"resource"` +} + +func (m *GenericResource) Reset() { *m = GenericResource{} } +func (*GenericResource) ProtoMessage() {} +func (*GenericResource) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{5} } + +type isGenericResource_Resource interface { + isGenericResource_Resource() + MarshalTo([]byte) (int, error) + Size() int +} + +type GenericResource_NamedResourceSpec struct { + NamedResourceSpec *NamedGenericResource `protobuf:"bytes,1,opt,name=named_resource_spec,json=namedResourceSpec,oneof"` +} +type GenericResource_DiscreteResourceSpec struct { + DiscreteResourceSpec *DiscreteGenericResource `protobuf:"bytes,2,opt,name=discrete_resource_spec,json=discreteResourceSpec,oneof"` +} + +func (*GenericResource_NamedResourceSpec) isGenericResource_Resource() {} +func (*GenericResource_DiscreteResourceSpec) isGenericResource_Resource() {} + +func (m *GenericResource) GetResource() isGenericResource_Resource { + if m != nil { + return m.Resource + } + return nil +} + +func (m *GenericResource) GetNamedResourceSpec() *NamedGenericResource { + if x, ok := m.GetResource().(*GenericResource_NamedResourceSpec); ok { + return x.NamedResourceSpec + } + 
return nil +} + +func (m *GenericResource) GetDiscreteResourceSpec() *DiscreteGenericResource { + if x, ok := m.GetResource().(*GenericResource_DiscreteResourceSpec); ok { + return x.DiscreteResourceSpec + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*GenericResource) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _GenericResource_OneofMarshaler, _GenericResource_OneofUnmarshaler, _GenericResource_OneofSizer, []interface{}{ + (*GenericResource_NamedResourceSpec)(nil), + (*GenericResource_DiscreteResourceSpec)(nil), + } +} + +func _GenericResource_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*GenericResource) + // resource + switch x := m.Resource.(type) { + case *GenericResource_NamedResourceSpec: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.NamedResourceSpec); err != nil { + return err + } + case *GenericResource_DiscreteResourceSpec: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DiscreteResourceSpec); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("GenericResource.Resource has unexpected type %T", x) + } + return nil +} + +func _GenericResource_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*GenericResource) + switch tag { + case 1: // resource.named_resource_spec + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(NamedGenericResource) + err := b.DecodeMessage(msg) + m.Resource = &GenericResource_NamedResourceSpec{msg} + return true, err + case 2: // resource.discrete_resource_spec + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DiscreteGenericResource) + err := b.DecodeMessage(msg) + m.Resource = &GenericResource_DiscreteResourceSpec{msg} + return true, err + default: + return false, nil + } +} + +func _GenericResource_OneofSizer(msg proto.Message) (n int) { + m := msg.(*GenericResource) + // resource + switch x := m.Resource.(type) { + case *GenericResource_NamedResourceSpec: + s := proto.Size(x.NamedResourceSpec) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *GenericResource_DiscreteResourceSpec: + s := proto.Size(x.DiscreteResourceSpec) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Resources struct { + // Amount of CPUs (e.g. 2000000000 = 2 CPU cores) + NanoCPUs int64 `protobuf:"varint,1,opt,name=nano_cpus,json=nanoCpus,proto3" json:"nano_cpus,omitempty"` + // Amount of memory in bytes. 
+ MemoryBytes int64 `protobuf:"varint,2,opt,name=memory_bytes,json=memoryBytes,proto3" json:"memory_bytes,omitempty"` + // User specified resource (e.g: bananas=2;apple={red,yellow,green}) + Generic []*GenericResource `protobuf:"bytes,3,rep,name=generic" json:"generic,omitempty"` +} + +func (m *Resources) Reset() { *m = Resources{} } +func (*Resources) ProtoMessage() {} +func (*Resources) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{6} } + +type ResourceRequirements struct { + Limits *Resources `protobuf:"bytes,1,opt,name=limits" json:"limits,omitempty"` + Reservations *Resources `protobuf:"bytes,2,opt,name=reservations" json:"reservations,omitempty"` +} + +func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } +func (*ResourceRequirements) ProtoMessage() {} +func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{7} } + +type Platform struct { + // Architecture (e.g. x86_64) + Architecture string `protobuf:"bytes,1,opt,name=architecture,proto3" json:"architecture,omitempty"` + // Operating System (e.g. linux) + OS string `protobuf:"bytes,2,opt,name=os,proto3" json:"os,omitempty"` +} + +func (m *Platform) Reset() { *m = Platform{} } +func (*Platform) ProtoMessage() {} +func (*Platform) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{8} } + +// PluginDescription describes an engine plugin. +type PluginDescription struct { + // Type of plugin. Canonical values for existing types are + // Volume, Network, and Authorization. More types could be + // supported in the future. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Name of the plugin + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` +} + +func (m *PluginDescription) Reset() { *m = PluginDescription{} } +func (*PluginDescription) ProtoMessage() {} +func (*PluginDescription) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{9} } + +type EngineDescription struct { + // Docker daemon version running on the node. + EngineVersion string `protobuf:"bytes,1,opt,name=engine_version,json=engineVersion,proto3" json:"engine_version,omitempty"` + // Labels attached to the engine. + Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Volume, Network, and Auth plugins + Plugins []PluginDescription `protobuf:"bytes,3,rep,name=plugins" json:"plugins"` +} + +func (m *EngineDescription) Reset() { *m = EngineDescription{} } +func (*EngineDescription) ProtoMessage() {} +func (*EngineDescription) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{10} } + +type NodeDescription struct { + // Hostname of the node as reported by the agent. + // This is different from spec.meta.name which is user-defined. + Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"` + // Platform of the node. + Platform *Platform `protobuf:"bytes,2,opt,name=platform" json:"platform,omitempty"` + // Total resources on the node. + Resources *Resources `protobuf:"bytes,3,opt,name=resources" json:"resources,omitempty"` + // Information about the Docker Engine on the node. 
+ Engine *EngineDescription `protobuf:"bytes,4,opt,name=engine" json:"engine,omitempty"` + // Information on the node's TLS setup + TLSInfo *NodeTLSInfo `protobuf:"bytes,5,opt,name=tls_info,json=tlsInfo" json:"tls_info,omitempty"` + // FIPS indicates whether the node is FIPS-enabled + FIPS bool `protobuf:"varint,6,opt,name=fips,proto3" json:"fips,omitempty"` +} + +func (m *NodeDescription) Reset() { *m = NodeDescription{} } +func (*NodeDescription) ProtoMessage() {} +func (*NodeDescription) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{11} } + +type NodeTLSInfo struct { + // Information about which root certs the node trusts + TrustRoot []byte `protobuf:"bytes,1,opt,name=trust_root,json=trustRoot,proto3" json:"trust_root,omitempty"` + // Information about the node's current TLS certificate + CertIssuerSubject []byte `protobuf:"bytes,2,opt,name=cert_issuer_subject,json=certIssuerSubject,proto3" json:"cert_issuer_subject,omitempty"` + CertIssuerPublicKey []byte `protobuf:"bytes,3,opt,name=cert_issuer_public_key,json=certIssuerPublicKey,proto3" json:"cert_issuer_public_key,omitempty"` +} + +func (m *NodeTLSInfo) Reset() { *m = NodeTLSInfo{} } +func (*NodeTLSInfo) ProtoMessage() {} +func (*NodeTLSInfo) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{12} } + +type RaftMemberStatus struct { + Leader bool `protobuf:"varint,1,opt,name=leader,proto3" json:"leader,omitempty"` + Reachability RaftMemberStatus_Reachability `protobuf:"varint,2,opt,name=reachability,proto3,enum=docker.swarmkit.v1.RaftMemberStatus_Reachability" json:"reachability,omitempty"` + Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` +} + +func (m *RaftMemberStatus) Reset() { *m = RaftMemberStatus{} } +func (*RaftMemberStatus) ProtoMessage() {} +func (*RaftMemberStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{13} } + +type NodeStatus struct { + State NodeStatus_State `protobuf:"varint,1,opt,name=state,proto3,enum=docker.swarmkit.v1.NodeStatus_State" json:"state,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // Addr is the node's IP address as observed by the manager + Addr string `protobuf:"bytes,3,opt,name=addr,proto3" json:"addr,omitempty"` +} + +func (m *NodeStatus) Reset() { *m = NodeStatus{} } +func (*NodeStatus) ProtoMessage() {} +func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{14} } + +type Image struct { + // reference is a docker image reference. This can include a repository, tag + // or be fully qualified with a digest. The format is specified in the + // distribution/reference package. + Reference string `protobuf:"bytes,1,opt,name=reference,proto3" json:"reference,omitempty"` +} + +func (m *Image) Reset() { *m = Image{} } +func (*Image) ProtoMessage() {} +func (*Image) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{15} } + +// Mount describes volume mounts for a container. +// +// The Mount type follows the structure of the mount syscall, including a type, +// source, target. Top-level flags, such as writable, are common to all kinds +// of mounts, where we also provide options that are specific to a type of +// mount. This corresponds to flags and data, respectively, in the syscall. +type Mount struct { + // Type defines the nature of the mount. + Type Mount_MountType `protobuf:"varint,1,opt,name=type,proto3,enum=docker.swarmkit.v1.Mount_MountType" json:"type,omitempty"` + // Source specifies the name of the mount.
Depending on mount type, this + // may be a volume name or a host path, or even ignored. + Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"` + // Target path in container + Target string `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + // ReadOnly should be set to true if the mount should not be writable. + ReadOnly bool `protobuf:"varint,4,opt,name=readonly,proto3" json:"readonly,omitempty"` + Consistency Mount_MountConsistency `protobuf:"varint,8,opt,name=consistency,proto3,enum=docker.swarmkit.v1.Mount_MountConsistency" json:"consistency,omitempty"` + // BindOptions configures properties of a bind mount type. + // + // For mounts of type bind, the source must be an absolute host path. + BindOptions *Mount_BindOptions `protobuf:"bytes,5,opt,name=bind_options,json=bindOptions" json:"bind_options,omitempty"` + // VolumeOptions configures the properties specific to a volume mount type. + // + // For mounts of type volume, the source will be used as the volume name. + VolumeOptions *Mount_VolumeOptions `protobuf:"bytes,6,opt,name=volume_options,json=volumeOptions" json:"volume_options,omitempty"` + // TmpfsOptions allows one to set options for mounting a temporary + // filesystem. + // + // The source field will be ignored when using mounts of type tmpfs. + TmpfsOptions *Mount_TmpfsOptions `protobuf:"bytes,7,opt,name=tmpfs_options,json=tmpfsOptions" json:"tmpfs_options,omitempty"` +} + +func (m *Mount) Reset() { *m = Mount{} } +func (*Mount) ProtoMessage() {} +func (*Mount) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{16} } + +// BindOptions specifies options that are specific to a bind mount. +type Mount_BindOptions struct { + // Propagation mode of mount. + Propagation Mount_BindOptions_MountPropagation `protobuf:"varint,1,opt,name=propagation,proto3,enum=docker.swarmkit.v1.Mount_BindOptions_MountPropagation" json:"propagation,omitempty"` +} + +func (m *Mount_BindOptions) Reset() { *m = Mount_BindOptions{} } +func (*Mount_BindOptions) ProtoMessage() {} +func (*Mount_BindOptions) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{16, 0} } + +// VolumeOptions contains parameters for mounting the volume. +type Mount_VolumeOptions struct { + // nocopy prevents automatically populating the volume with data from the target + NoCopy bool `protobuf:"varint,1,opt,name=nocopy,proto3" json:"nocopy,omitempty"` + // labels to apply to the volume if it is created + Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // DriverConfig specifies the options that may be passed to the driver + // if the volume is created. + // + // If this is empty, no volume will be created if the volume is missing. + DriverConfig *Driver `protobuf:"bytes,3,opt,name=driver_config,json=driverConfig" json:"driver_config,omitempty"` +} + +func (m *Mount_VolumeOptions) Reset() { *m = Mount_VolumeOptions{} } +func (*Mount_VolumeOptions) ProtoMessage() {} +func (*Mount_VolumeOptions) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{16, 1} } + +type Mount_TmpfsOptions struct { + // Size sets the size of the tmpfs, in bytes. + // + // This will be converted to an operating system specific value + // depending on the host. For example, on Linux, it will be converted to + // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with + // docker, uses a straight byte value.
+ // + // Percentages are not supported. + SizeBytes int64 `protobuf:"varint,1,opt,name=size_bytes,json=sizeBytes,proto3" json:"size_bytes,omitempty"` + // Mode of the tmpfs upon creation + Mode os.FileMode `protobuf:"varint,2,opt,name=mode,proto3,customtype=os.FileMode" json:"mode"` +} + +func (m *Mount_TmpfsOptions) Reset() { *m = Mount_TmpfsOptions{} } +func (*Mount_TmpfsOptions) ProtoMessage() {} +func (*Mount_TmpfsOptions) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{16, 2} } + +type RestartPolicy struct { + Condition RestartPolicy_RestartCondition `protobuf:"varint,1,opt,name=condition,proto3,enum=docker.swarmkit.v1.RestartPolicy_RestartCondition" json:"condition,omitempty"` + // Delay between restart attempts + // Note: can't use stdduration because this field needs to be nullable. + Delay *google_protobuf1.Duration `protobuf:"bytes,2,opt,name=delay" json:"delay,omitempty"` + // MaxAttempts is the maximum number of restarts to attempt on an + // instance before giving up. Ignored if 0. + MaxAttempts uint64 `protobuf:"varint,3,opt,name=max_attempts,json=maxAttempts,proto3" json:"max_attempts,omitempty"` + // Window is the time window used to evaluate the restart policy. + // The time window is unbounded if this is 0. + // Note: can't use stdduration because this field needs to be nullable. + Window *google_protobuf1.Duration `protobuf:"bytes,4,opt,name=window" json:"window,omitempty"` +} + +func (m *RestartPolicy) Reset() { *m = RestartPolicy{} } +func (*RestartPolicy) ProtoMessage() {} +func (*RestartPolicy) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{17} } + +// UpdateConfig specifies the rate and policy of updates. +// TODO(aluzzardi): Consider making this a oneof with RollingStrategy and LockstepStrategy. +type UpdateConfig struct { + // Maximum number of tasks to be updated in one iteration. + // 0 means unlimited parallelism. + Parallelism uint64 `protobuf:"varint,1,opt,name=parallelism,proto3" json:"parallelism,omitempty"` + // Amount of time between updates. + Delay time.Duration `protobuf:"bytes,2,opt,name=delay,stdduration" json:"delay"` + // FailureAction is the action to take when an update fails. + FailureAction UpdateConfig_FailureAction `protobuf:"varint,3,opt,name=failure_action,json=failureAction,proto3,enum=docker.swarmkit.v1.UpdateConfig_FailureAction" json:"failure_action,omitempty"` + // Monitor indicates how long to monitor a task for failure after it is + // created. If the task fails by ending up in one of the states + // REJECTED, COMPLETED, or FAILED, within Monitor from its creation, + // this counts as a failure. If it fails after Monitor, it does not + // count as a failure. If Monitor is unspecified, a default value will + // be used. + // Note: can't use stdduration because this field needs to be nullable. + Monitor *google_protobuf1.Duration `protobuf:"bytes,4,opt,name=monitor" json:"monitor,omitempty"` + // MaxFailureRatio is the fraction of tasks that may fail during + // an update before the failure action is invoked. Any task created by + // the current update which ends up in one of the states REJECTED, + // COMPLETED or FAILED within Monitor from its creation counts as a + // failure. The number of failures is divided by the number of tasks + // being updated, and if this fraction is greater than + // MaxFailureRatio, the failure action is invoked. + // + // If the failure action is CONTINUE, there is no effect.
+ // If the failure action is PAUSE, no more tasks will be updated until + // another update is started. + // If the failure action is ROLLBACK, the orchestrator will attempt to + // roll back to the previous service spec. If the MaxFailureRatio + // threshold is hit during the rollback, the rollback will pause. + MaxFailureRatio float32 `protobuf:"fixed32,5,opt,name=max_failure_ratio,json=maxFailureRatio,proto3" json:"max_failure_ratio,omitempty"` + Order UpdateConfig_UpdateOrder `protobuf:"varint,6,opt,name=order,proto3,enum=docker.swarmkit.v1.UpdateConfig_UpdateOrder" json:"order,omitempty"` +} + +func (m *UpdateConfig) Reset() { *m = UpdateConfig{} } +func (*UpdateConfig) ProtoMessage() {} +func (*UpdateConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{18} } + +// UpdateStatus is the status of an update in progress. +type UpdateStatus struct { + // State is the state of this update. It indicates whether the + // update is in progress, completed, paused, rolling back, or + // finished rolling back. + State UpdateStatus_UpdateState `protobuf:"varint,1,opt,name=state,proto3,enum=docker.swarmkit.v1.UpdateStatus_UpdateState" json:"state,omitempty"` + // StartedAt is the time at which the update was started. + // Note: can't use stdtime because this field is nullable. + StartedAt *google_protobuf.Timestamp `protobuf:"bytes,2,opt,name=started_at,json=startedAt" json:"started_at,omitempty"` + // CompletedAt is the time at which the update completed successfully, + // paused, or finished rolling back. + // Note: can't use stdtime because this field is nullable. + CompletedAt *google_protobuf.Timestamp `protobuf:"bytes,3,opt,name=completed_at,json=completedAt" json:"completed_at,omitempty"` + // Message explains how the update got into its current state. For + // example, if the update is paused, it will explain what is preventing + // the update from proceeding (typically the failure of a task to start up + // when OnFailure is PAUSE). + Message string `protobuf:"bytes,4,opt,name=message,proto3" json:"message,omitempty"` +} + +func (m *UpdateStatus) Reset() { *m = UpdateStatus{} } +func (*UpdateStatus) ProtoMessage() {} +func (*UpdateStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{19} } + +// Container specific status. +type ContainerStatus struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + PID int32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` + ExitCode int32 `protobuf:"varint,3,opt,name=exit_code,json=exitCode,proto3" json:"exit_code,omitempty"` +} + +func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } +func (*ContainerStatus) ProtoMessage() {} +func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{20} } + +// PortStatus specifies the actual allocated runtime state of a list +// of port configs. +type PortStatus struct { + Ports []*PortConfig `protobuf:"bytes,1,rep,name=ports" json:"ports,omitempty"` +} + +func (m *PortStatus) Reset() { *m = PortStatus{} } +func (*PortStatus) ProtoMessage() {} +func (*PortStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{21} } + +type TaskStatus struct { + // Note: can't use stdtime because this field is nullable. + Timestamp *google_protobuf.Timestamp `protobuf:"bytes,1,opt,name=timestamp" json:"timestamp,omitempty"` + // State expresses the current state of the task. 
+ State TaskState `protobuf:"varint,2,opt,name=state,proto3,enum=docker.swarmkit.v1.TaskState" json:"state,omitempty"` + // Message reports a message for the task status. This should provide a + // human readable message that can point to how the task actually arrived + // at its current state. + // + // As a convention, we place a small message here describing what led to the + // current state. For example, if the task is in ready, because it was + // prepared, we'd place "prepared" in this field. If we skipped preparation + // because the task is prepared, we would put "already prepared" in this + // field. + Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` + // Err is set if the task is in an error state, or is unable to + // progress from an earlier state because a precondition is + // unsatisfied. + // + // The following states should report a companion error: + // + // FAILED, REJECTED + // + // In general, messages that should be surfaced to users belong in the + // Err field, and notes on routine state transitions belong in Message. + // + // TODO(stevvooe) Integrate this field with the error interface. + Err string `protobuf:"bytes,4,opt,name=err,proto3" json:"err,omitempty"` + // Container status contains container specific status information. + // + // Types that are valid to be assigned to RuntimeStatus: + // *TaskStatus_Container + RuntimeStatus isTaskStatus_RuntimeStatus `protobuf_oneof:"runtime_status"` + // HostPorts provides a list of ports allocated at the host + // level. + PortStatus *PortStatus `protobuf:"bytes,6,opt,name=port_status,json=portStatus" json:"port_status,omitempty"` + // AppliedBy gives the node ID of the manager that applied this task + // status update to the Task object. + AppliedBy string `protobuf:"bytes,7,opt,name=applied_by,json=appliedBy,proto3" json:"applied_by,omitempty"` + // AppliedAt gives a timestamp of when this status update was applied to + // the Task object. + // Note: can't use stdtime because this field is nullable. + AppliedAt *google_protobuf.Timestamp `protobuf:"bytes,8,opt,name=applied_at,json=appliedAt" json:"applied_at,omitempty"` +} + +func (m *TaskStatus) Reset() { *m = TaskStatus{} } +func (*TaskStatus) ProtoMessage() {} +func (*TaskStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{22} } + +type isTaskStatus_RuntimeStatus interface { + isTaskStatus_RuntimeStatus() + MarshalTo([]byte) (int, error) + Size() int +} + +type TaskStatus_Container struct { + Container *ContainerStatus `protobuf:"bytes,5,opt,name=container,oneof"` +} + +func (*TaskStatus_Container) isTaskStatus_RuntimeStatus() {} + +func (m *TaskStatus) GetRuntimeStatus() isTaskStatus_RuntimeStatus { + if m != nil { + return m.RuntimeStatus + } + return nil +} + +func (m *TaskStatus) GetContainer() *ContainerStatus { + if x, ok := m.GetRuntimeStatus().(*TaskStatus_Container); ok { + return x.Container + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package.
+func (*TaskStatus) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _TaskStatus_OneofMarshaler, _TaskStatus_OneofUnmarshaler, _TaskStatus_OneofSizer, []interface{}{ + (*TaskStatus_Container)(nil), + } +} + +func _TaskStatus_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*TaskStatus) + // runtime_status + switch x := m.RuntimeStatus.(type) { + case *TaskStatus_Container: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Container); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("TaskStatus.RuntimeStatus has unexpected type %T", x) + } + return nil +} + +func _TaskStatus_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*TaskStatus) + switch tag { + case 5: // runtime_status.container + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ContainerStatus) + err := b.DecodeMessage(msg) + m.RuntimeStatus = &TaskStatus_Container{msg} + return true, err + default: + return false, nil + } +} + +func _TaskStatus_OneofSizer(msg proto.Message) (n int) { + m := msg.(*TaskStatus) + // runtime_status + switch x := m.RuntimeStatus.(type) { + case *TaskStatus_Container: + s := proto.Size(x.Container) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// NetworkAttachmentConfig specifies how a service should be attached to a particular network. +// +// For now, this is a simple struct, but this can include future information +// instructing Swarm on how this service should work on the particular +// network. +type NetworkAttachmentConfig struct { + // Target specifies the target network for attachment. This value must be a + // network ID. + Target string `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` + // Aliases specifies a list of discoverable alternate names for the service on this Target. + Aliases []string `protobuf:"bytes,2,rep,name=aliases" json:"aliases,omitempty"` + // Addresses specifies a list of ipv4 and ipv6 addresses + // preferred. If these addresses are not available then the + // attachment might fail. + Addresses []string `protobuf:"bytes,3,rep,name=addresses" json:"addresses,omitempty"` + // DriverAttachmentOpts is a map of driver attachment options for the network target + DriverAttachmentOpts map[string]string `protobuf:"bytes,4,rep,name=driver_attachment_opts,json=driverAttachmentOpts" json:"driver_attachment_opts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *NetworkAttachmentConfig) Reset() { *m = NetworkAttachmentConfig{} } +func (*NetworkAttachmentConfig) ProtoMessage() {} +func (*NetworkAttachmentConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{23} } + +// IPAMConfig specifies parameters for IP Address Management. +type IPAMConfig struct { + Family IPAMConfig_AddressFamily `protobuf:"varint,1,opt,name=family,proto3,enum=docker.swarmkit.v1.IPAMConfig_AddressFamily" json:"family,omitempty"` + // Subnet defines a network as a CIDR address (ie network and mask + // 192.168.0.1/24). 
+	Subnet string `protobuf:"bytes,2,opt,name=subnet,proto3" json:"subnet,omitempty"`
+	// Range defines the portion of the subnet to allocate to tasks. This is
+	// defined as a subnet within the primary subnet.
+	Range string `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"`
+	// Gateway address within the subnet.
+	Gateway string `protobuf:"bytes,4,opt,name=gateway,proto3" json:"gateway,omitempty"`
+	// Reserved is a list of addresses from the master pool that should *not* be
+	// allocated. These addresses may have already been allocated or may be
+	// reserved for another allocation manager.
+	Reserved map[string]string `protobuf:"bytes,5,rep,name=reserved" json:"reserved,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *IPAMConfig) Reset()      { *m = IPAMConfig{} }
+func (*IPAMConfig) ProtoMessage() {}
+func (*IPAMConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{24} }
+
+// PortConfig specifies an exposed port which can be
+// addressed using the given name. This can be later queried
+// using a service discovery api or a DNS SRV query. The node
+// port specifies a port that can be used to address this
+// service external to the cluster by sending a connection
+// request to this port to any node on the cluster.
+type PortConfig struct {
+	// Name for the port. If provided the port information can
+	// be queried using the name as in a DNS SRV query.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// Protocol for the port which is exposed.
+	Protocol PortConfig_Protocol `protobuf:"varint,2,opt,name=protocol,proto3,enum=docker.swarmkit.v1.PortConfig_Protocol" json:"protocol,omitempty"`
+	// The port which the application is exposing and is bound to.
+	TargetPort uint32 `protobuf:"varint,3,opt,name=target_port,json=targetPort,proto3" json:"target_port,omitempty"`
+	// PublishedPort specifies the port on which the service is exposed. If
+	// specified, the port must be within the available range. If not specified
+	// (value is zero), an available port is automatically assigned.
+	PublishedPort uint32 `protobuf:"varint,4,opt,name=published_port,json=publishedPort,proto3" json:"published_port,omitempty"`
+	// PublishMode controls how the port is published.
+	PublishMode PortConfig_PublishMode `protobuf:"varint,5,opt,name=publish_mode,json=publishMode,proto3,enum=docker.swarmkit.v1.PortConfig_PublishMode" json:"publish_mode,omitempty"`
+}
+
+func (m *PortConfig) Reset()      { *m = PortConfig{} }
+func (*PortConfig) ProtoMessage() {}
+func (*PortConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{25} }
+
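+// Editor's illustrative sketch (not part of the generated code): a named port
+// left with PublishedPort zero so that, per the field comment above, an
+// available port is assigned automatically. Values are hypothetical.
+//
+//	pc := &PortConfig{
+//		Name:       "web",
+//		TargetPort: 8080, // the port the application binds to
+//	}
+
+// Driver is a generic driver type to be used throughout the API. For now, a
+// driver is simply a name and set of options. The field contents depend on the
+// target use case and driver application. For example, a network driver may
+// have different rules than a volume driver.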
+type Driver struct {
+	Name    string            `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Options map[string]string `protobuf:"bytes,2,rep,name=options" json:"options,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *Driver) Reset()      { *m = Driver{} }
+func (*Driver) ProtoMessage() {}
+func (*Driver) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{26} }
+
+type IPAMOptions struct {
+	Driver  *Driver       `protobuf:"bytes,1,opt,name=driver" json:"driver,omitempty"`
+	Configs []*IPAMConfig `protobuf:"bytes,3,rep,name=configs" json:"configs,omitempty"`
+}
+
+func (m *IPAMOptions) Reset()      { *m = IPAMOptions{} }
+func (*IPAMOptions) ProtoMessage() {}
+func (*IPAMOptions) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{27} }
+
+// Peer should be used anywhere we are describing a remote peer.
+type Peer struct {
+	NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+	Addr   string `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"`
+}
+
+func (m *Peer) Reset()      { *m = Peer{} }
+func (*Peer) ProtoMessage() {}
+func (*Peer) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{28} }
+
+// WeightedPeer should be used anywhere we are describing a remote peer
+// with a weight.
+type WeightedPeer struct {
+	Peer   *Peer `protobuf:"bytes,1,opt,name=peer" json:"peer,omitempty"`
+	Weight int64 `protobuf:"varint,2,opt,name=weight,proto3" json:"weight,omitempty"`
+}
+
+func (m *WeightedPeer) Reset()      { *m = WeightedPeer{} }
+func (*WeightedPeer) ProtoMessage() {}
+func (*WeightedPeer) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{29} }
+
+type IssuanceStatus struct {
+	State IssuanceStatus_State `protobuf:"varint,1,opt,name=state,proto3,enum=docker.swarmkit.v1.IssuanceStatus_State" json:"state,omitempty"`
+	// Err is set if the Certificate Issuance is in an error state.
+	// The following states should report a companion error:
+	// FAILED
+	Err string `protobuf:"bytes,2,opt,name=err,proto3" json:"err,omitempty"`
+}
+
+func (m *IssuanceStatus) Reset()      { *m = IssuanceStatus{} }
+func (*IssuanceStatus) ProtoMessage() {}
+func (*IssuanceStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{30} }
+
+type AcceptancePolicy struct {
+	Policies []*AcceptancePolicy_RoleAdmissionPolicy `protobuf:"bytes,1,rep,name=policies" json:"policies,omitempty"`
+}
+
+func (m *AcceptancePolicy) Reset()      { *m = AcceptancePolicy{} }
+func (*AcceptancePolicy) ProtoMessage() {}
+func (*AcceptancePolicy) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{31} }
+
+type AcceptancePolicy_RoleAdmissionPolicy struct {
+	Role NodeRole `protobuf:"varint,1,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole" json:"role,omitempty"`
+	// Autoaccept controls which roles' certificates are automatically
+	// issued without administrator intervention.
+ Autoaccept bool `protobuf:"varint,2,opt,name=autoaccept,proto3" json:"autoaccept,omitempty"` + // Secret represents a user-provided string that is necessary for new + // nodes to join the cluster + Secret *AcceptancePolicy_RoleAdmissionPolicy_Secret `protobuf:"bytes,3,opt,name=secret" json:"secret,omitempty"` +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy) Reset() { *m = AcceptancePolicy_RoleAdmissionPolicy{} } +func (*AcceptancePolicy_RoleAdmissionPolicy) ProtoMessage() {} +func (*AcceptancePolicy_RoleAdmissionPolicy) Descriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{31, 0} +} + +type AcceptancePolicy_RoleAdmissionPolicy_Secret struct { + // The actual content (possibly hashed) + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + // The type of hash we are using, or "plaintext" + Alg string `protobuf:"bytes,2,opt,name=alg,proto3" json:"alg,omitempty"` +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) Reset() { + *m = AcceptancePolicy_RoleAdmissionPolicy_Secret{} +} +func (*AcceptancePolicy_RoleAdmissionPolicy_Secret) ProtoMessage() {} +func (*AcceptancePolicy_RoleAdmissionPolicy_Secret) Descriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{31, 0, 0} +} + +type ExternalCA struct { + // Protocol is the protocol used by this external CA. + Protocol ExternalCA_CAProtocol `protobuf:"varint,1,opt,name=protocol,proto3,enum=docker.swarmkit.v1.ExternalCA_CAProtocol" json:"protocol,omitempty"` + // URL is the URL where the external CA can be reached. + URL string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + // Options is a set of additional key/value pairs whose interpretation + // depends on the specified CA type. + Options map[string]string `protobuf:"bytes,3,rep,name=options" json:"options,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // CACert specifies which root CA is used by this external CA + CACert []byte `protobuf:"bytes,4,opt,name=ca_cert,json=caCert,proto3" json:"ca_cert,omitempty"` +} + +func (m *ExternalCA) Reset() { *m = ExternalCA{} } +func (*ExternalCA) ProtoMessage() {} +func (*ExternalCA) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{32} } + +type CAConfig struct { + // NodeCertExpiry is the duration certificates should be issued for + // Note: can't use stdduration because this field needs to be nullable. + NodeCertExpiry *google_protobuf1.Duration `protobuf:"bytes,1,opt,name=node_cert_expiry,json=nodeCertExpiry" json:"node_cert_expiry,omitempty"` + // ExternalCAs is a list of CAs to which a manager node will make + // certificate signing requests for node certificates. + ExternalCAs []*ExternalCA `protobuf:"bytes,2,rep,name=external_cas,json=externalCas" json:"external_cas,omitempty"` + // SigningCACert is the desired CA certificate to be used as the root and + // signing CA for the swarm. If not provided, indicates that we are either happy + // with the current configuration, or (together with a bump in the ForceRotate value) + // that we want a certificate and key generated for us. 
+	SigningCACert []byte `protobuf:"bytes,3,opt,name=signing_ca_cert,json=signingCaCert,proto3" json:"signing_ca_cert,omitempty"`
+	// SigningCAKey is the desired private key, matching the signing CA cert, to be used
+	// to sign certificates for the swarm.
+	SigningCAKey []byte `protobuf:"bytes,4,opt,name=signing_ca_key,json=signingCaKey,proto3" json:"signing_ca_key,omitempty"`
+	// ForceRotate is a counter that triggers a root CA rotation even if no relevant
+	// parameters have been changed in the spec. This will force the manager to generate a new
+	// certificate and key, if none have been provided.
+	ForceRotate uint64 `protobuf:"varint,5,opt,name=force_rotate,json=forceRotate,proto3" json:"force_rotate,omitempty"`
+}
+
+func (m *CAConfig) Reset()      { *m = CAConfig{} }
+func (*CAConfig) ProtoMessage() {}
+func (*CAConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{33} }
+
+// OrchestrationConfig defines cluster-level orchestration settings.
+type OrchestrationConfig struct {
+	// TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or
+	// node. If negative, never remove completed or failed tasks.
+	TaskHistoryRetentionLimit int64 `protobuf:"varint,1,opt,name=task_history_retention_limit,json=taskHistoryRetentionLimit,proto3" json:"task_history_retention_limit,omitempty"`
+}
+
+func (m *OrchestrationConfig) Reset()      { *m = OrchestrationConfig{} }
+func (*OrchestrationConfig) ProtoMessage() {}
+func (*OrchestrationConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{34} }
+
+// TaskDefaults specifies default values for task creation.
+type TaskDefaults struct {
+	// LogDriver specifies the log driver to use for the cluster if not
+	// specified for each task.
+	//
+	// If this is changed, only new tasks will pick up the new log driver.
+	// Existing tasks will continue to use the previous default until rescheduled.
+	LogDriver *Driver `protobuf:"bytes,1,opt,name=log_driver,json=logDriver" json:"log_driver,omitempty"`
+}
+
+func (m *TaskDefaults) Reset()      { *m = TaskDefaults{} }
+func (*TaskDefaults) ProtoMessage() {}
+func (*TaskDefaults) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{35} }
+
+// DispatcherConfig defines cluster-level dispatcher settings.
+type DispatcherConfig struct {
+	// HeartbeatPeriod defines how often an agent should send heartbeats to
+	// the dispatcher.
+	// Note: can't use stdduration because this field needs to be nullable.
+	HeartbeatPeriod *google_protobuf1.Duration `protobuf:"bytes,1,opt,name=heartbeat_period,json=heartbeatPeriod" json:"heartbeat_period,omitempty"`
+}
+
+func (m *DispatcherConfig) Reset()      { *m = DispatcherConfig{} }
+func (*DispatcherConfig) ProtoMessage() {}
+func (*DispatcherConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{36} }
+
+// RaftConfig defines raft settings for the cluster.
+type RaftConfig struct {
+	// SnapshotInterval is the number of log entries between snapshots.
+	SnapshotInterval uint64 `protobuf:"varint,1,opt,name=snapshot_interval,json=snapshotInterval,proto3" json:"snapshot_interval,omitempty"`
+	// KeepOldSnapshots is the number of snapshots to keep beyond the
+	// current snapshot.
+	KeepOldSnapshots uint64 `protobuf:"varint,2,opt,name=keep_old_snapshots,json=keepOldSnapshots,proto3" json:"keep_old_snapshots,omitempty"`
+	// LogEntriesForSlowFollowers is the number of log entries to keep
+	// around to sync up slow followers after a snapshot is created.
+	LogEntriesForSlowFollowers uint64 `protobuf:"varint,3,opt,name=log_entries_for_slow_followers,json=logEntriesForSlowFollowers,proto3" json:"log_entries_for_slow_followers,omitempty"`
+	// HeartbeatTick defines the number of ticks (in seconds) between
+	// each heartbeat message sent to other members for health-check.
+	HeartbeatTick uint32 `protobuf:"varint,4,opt,name=heartbeat_tick,json=heartbeatTick,proto3" json:"heartbeat_tick,omitempty"`
+	// ElectionTick defines the number of ticks (in seconds) needed
+	// without a leader to trigger a new election.
+	ElectionTick uint32 `protobuf:"varint,5,opt,name=election_tick,json=electionTick,proto3" json:"election_tick,omitempty"`
+}
+
+func (m *RaftConfig) Reset()      { *m = RaftConfig{} }
+func (*RaftConfig) ProtoMessage() {}
+func (*RaftConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{37} }
+
+type EncryptionConfig struct {
+	// AutoLockManagers specifies whether or not managers' TLS keys and raft data
+	// should be encrypted at rest in such a way that they must be unlocked
+	// before the manager node starts up again.
+	AutoLockManagers bool `protobuf:"varint,1,opt,name=auto_lock_managers,json=autoLockManagers,proto3" json:"auto_lock_managers,omitempty"`
+}
+
+func (m *EncryptionConfig) Reset()      { *m = EncryptionConfig{} }
+func (*EncryptionConfig) ProtoMessage() {}
+func (*EncryptionConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{38} }
+
+type SpreadOver struct {
+	SpreadDescriptor string `protobuf:"bytes,1,opt,name=spread_descriptor,json=spreadDescriptor,proto3" json:"spread_descriptor,omitempty"`
+}
+
+func (m *SpreadOver) Reset()      { *m = SpreadOver{} }
+func (*SpreadOver) ProtoMessage() {}
+func (*SpreadOver) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{39} }
+
+type PlacementPreference struct {
+	// Types that are valid to be assigned to Preference:
+	//	*PlacementPreference_Spread
+	Preference isPlacementPreference_Preference `protobuf_oneof:"Preference"`
+}
+
+func (m *PlacementPreference) Reset()      { *m = PlacementPreference{} }
+func (*PlacementPreference) ProtoMessage() {}
+func (*PlacementPreference) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{40} }
+
+type isPlacementPreference_Preference interface {
+	isPlacementPreference_Preference()
+	MarshalTo([]byte) (int, error)
+	Size() int
+}
+
+type PlacementPreference_Spread struct {
+	Spread *SpreadOver `protobuf:"bytes,1,opt,name=spread,oneof"`
+}
+
+func (*PlacementPreference_Spread) isPlacementPreference_Preference() {}
+
+func (m *PlacementPreference) GetPreference() isPlacementPreference_Preference {
+	if m != nil {
+		return m.Preference
+	}
+	return nil
+}
+
+func (m *PlacementPreference) GetSpread() *SpreadOver {
+	if x, ok := m.GetPreference().(*PlacementPreference_Spread); ok {
+		return x.Spread
+	}
+	return nil
+}
+
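+// Editor's illustrative sketch (not part of the generated code): a spread
+// placement preference over a hypothetical node label, read back through the
+// typed accessor above.
+//
+//	pref := &PlacementPreference{
+//		Preference: &PlacementPreference_Spread{
+//			Spread: &SpreadOver{SpreadDescriptor: "node.labels.rack"},
+//		},
+//	}
+//	fmt.Println(pref.GetSpread().SpreadDescriptor) // node.labels.rack
+
+// XXX_OneofFuncs is for the internal use of the proto package.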
+func (*PlacementPreference) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+	return _PlacementPreference_OneofMarshaler, _PlacementPreference_OneofUnmarshaler, _PlacementPreference_OneofSizer, []interface{}{
+		(*PlacementPreference_Spread)(nil),
+	}
+}
+
+func _PlacementPreference_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+	m := msg.(*PlacementPreference)
+	// Preference
+	switch x := m.Preference.(type) {
+	case *PlacementPreference_Spread:
+		_ = b.EncodeVarint(1<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.Spread); err != nil {
+			return err
+		}
+	case nil:
+	default:
+		return fmt.Errorf("PlacementPreference.Preference has unexpected type %T", x)
+	}
+	return nil
+}
+
+func _PlacementPreference_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+	m := msg.(*PlacementPreference)
+	switch tag {
+	case 1: // Preference.spread
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(SpreadOver)
+		err := b.DecodeMessage(msg)
+		m.Preference = &PlacementPreference_Spread{msg}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
+func _PlacementPreference_OneofSizer(msg proto.Message) (n int) {
+	m := msg.(*PlacementPreference)
+	// Preference
+	switch x := m.Preference.(type) {
+	case *PlacementPreference_Spread:
+		s := proto.Size(x.Spread)
+		n += proto.SizeVarint(1<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
+// Placement specifies task distribution constraints.
+type Placement struct {
+	// Constraints specifies a set of requirements a node should meet for a task.
+	Constraints []string `protobuf:"bytes,1,rep,name=constraints" json:"constraints,omitempty"`
+	// Preferences provide a way to make the scheduler aware of factors
+	// such as topology. They are provided in order from highest to lowest
+	// precedence.
+	Preferences []*PlacementPreference `protobuf:"bytes,2,rep,name=preferences" json:"preferences,omitempty"`
+	// Platforms stores all the platforms that the image can run on.
+	// This field is used in the platform filter for scheduling. If empty,
+	// then the platform filter is off, meaning there are no scheduling restrictions.
+	Platforms []*Platform `protobuf:"bytes,3,rep,name=platforms" json:"platforms,omitempty"`
+}
+
+func (m *Placement) Reset()      { *m = Placement{} }
+func (*Placement) ProtoMessage() {}
+func (*Placement) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{41} }
+
+// JoinTokens contains the join tokens for workers and managers.
+type JoinTokens struct {
+	// Worker is the join token workers may use to join the swarm.
+	Worker string `protobuf:"bytes,1,opt,name=worker,proto3" json:"worker,omitempty"`
+	// Manager is the join token managers may use to join the swarm.
+	Manager string `protobuf:"bytes,2,opt,name=manager,proto3" json:"manager,omitempty"`
+}
+
+func (m *JoinTokens) Reset()      { *m = JoinTokens{} }
+func (*JoinTokens) ProtoMessage() {}
+func (*JoinTokens) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{42} }
+
+type RootCA struct {
+	// CAKey is the root CA private key.
+	CAKey []byte `protobuf:"bytes,1,opt,name=ca_key,json=caKey,proto3" json:"ca_key,omitempty"`
+	// CACert is the root CA certificate.
+	CACert []byte `protobuf:"bytes,2,opt,name=ca_cert,json=caCert,proto3" json:"ca_cert,omitempty"`
+	// CACertHash is the digest of the CA Certificate.
+	CACertHash string `protobuf:"bytes,3,opt,name=ca_cert_hash,json=caCertHash,proto3" json:"ca_cert_hash,omitempty"`
+	// JoinTokens contains the join tokens for workers and managers.
+	JoinTokens JoinTokens `protobuf:"bytes,4,opt,name=join_tokens,json=joinTokens" json:"join_tokens"`
+	// RootRotation contains the new root cert and key we want to rotate to - if this is nil, we are not in the
+	// middle of a root rotation.
+	RootRotation *RootRotation `protobuf:"bytes,5,opt,name=root_rotation,json=rootRotation" json:"root_rotation,omitempty"`
+	// LastForcedRotation matches the Cluster Spec's CAConfig's ForceRotate counter.
+	// It indicates when the current CA cert and key were generated (or updated).
+	LastForcedRotation uint64 `protobuf:"varint,6,opt,name=last_forced_rotation,json=lastForcedRotation,proto3" json:"last_forced_rotation,omitempty"`
+}
+
+func (m *RootCA) Reset()      { *m = RootCA{} }
+func (*RootCA) ProtoMessage() {}
+func (*RootCA) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{43} }
+
+type Certificate struct {
+	Role NodeRole `protobuf:"varint,1,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole" json:"role,omitempty"`
+	CSR []byte `protobuf:"bytes,2,opt,name=csr,proto3" json:"csr,omitempty"`
+	Status IssuanceStatus `protobuf:"bytes,3,opt,name=status" json:"status"`
+	Certificate []byte `protobuf:"bytes,4,opt,name=certificate,proto3" json:"certificate,omitempty"`
+	// CN represents the node ID.
+	CN string `protobuf:"bytes,5,opt,name=cn,proto3" json:"cn,omitempty"`
+}
+
+func (m *Certificate) Reset()      { *m = Certificate{} }
+func (*Certificate) ProtoMessage() {}
+func (*Certificate) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{44} }
+
+// Symmetric keys to encrypt inter-agent communication.
+type EncryptionKey struct {
+	// Agent subsystem the key is intended for. Example:
+	// networking:gossip
+	Subsystem string `protobuf:"bytes,1,opt,name=subsystem,proto3" json:"subsystem,omitempty"`
+	Algorithm EncryptionKey_Algorithm `protobuf:"varint,2,opt,name=algorithm,proto3,enum=docker.swarmkit.v1.EncryptionKey_Algorithm" json:"algorithm,omitempty"`
+	Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
+	// Time stamp from the Lamport clock of the key allocator to
+	// identify the relative age of the key.
+	LamportTime uint64 `protobuf:"varint,4,opt,name=lamport_time,json=lamportTime,proto3" json:"lamport_time,omitempty"`
+}
+
+func (m *EncryptionKey) Reset()      { *m = EncryptionKey{} }
+func (*EncryptionKey) ProtoMessage() {}
+func (*EncryptionKey) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{45} }
+
+// ManagerStatus provides information about the state of a manager in the cluster.
+type ManagerStatus struct {
+	// RaftID specifies the internal ID used by the manager in a raft context; it can never be modified
+	// and is used only for informational purposes.
+	RaftID uint64 `protobuf:"varint,1,opt,name=raft_id,json=raftId,proto3" json:"raft_id,omitempty"`
+	// Addr is the address advertised to raft.
+	Addr string `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"`
+	// Leader is set to true if this node is the raft leader.
+	Leader bool `protobuf:"varint,3,opt,name=leader,proto3" json:"leader,omitempty"`
+	// Reachability specifies whether this node is reachable.
+	Reachability RaftMemberStatus_Reachability `protobuf:"varint,4,opt,name=reachability,proto3,enum=docker.swarmkit.v1.RaftMemberStatus_Reachability" json:"reachability,omitempty"`
+}
+
+func (m *ManagerStatus) Reset()      { *m = ManagerStatus{} }
+func (*ManagerStatus) ProtoMessage() {}
+func (*ManagerStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{46} }
+
+// FileTarget represents a specific target that is backed by a file.
+type FileTarget struct {
+	// Name represents the final filename in the filesystem.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// UID represents the file UID.
+	UID string `protobuf:"bytes,2,opt,name=uid,proto3" json:"uid,omitempty"`
+	// GID represents the file GID.
+	GID string `protobuf:"bytes,3,opt,name=gid,proto3" json:"gid,omitempty"`
+	// Mode represents the FileMode of the file.
+	Mode os.FileMode `protobuf:"varint,4,opt,name=mode,proto3,customtype=os.FileMode" json:"mode"`
+}
+
+func (m *FileTarget) Reset()      { *m = FileTarget{} }
+func (*FileTarget) ProtoMessage() {}
+func (*FileTarget) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{47} }
+
+// SecretReference is the linkage between a service and a secret that it uses.
+type SecretReference struct {
+	// SecretID represents the ID of the specific Secret that we're
+	// referencing. This identifier exists so that SecretReferences don't leak
+	// any information about the secret contents.
+	SecretID string `protobuf:"bytes,1,opt,name=secret_id,json=secretId,proto3" json:"secret_id,omitempty"`
+	// SecretName is the name of the secret that this references, but this is just provided for
+	// lookup/display purposes. The secret in the reference will be identified by its ID.
+	SecretName string `protobuf:"bytes,2,opt,name=secret_name,json=secretName,proto3" json:"secret_name,omitempty"`
+	// Target specifies how this secret should be exposed to the task.
+	//
+	// Types that are valid to be assigned to Target:
+	//	*SecretReference_File
+	Target isSecretReference_Target `protobuf_oneof:"target"`
+}
+
+func (m *SecretReference) Reset()      { *m = SecretReference{} }
+func (*SecretReference) ProtoMessage() {}
+func (*SecretReference) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{48} }
+
+type isSecretReference_Target interface {
+	isSecretReference_Target()
+	MarshalTo([]byte) (int, error)
+	Size() int
+}
+
+type SecretReference_File struct {
+	File *FileTarget `protobuf:"bytes,3,opt,name=file,oneof"`
+}
+
+func (*SecretReference_File) isSecretReference_Target() {}
+
+func (m *SecretReference) GetTarget() isSecretReference_Target {
+	if m != nil {
+		return m.Target
+	}
+	return nil
+}
+
+func (m *SecretReference) GetFile() *FileTarget {
+	if x, ok := m.GetTarget().(*SecretReference_File); ok {
+		return x.File
+	}
+	return nil
+}
+
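+// Editor's illustrative sketch (not part of the generated code): a secret
+// exposed to a task as a read-only file. IDs and names are hypothetical.
+//
+//	ref := &SecretReference{
+//		SecretID:   "sec-123",
+//		SecretName: "db-password",
+//		Target: &SecretReference_File{
+//			File: &FileTarget{Name: "db_password", UID: "0", GID: "0", Mode: 0444},
+//		},
+//	}
+//	if f := ref.GetFile(); f != nil {
+//		fmt.Println(f.Name, f.Mode) // db_password -r--r--r--
+//	}
+
+// XXX_OneofFuncs is for the internal use of the proto package.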
+func (*SecretReference) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+	return _SecretReference_OneofMarshaler, _SecretReference_OneofUnmarshaler, _SecretReference_OneofSizer, []interface{}{
+		(*SecretReference_File)(nil),
+	}
+}
+
+func _SecretReference_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+	m := msg.(*SecretReference)
+	// target
+	switch x := m.Target.(type) {
+	case *SecretReference_File:
+		_ = b.EncodeVarint(3<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.File); err != nil {
+			return err
+		}
+	case nil:
+	default:
+		return fmt.Errorf("SecretReference.Target has unexpected type %T", x)
+	}
+	return nil
+}
+
+func _SecretReference_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+	m := msg.(*SecretReference)
+	switch tag {
+	case 3: // target.file
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(FileTarget)
+		err := b.DecodeMessage(msg)
+		m.Target = &SecretReference_File{msg}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
+func _SecretReference_OneofSizer(msg proto.Message) (n int) {
+	m := msg.(*SecretReference)
+	// target
+	switch x := m.Target.(type) {
+	case *SecretReference_File:
+		s := proto.Size(x.File)
+		n += proto.SizeVarint(3<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
+// ConfigReference is the linkage between a service and a config that it uses.
+type ConfigReference struct {
+	// ConfigID represents the ID of the specific Config that we're
+	// referencing.
+	ConfigID string `protobuf:"bytes,1,opt,name=config_id,json=configId,proto3" json:"config_id,omitempty"`
+	// ConfigName is the name of the config that this references, but this is just provided for
+	// lookup/display purposes. The config in the reference will be identified by its ID.
+	ConfigName string `protobuf:"bytes,2,opt,name=config_name,json=configName,proto3" json:"config_name,omitempty"`
+	// Target specifies how this config should be exposed to the task.
+	//
+	// Types that are valid to be assigned to Target:
+	//	*ConfigReference_File
+	Target isConfigReference_Target `protobuf_oneof:"target"`
+}
+
+func (m *ConfigReference) Reset()      { *m = ConfigReference{} }
+func (*ConfigReference) ProtoMessage() {}
+func (*ConfigReference) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{49} }
+
+type isConfigReference_Target interface {
+	isConfigReference_Target()
+	MarshalTo([]byte) (int, error)
+	Size() int
+}
+
+type ConfigReference_File struct {
+	File *FileTarget `protobuf:"bytes,3,opt,name=file,oneof"`
+}
+
+func (*ConfigReference_File) isConfigReference_Target() {}
+
+func (m *ConfigReference) GetTarget() isConfigReference_Target {
+	if m != nil {
+		return m.Target
+	}
+	return nil
+}
+
+func (m *ConfigReference) GetFile() *FileTarget {
+	if x, ok := m.GetTarget().(*ConfigReference_File); ok {
+		return x.File
+	}
+	return nil
+}
+
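+// Editor's illustrative sketch (not part of the generated code): the sizer
+// above counts tag byte + length prefix + payload for the embedded message.
+// For a hypothetical 11-byte FileTarget payload on field 3:
+//
+//	s := 11                                       // proto.Size(x.File), assumed payload size
+//	n := proto.SizeVarint(3<<3 | proto.WireBytes) // 1 byte for tag 0x1a
+//	n += proto.SizeVarint(uint64(s))              // 1 byte for length 11
+//	n += s                                        // 13 bytes total
+
+// XXX_OneofFuncs is for the internal use of the proto package.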
+func (*ConfigReference) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ConfigReference_OneofMarshaler, _ConfigReference_OneofUnmarshaler, _ConfigReference_OneofSizer, []interface{}{ + (*ConfigReference_File)(nil), + } +} + +func _ConfigReference_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ConfigReference) + // target + switch x := m.Target.(type) { + case *ConfigReference_File: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.File); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ConfigReference.Target has unexpected type %T", x) + } + return nil +} + +func _ConfigReference_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ConfigReference) + switch tag { + case 3: // target.file + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(FileTarget) + err := b.DecodeMessage(msg) + m.Target = &ConfigReference_File{msg} + return true, err + default: + return false, nil + } +} + +func _ConfigReference_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ConfigReference) + // target + switch x := m.Target.(type) { + case *ConfigReference_File: + s := proto.Size(x.File) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// BlacklistedCertificate is a record for a blacklisted certificate. It does not +// contain the certificate's CN, because these records are indexed by CN. +type BlacklistedCertificate struct { + // Expiry is the latest known expiration time of a certificate that + // was issued for the given CN. + // Note: can't use stdtime because this field is nullable. + Expiry *google_protobuf.Timestamp `protobuf:"bytes,1,opt,name=expiry" json:"expiry,omitempty"` +} + +func (m *BlacklistedCertificate) Reset() { *m = BlacklistedCertificate{} } +func (*BlacklistedCertificate) ProtoMessage() {} +func (*BlacklistedCertificate) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{50} } + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `protobuf:"bytes,1,rep,name=test" json:"test,omitempty"` + // Interval is the time to wait between checks. Zero means inherit. + // Note: can't use stdduration because this field needs to be nullable. + Interval *google_protobuf1.Duration `protobuf:"bytes,2,opt,name=interval" json:"interval,omitempty"` + // Timeout is the time to wait before considering the check to have hung. + // Zero means inherit. + // Note: can't use stdduration because this field needs to be nullable. + Timeout *google_protobuf1.Duration `protobuf:"bytes,3,opt,name=timeout" json:"timeout,omitempty"` + // Retries is the number of consecutive failures needed to consider a + // container as unhealthy. Zero means inherit. 
+	Retries int32 `protobuf:"varint,4,opt,name=retries,proto3" json:"retries,omitempty"`
+	// Start period is the period for container initialization during
+	// which health check failures will not count towards the maximum
+	// number of retries.
+	StartPeriod *google_protobuf1.Duration `protobuf:"bytes,5,opt,name=start_period,json=startPeriod" json:"start_period,omitempty"`
+}
+
+func (m *HealthConfig) Reset()      { *m = HealthConfig{} }
+func (*HealthConfig) ProtoMessage() {}
+func (*HealthConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{51} }
+
+type MaybeEncryptedRecord struct {
+	Algorithm MaybeEncryptedRecord_Algorithm `protobuf:"varint,1,opt,name=algorithm,proto3,enum=docker.swarmkit.v1.MaybeEncryptedRecord_Algorithm" json:"algorithm,omitempty"`
+	Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+	Nonce []byte `protobuf:"bytes,3,opt,name=nonce,proto3" json:"nonce,omitempty"`
+}
+
+func (m *MaybeEncryptedRecord) Reset()      { *m = MaybeEncryptedRecord{} }
+func (*MaybeEncryptedRecord) ProtoMessage() {}
+func (*MaybeEncryptedRecord) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{52} }
+
+type RootRotation struct {
+	CACert []byte `protobuf:"bytes,1,opt,name=ca_cert,json=caCert,proto3" json:"ca_cert,omitempty"`
+	CAKey []byte `protobuf:"bytes,2,opt,name=ca_key,json=caKey,proto3" json:"ca_key,omitempty"`
+	// CrossSignedCACert is the CACert that has been cross-signed by the previous root.
+	CrossSignedCACert []byte `protobuf:"bytes,3,opt,name=cross_signed_ca_cert,json=crossSignedCaCert,proto3" json:"cross_signed_ca_cert,omitempty"`
+}
+
+func (m *RootRotation) Reset()      { *m = RootRotation{} }
+func (*RootRotation) ProtoMessage() {}
+func (*RootRotation) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{53} }
+
+// Privileges specifies security configuration/permissions.
+type Privileges struct {
+	CredentialSpec *Privileges_CredentialSpec `protobuf:"bytes,1,opt,name=credential_spec,json=credentialSpec" json:"credential_spec,omitempty"`
+	SELinuxContext *Privileges_SELinuxContext `protobuf:"bytes,2,opt,name=selinux_context,json=selinuxContext" json:"selinux_context,omitempty"`
+}
+
+func (m *Privileges) Reset()      { *m = Privileges{} }
+func (*Privileges) ProtoMessage() {}
+func (*Privileges) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{54} }
+
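+// Editor's illustrative sketch (not part of the generated code): a shell-based
+// health check following the Test conventions documented on HealthConfig
+// above. The command and values are hypothetical.
+//
+//	hc := &HealthConfig{
+//		Test:     []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
+//		Interval: &google_protobuf1.Duration{Seconds: 30},
+//		Retries:  3, // three consecutive failures mark the container unhealthy
+//	}
+
+// CredentialSpec for managed service account (Windows only).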
+type Privileges_CredentialSpec struct { + // Types that are valid to be assigned to Source: + // *Privileges_CredentialSpec_File + // *Privileges_CredentialSpec_Registry + Source isPrivileges_CredentialSpec_Source `protobuf_oneof:"source"` +} + +func (m *Privileges_CredentialSpec) Reset() { *m = Privileges_CredentialSpec{} } +func (*Privileges_CredentialSpec) ProtoMessage() {} +func (*Privileges_CredentialSpec) Descriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{54, 0} +} + +type isPrivileges_CredentialSpec_Source interface { + isPrivileges_CredentialSpec_Source() + MarshalTo([]byte) (int, error) + Size() int +} + +type Privileges_CredentialSpec_File struct { + File string `protobuf:"bytes,1,opt,name=file,proto3,oneof"` +} +type Privileges_CredentialSpec_Registry struct { + Registry string `protobuf:"bytes,2,opt,name=registry,proto3,oneof"` +} + +func (*Privileges_CredentialSpec_File) isPrivileges_CredentialSpec_Source() {} +func (*Privileges_CredentialSpec_Registry) isPrivileges_CredentialSpec_Source() {} + +func (m *Privileges_CredentialSpec) GetSource() isPrivileges_CredentialSpec_Source { + if m != nil { + return m.Source + } + return nil +} + +func (m *Privileges_CredentialSpec) GetFile() string { + if x, ok := m.GetSource().(*Privileges_CredentialSpec_File); ok { + return x.File + } + return "" +} + +func (m *Privileges_CredentialSpec) GetRegistry() string { + if x, ok := m.GetSource().(*Privileges_CredentialSpec_Registry); ok { + return x.Registry + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Privileges_CredentialSpec) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Privileges_CredentialSpec_OneofMarshaler, _Privileges_CredentialSpec_OneofUnmarshaler, _Privileges_CredentialSpec_OneofSizer, []interface{}{ + (*Privileges_CredentialSpec_File)(nil), + (*Privileges_CredentialSpec_Registry)(nil), + } +} + +func _Privileges_CredentialSpec_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Privileges_CredentialSpec) + // source + switch x := m.Source.(type) { + case *Privileges_CredentialSpec_File: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.File) + case *Privileges_CredentialSpec_Registry: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.Registry) + case nil: + default: + return fmt.Errorf("Privileges_CredentialSpec.Source has unexpected type %T", x) + } + return nil +} + +func _Privileges_CredentialSpec_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Privileges_CredentialSpec) + switch tag { + case 1: // source.file + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Source = &Privileges_CredentialSpec_File{x} + return true, err + case 2: // source.registry + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Source = &Privileges_CredentialSpec_Registry{x} + return true, err + default: + return false, nil + } +} + +func _Privileges_CredentialSpec_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Privileges_CredentialSpec) + // source + switch x := m.Source.(type) { + case *Privileges_CredentialSpec_File: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.File))) + n += len(x.File) + case 
*Privileges_CredentialSpec_Registry: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Registry))) + n += len(x.Registry) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// SELinuxContext contains the SELinux labels for the container. +type Privileges_SELinuxContext struct { + Disable bool `protobuf:"varint,1,opt,name=disable,proto3" json:"disable,omitempty"` + User string `protobuf:"bytes,2,opt,name=user,proto3" json:"user,omitempty"` + Role string `protobuf:"bytes,3,opt,name=role,proto3" json:"role,omitempty"` + Type string `protobuf:"bytes,4,opt,name=type,proto3" json:"type,omitempty"` + Level string `protobuf:"bytes,5,opt,name=level,proto3" json:"level,omitempty"` +} + +func (m *Privileges_SELinuxContext) Reset() { *m = Privileges_SELinuxContext{} } +func (*Privileges_SELinuxContext) ProtoMessage() {} +func (*Privileges_SELinuxContext) Descriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{54, 1} +} + +func init() { + proto.RegisterType((*Version)(nil), "docker.swarmkit.v1.Version") + proto.RegisterType((*IndexEntry)(nil), "docker.swarmkit.v1.IndexEntry") + proto.RegisterType((*Annotations)(nil), "docker.swarmkit.v1.Annotations") + proto.RegisterType((*NamedGenericResource)(nil), "docker.swarmkit.v1.NamedGenericResource") + proto.RegisterType((*DiscreteGenericResource)(nil), "docker.swarmkit.v1.DiscreteGenericResource") + proto.RegisterType((*GenericResource)(nil), "docker.swarmkit.v1.GenericResource") + proto.RegisterType((*Resources)(nil), "docker.swarmkit.v1.Resources") + proto.RegisterType((*ResourceRequirements)(nil), "docker.swarmkit.v1.ResourceRequirements") + proto.RegisterType((*Platform)(nil), "docker.swarmkit.v1.Platform") + proto.RegisterType((*PluginDescription)(nil), "docker.swarmkit.v1.PluginDescription") + proto.RegisterType((*EngineDescription)(nil), "docker.swarmkit.v1.EngineDescription") + proto.RegisterType((*NodeDescription)(nil), "docker.swarmkit.v1.NodeDescription") + proto.RegisterType((*NodeTLSInfo)(nil), "docker.swarmkit.v1.NodeTLSInfo") + proto.RegisterType((*RaftMemberStatus)(nil), "docker.swarmkit.v1.RaftMemberStatus") + proto.RegisterType((*NodeStatus)(nil), "docker.swarmkit.v1.NodeStatus") + proto.RegisterType((*Image)(nil), "docker.swarmkit.v1.Image") + proto.RegisterType((*Mount)(nil), "docker.swarmkit.v1.Mount") + proto.RegisterType((*Mount_BindOptions)(nil), "docker.swarmkit.v1.Mount.BindOptions") + proto.RegisterType((*Mount_VolumeOptions)(nil), "docker.swarmkit.v1.Mount.VolumeOptions") + proto.RegisterType((*Mount_TmpfsOptions)(nil), "docker.swarmkit.v1.Mount.TmpfsOptions") + proto.RegisterType((*RestartPolicy)(nil), "docker.swarmkit.v1.RestartPolicy") + proto.RegisterType((*UpdateConfig)(nil), "docker.swarmkit.v1.UpdateConfig") + proto.RegisterType((*UpdateStatus)(nil), "docker.swarmkit.v1.UpdateStatus") + proto.RegisterType((*ContainerStatus)(nil), "docker.swarmkit.v1.ContainerStatus") + proto.RegisterType((*PortStatus)(nil), "docker.swarmkit.v1.PortStatus") + proto.RegisterType((*TaskStatus)(nil), "docker.swarmkit.v1.TaskStatus") + proto.RegisterType((*NetworkAttachmentConfig)(nil), "docker.swarmkit.v1.NetworkAttachmentConfig") + proto.RegisterType((*IPAMConfig)(nil), "docker.swarmkit.v1.IPAMConfig") + proto.RegisterType((*PortConfig)(nil), "docker.swarmkit.v1.PortConfig") + proto.RegisterType((*Driver)(nil), "docker.swarmkit.v1.Driver") + proto.RegisterType((*IPAMOptions)(nil), "docker.swarmkit.v1.IPAMOptions") + 
proto.RegisterType((*Peer)(nil), "docker.swarmkit.v1.Peer") + proto.RegisterType((*WeightedPeer)(nil), "docker.swarmkit.v1.WeightedPeer") + proto.RegisterType((*IssuanceStatus)(nil), "docker.swarmkit.v1.IssuanceStatus") + proto.RegisterType((*AcceptancePolicy)(nil), "docker.swarmkit.v1.AcceptancePolicy") + proto.RegisterType((*AcceptancePolicy_RoleAdmissionPolicy)(nil), "docker.swarmkit.v1.AcceptancePolicy.RoleAdmissionPolicy") + proto.RegisterType((*AcceptancePolicy_RoleAdmissionPolicy_Secret)(nil), "docker.swarmkit.v1.AcceptancePolicy.RoleAdmissionPolicy.Secret") + proto.RegisterType((*ExternalCA)(nil), "docker.swarmkit.v1.ExternalCA") + proto.RegisterType((*CAConfig)(nil), "docker.swarmkit.v1.CAConfig") + proto.RegisterType((*OrchestrationConfig)(nil), "docker.swarmkit.v1.OrchestrationConfig") + proto.RegisterType((*TaskDefaults)(nil), "docker.swarmkit.v1.TaskDefaults") + proto.RegisterType((*DispatcherConfig)(nil), "docker.swarmkit.v1.DispatcherConfig") + proto.RegisterType((*RaftConfig)(nil), "docker.swarmkit.v1.RaftConfig") + proto.RegisterType((*EncryptionConfig)(nil), "docker.swarmkit.v1.EncryptionConfig") + proto.RegisterType((*SpreadOver)(nil), "docker.swarmkit.v1.SpreadOver") + proto.RegisterType((*PlacementPreference)(nil), "docker.swarmkit.v1.PlacementPreference") + proto.RegisterType((*Placement)(nil), "docker.swarmkit.v1.Placement") + proto.RegisterType((*JoinTokens)(nil), "docker.swarmkit.v1.JoinTokens") + proto.RegisterType((*RootCA)(nil), "docker.swarmkit.v1.RootCA") + proto.RegisterType((*Certificate)(nil), "docker.swarmkit.v1.Certificate") + proto.RegisterType((*EncryptionKey)(nil), "docker.swarmkit.v1.EncryptionKey") + proto.RegisterType((*ManagerStatus)(nil), "docker.swarmkit.v1.ManagerStatus") + proto.RegisterType((*FileTarget)(nil), "docker.swarmkit.v1.FileTarget") + proto.RegisterType((*SecretReference)(nil), "docker.swarmkit.v1.SecretReference") + proto.RegisterType((*ConfigReference)(nil), "docker.swarmkit.v1.ConfigReference") + proto.RegisterType((*BlacklistedCertificate)(nil), "docker.swarmkit.v1.BlacklistedCertificate") + proto.RegisterType((*HealthConfig)(nil), "docker.swarmkit.v1.HealthConfig") + proto.RegisterType((*MaybeEncryptedRecord)(nil), "docker.swarmkit.v1.MaybeEncryptedRecord") + proto.RegisterType((*RootRotation)(nil), "docker.swarmkit.v1.RootRotation") + proto.RegisterType((*Privileges)(nil), "docker.swarmkit.v1.Privileges") + proto.RegisterType((*Privileges_CredentialSpec)(nil), "docker.swarmkit.v1.Privileges.CredentialSpec") + proto.RegisterType((*Privileges_SELinuxContext)(nil), "docker.swarmkit.v1.Privileges.SELinuxContext") + proto.RegisterEnum("docker.swarmkit.v1.ResourceType", ResourceType_name, ResourceType_value) + proto.RegisterEnum("docker.swarmkit.v1.TaskState", TaskState_name, TaskState_value) + proto.RegisterEnum("docker.swarmkit.v1.NodeRole", NodeRole_name, NodeRole_value) + proto.RegisterEnum("docker.swarmkit.v1.RaftMemberStatus_Reachability", RaftMemberStatus_Reachability_name, RaftMemberStatus_Reachability_value) + proto.RegisterEnum("docker.swarmkit.v1.NodeStatus_State", NodeStatus_State_name, NodeStatus_State_value) + proto.RegisterEnum("docker.swarmkit.v1.Mount_MountType", Mount_MountType_name, Mount_MountType_value) + proto.RegisterEnum("docker.swarmkit.v1.Mount_MountConsistency", Mount_MountConsistency_name, Mount_MountConsistency_value) + proto.RegisterEnum("docker.swarmkit.v1.Mount_BindOptions_MountPropagation", Mount_BindOptions_MountPropagation_name, Mount_BindOptions_MountPropagation_value) + 
proto.RegisterEnum("docker.swarmkit.v1.RestartPolicy_RestartCondition", RestartPolicy_RestartCondition_name, RestartPolicy_RestartCondition_value) + proto.RegisterEnum("docker.swarmkit.v1.UpdateConfig_FailureAction", UpdateConfig_FailureAction_name, UpdateConfig_FailureAction_value) + proto.RegisterEnum("docker.swarmkit.v1.UpdateConfig_UpdateOrder", UpdateConfig_UpdateOrder_name, UpdateConfig_UpdateOrder_value) + proto.RegisterEnum("docker.swarmkit.v1.UpdateStatus_UpdateState", UpdateStatus_UpdateState_name, UpdateStatus_UpdateState_value) + proto.RegisterEnum("docker.swarmkit.v1.IPAMConfig_AddressFamily", IPAMConfig_AddressFamily_name, IPAMConfig_AddressFamily_value) + proto.RegisterEnum("docker.swarmkit.v1.PortConfig_Protocol", PortConfig_Protocol_name, PortConfig_Protocol_value) + proto.RegisterEnum("docker.swarmkit.v1.PortConfig_PublishMode", PortConfig_PublishMode_name, PortConfig_PublishMode_value) + proto.RegisterEnum("docker.swarmkit.v1.IssuanceStatus_State", IssuanceStatus_State_name, IssuanceStatus_State_value) + proto.RegisterEnum("docker.swarmkit.v1.ExternalCA_CAProtocol", ExternalCA_CAProtocol_name, ExternalCA_CAProtocol_value) + proto.RegisterEnum("docker.swarmkit.v1.EncryptionKey_Algorithm", EncryptionKey_Algorithm_name, EncryptionKey_Algorithm_value) + proto.RegisterEnum("docker.swarmkit.v1.MaybeEncryptedRecord_Algorithm", MaybeEncryptedRecord_Algorithm_name, MaybeEncryptedRecord_Algorithm_value) +} + +func (m *Version) Copy() *Version { + if m == nil { + return nil + } + o := &Version{} + o.CopyFrom(m) + return o +} + +func (m *Version) CopyFrom(src interface{}) { + + o := src.(*Version) + *m = *o +} + +func (m *IndexEntry) Copy() *IndexEntry { + if m == nil { + return nil + } + o := &IndexEntry{} + o.CopyFrom(m) + return o +} + +func (m *IndexEntry) CopyFrom(src interface{}) { + + o := src.(*IndexEntry) + *m = *o +} + +func (m *Annotations) Copy() *Annotations { + if m == nil { + return nil + } + o := &Annotations{} + o.CopyFrom(m) + return o +} + +func (m *Annotations) CopyFrom(src interface{}) { + + o := src.(*Annotations) + *m = *o + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.Indices != nil { + m.Indices = make([]IndexEntry, len(o.Indices)) + for i := range m.Indices { + deepcopy.Copy(&m.Indices[i], &o.Indices[i]) + } + } + +} + +func (m *NamedGenericResource) Copy() *NamedGenericResource { + if m == nil { + return nil + } + o := &NamedGenericResource{} + o.CopyFrom(m) + return o +} + +func (m *NamedGenericResource) CopyFrom(src interface{}) { + + o := src.(*NamedGenericResource) + *m = *o +} + +func (m *DiscreteGenericResource) Copy() *DiscreteGenericResource { + if m == nil { + return nil + } + o := &DiscreteGenericResource{} + o.CopyFrom(m) + return o +} + +func (m *DiscreteGenericResource) CopyFrom(src interface{}) { + + o := src.(*DiscreteGenericResource) + *m = *o +} + +func (m *GenericResource) Copy() *GenericResource { + if m == nil { + return nil + } + o := &GenericResource{} + o.CopyFrom(m) + return o +} + +func (m *GenericResource) CopyFrom(src interface{}) { + + o := src.(*GenericResource) + *m = *o + if o.Resource != nil { + switch o.Resource.(type) { + case *GenericResource_NamedResourceSpec: + v := GenericResource_NamedResourceSpec{ + NamedResourceSpec: &NamedGenericResource{}, + } + deepcopy.Copy(v.NamedResourceSpec, o.GetNamedResourceSpec()) + m.Resource = &v + case *GenericResource_DiscreteResourceSpec: + v := GenericResource_DiscreteResourceSpec{ + 
DiscreteResourceSpec: &DiscreteGenericResource{},
+			}
+			deepcopy.Copy(v.DiscreteResourceSpec, o.GetDiscreteResourceSpec())
+			m.Resource = &v
+		}
+	}
+
+}
+
+func (m *Resources) Copy() *Resources {
+	if m == nil {
+		return nil
+	}
+	o := &Resources{}
+	o.CopyFrom(m)
+	return o
+}
+
+func (m *Resources) CopyFrom(src interface{}) {
+
+	o := src.(*Resources)
+	*m = *o
+	if o.Generic != nil {
+		m.Generic = make([]*GenericResource, len(o.Generic))
+		for i := range m.Generic {
+			m.Generic[i] = &GenericResource{}
+			deepcopy.Copy(m.Generic[i], o.Generic[i])
+		}
+	}
+
+}
+
+func (m *ResourceRequirements) Copy() *ResourceRequirements {
+	if m == nil {
+		return nil
+	}
+	o := &ResourceRequirements{}
+	o.CopyFrom(m)
+	return o
+}
+
+func (m *ResourceRequirements) CopyFrom(src interface{}) {
+
+	o := src.(*ResourceRequirements)
+	*m = *o
+	if o.Limits != nil {
+		m.Limits = &Resources{}
+		deepcopy.Copy(m.Limits, o.Limits)
+	}
+	if o.Reservations != nil {
+		m.Reservations = &Resources{}
+		deepcopy.Copy(m.Reservations, o.Reservations)
+	}
+}

[vendored generated code (appears to be docker/swarmkit's api/types.pb.go), reflowed and collapsed: the identical generated Copy/CopyFrom pattern repeats for Platform, PluginDescription, EngineDescription, NodeDescription, NodeTLSInfo, RaftMemberStatus, NodeStatus, Image, Mount and its Bind/Volume/TmpfsOptions, RestartPolicy, UpdateConfig, UpdateStatus, ContainerStatus, PortStatus, TaskStatus, NetworkAttachmentConfig, IPAMConfig, PortConfig, Driver, IPAMOptions, Peer, WeightedPeer, IssuanceStatus, AcceptancePolicy and its RoleAdmissionPolicy/Secret, ExternalCA, CAConfig, OrchestrationConfig, TaskDefaults, DispatcherConfig, RaftConfig, EncryptionConfig, SpreadOver, PlacementPreference, Placement, JoinTokens, RootCA, Certificate, EncryptionKey, ManagerStatus, FileTarget, SecretReference, ConfigReference, BlacklistedCertificate, HealthConfig, MaybeEncryptedRecord, RootRotation, and Privileges with its CredentialSpec/SELinuxContext: pointer fields are re-allocated and filled via deepcopy.Copy, maps and byte slices are re-made and copied element-wise, and oneof fields are rebuilt by switching on the concrete wrapper type.]
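Reviewer note: everything above is the gogoproto deep-copy surface — every message gets a nil-safe Copy() plus a CopyFrom(src interface{}) that re-allocates pointer fields, maps, and byte slices so the clone shares no mutable state with its source. A minimal sketch of what that guarantees a caller (hypothetical snippet assuming it sits inside the generated package; not part of this diff):

	limits := &Resources{MemoryBytes: 1 << 30}
	req := &ResourceRequirements{Limits: limits}
	clone := req.Copy()               // nil-safe; re-allocates Limits and deep-copies it
	limits.MemoryBytes = 2 << 30      // mutate the original...
	println(clone.Limits.MemoryBytes) // ...the clone still prints 1073741824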
+func (m *Version) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Version) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Index != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintTypes(dAtA, i, uint64(m.Index))
+	}
+	return i, nil
+}
+
+func (m *IndexEntry) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *IndexEntry) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Key) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTypes(dAtA, i, uint64(len(m.Key)))
+		i += copy(dAtA[i:], m.Key)
+	}
+	if len(m.Val) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintTypes(dAtA, i, uint64(len(m.Val)))
+		i += copy(dAtA[i:], m.Val)
+	}
+	return i, nil
+}
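Reviewer note: the MarshalTo bodies hand-roll the protobuf wire format. Each leading byte literal is a precomputed field key, key = (field_number << 3) | wire_type, so 0x8 is field 1 as a varint and 0xa/0x12 are fields 1 and 2 as length-delimited payloads, with encodeVarintTypes emitting base-128 varints. A self-contained sketch under those assumptions (putVarint is a hypothetical stand-in mirroring the generated helper):

	package main

	import "fmt"

	// putVarint writes v at offset i as a base-128 varint (low 7 bits per byte,
	// continuation bit set on all but the last byte) and returns the new offset.
	func putVarint(dAtA []byte, i int, v uint64) int {
		for v >= 1<<7 {
			dAtA[i] = uint8(v&0x7f | 0x80)
			v >>= 7
			i++
		}
		dAtA[i] = uint8(v)
		return i + 1
	}

	func main() {
		// Encode IndexEntry{Key: "name"} the way the generated code does:
		buf := make([]byte, 16)
		i := 0
		buf[i] = 0xa // field 1, wire type 2 (length-delimited)
		i++
		i = putVarint(buf, i, uint64(len("name")))
		i += copy(buf[i:], "name")
		fmt.Printf("% x\n", buf[:i]) // 0a 04 6e 61 6d 65
	}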
[vendored generated code, reflowed and collapsed: the same generated Marshal/MarshalTo encoders follow for Annotations, NamedGenericResource, DiscreteGenericResource, GenericResource and its oneof Named/DiscreteResourceSpec wrappers, Resources, ResourceRequirements, Platform, PluginDescription, EngineDescription, NodeDescription, NodeTLSInfo, RaftMemberStatus, NodeStatus, Image, Mount and its options, RestartPolicy, UpdateConfig, UpdateStatus, ContainerStatus, PortStatus, TaskStatus and its Container wrapper, NetworkAttachmentConfig, IPAMConfig, PortConfig, Driver, IPAMOptions, Peer, WeightedPeer, IssuanceStatus, AcceptancePolicy and its RoleAdmissionPolicy/Secret, ExternalCA, CAConfig, OrchestrationConfig, TaskDefaults, DispatcherConfig, RaftConfig, EncryptionConfig, SpreadOver, PlacementPreference and its Spread wrapper, Placement, JoinTokens, RootCA, Certificate, EncryptionKey, ManagerStatus, FileTarget, SecretReference, ConfigReference, BlacklistedCertificate, HealthConfig, MaybeEncryptedRecord, RootRotation, Privileges, Privileges_CredentialSpec, and Privileges_CredentialSpec_File. Fields are emitted behind non-zero/non-empty guards; maps as nested key/value submessages, repeated strings via an inline varint length loop, floats as fixed32 little-endian (binary.LittleEndian + math.Float32bits), durations via types.SizeOfStdDuration/StdDurationMarshalTo, and oneof wrappers by delegating to the wrapped member's MarshalTo. The hunk then breaks off mid-declaration:]

+func (m *Privileges_CredentialSpec_Registry)
MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Registry))) + i += copy(dAtA[i:], m.Registry) + return i, nil +} +func (m *Privileges_SELinuxContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Privileges_SELinuxContext) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Disable { + dAtA[i] = 0x8 + i++ + if m.Disable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.User) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + } + if len(m.Role) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) + } + if len(m.Type) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + } + if len(m.Level) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Level))) + i += copy(dAtA[i:], m.Level) + } + return i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +func (m *Version) Size() (n int) { + var l int + _ = l + if m.Index != 0 { + n += 1 + sovTypes(uint64(m.Index)) + } + return n +} + +func (m *IndexEntry) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Val) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Annotations) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + if len(m.Indices) > 0 { + for _, e := range m.Indices { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *NamedGenericResource) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *DiscreteGenericResource) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Value != 0 { + n += 1 + sovTypes(uint64(m.Value)) + } + return n +} + +func (m *GenericResource) Size() (n int) { + var l int + _ = l + if m.Resource != nil { + n += m.Resource.Size() + } + return n +} + +func (m *GenericResource_NamedResourceSpec) Size() (n int) { + var l int + _ = l + if m.NamedResourceSpec != nil { + l = m.NamedResourceSpec.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *GenericResource_DiscreteResourceSpec) Size() (n int) { + var l int + _ = l + if m.DiscreteResourceSpec != nil { + l = m.DiscreteResourceSpec.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Resources) Size() (n int) { + var l int + _ = l + if m.NanoCPUs != 0 { + n += 1 + sovTypes(uint64(m.NanoCPUs)) + } + if m.MemoryBytes != 0 { + n += 1 + sovTypes(uint64(m.MemoryBytes)) + } + if len(m.Generic) > 0 { + for _, e := range m.Generic { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m 
*ResourceRequirements) Size() (n int) { + var l int + _ = l + if m.Limits != nil { + l = m.Limits.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Reservations != nil { + l = m.Reservations.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Platform) Size() (n int) { + var l int + _ = l + l = len(m.Architecture) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.OS) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *PluginDescription) Size() (n int) { + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *EngineDescription) Size() (n int) { + var l int + _ = l + l = len(m.EngineVersion) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + if len(m.Plugins) > 0 { + for _, e := range m.Plugins { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *NodeDescription) Size() (n int) { + var l int + _ = l + l = len(m.Hostname) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Platform != nil { + l = m.Platform.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Resources != nil { + l = m.Resources.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Engine != nil { + l = m.Engine.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.TLSInfo != nil { + l = m.TLSInfo.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.FIPS { + n += 2 + } + return n +} + +func (m *NodeTLSInfo) Size() (n int) { + var l int + _ = l + l = len(m.TrustRoot) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CertIssuerSubject) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CertIssuerPublicKey) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RaftMemberStatus) Size() (n int) { + var l int + _ = l + if m.Leader { + n += 2 + } + if m.Reachability != 0 { + n += 1 + sovTypes(uint64(m.Reachability)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *NodeStatus) Size() (n int) { + var l int + _ = l + if m.State != 0 { + n += 1 + sovTypes(uint64(m.State)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Image) Size() (n int) { + var l int + _ = l + l = len(m.Reference) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Mount) Size() (n int) { + var l int + _ = l + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + l = len(m.Source) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Target) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.ReadOnly { + n += 2 + } + if m.BindOptions != nil { + l = m.BindOptions.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.VolumeOptions != nil { + l = m.VolumeOptions.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.TmpfsOptions != nil { + l = m.TmpfsOptions.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Consistency != 0 { + n += 1 + sovTypes(uint64(m.Consistency)) + } + return n +} + +func (m *Mount_BindOptions) Size() (n int) { + var l int + _ = l + if m.Propagation != 0 { + n += 1 + sovTypes(uint64(m.Propagation)) + } + return n +} 
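+// Reviewer note (not part of the generated output): every Size() method in
+// this generated file follows the same gogo/protobuf accounting. A
+// length-delimited field (wire type 2, field number 1-15) costs one tag byte,
+// sovTypes(l) bytes of varint length prefix, and l payload bytes -- hence the
+// recurring `n += 1 + l + sovTypes(uint64(l))`. Worked example, assuming an
+// illustrative 300-byte payload: sovTypes(300) == 2 (300 needs two 7-bit
+// varint groups), so the field totals 1 + 300 + 2 = 303 bytes on the wire.
+// Scalar varint fields cost 1 + sovTypes(value), and bool fields are the
+// fixed `n += 2` (tag byte plus one 0/1 byte) seen throughout.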
+ +func (m *Mount_VolumeOptions) Size() (n int) { + var l int + _ = l + if m.NoCopy { + n += 2 + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + if m.DriverConfig != nil { + l = m.DriverConfig.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Mount_TmpfsOptions) Size() (n int) { + var l int + _ = l + if m.SizeBytes != 0 { + n += 1 + sovTypes(uint64(m.SizeBytes)) + } + if m.Mode != 0 { + n += 1 + sovTypes(uint64(m.Mode)) + } + return n +} + +func (m *RestartPolicy) Size() (n int) { + var l int + _ = l + if m.Condition != 0 { + n += 1 + sovTypes(uint64(m.Condition)) + } + if m.Delay != nil { + l = m.Delay.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.MaxAttempts != 0 { + n += 1 + sovTypes(uint64(m.MaxAttempts)) + } + if m.Window != nil { + l = m.Window.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *UpdateConfig) Size() (n int) { + var l int + _ = l + if m.Parallelism != 0 { + n += 1 + sovTypes(uint64(m.Parallelism)) + } + l = types.SizeOfStdDuration(m.Delay) + n += 1 + l + sovTypes(uint64(l)) + if m.FailureAction != 0 { + n += 1 + sovTypes(uint64(m.FailureAction)) + } + if m.Monitor != nil { + l = m.Monitor.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.MaxFailureRatio != 0 { + n += 5 + } + if m.Order != 0 { + n += 1 + sovTypes(uint64(m.Order)) + } + return n +} + +func (m *UpdateStatus) Size() (n int) { + var l int + _ = l + if m.State != 0 { + n += 1 + sovTypes(uint64(m.State)) + } + if m.StartedAt != nil { + l = m.StartedAt.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.CompletedAt != nil { + l = m.CompletedAt.Size() + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ContainerStatus) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.PID != 0 { + n += 1 + sovTypes(uint64(m.PID)) + } + if m.ExitCode != 0 { + n += 1 + sovTypes(uint64(m.ExitCode)) + } + return n +} + +func (m *PortStatus) Size() (n int) { + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *TaskStatus) Size() (n int) { + var l int + _ = l + if m.Timestamp != nil { + l = m.Timestamp.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.State != 0 { + n += 1 + sovTypes(uint64(m.State)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Err) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.RuntimeStatus != nil { + n += m.RuntimeStatus.Size() + } + if m.PortStatus != nil { + l = m.PortStatus.Size() + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.AppliedBy) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.AppliedAt != nil { + l = m.AppliedAt.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *TaskStatus_Container) Size() (n int) { + var l int + _ = l + if m.Container != nil { + l = m.Container.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *NetworkAttachmentConfig) Size() (n int) { + var l int + _ = l + l = len(m.Target) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Aliases) > 0 { + for _, s := range m.Aliases { + l = len(s) + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Addresses) > 0 { + for _, s 
:= range m.Addresses { + l = len(s) + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.DriverAttachmentOpts) > 0 { + for k, v := range m.DriverAttachmentOpts { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + return n +} + +func (m *IPAMConfig) Size() (n int) { + var l int + _ = l + if m.Family != 0 { + n += 1 + sovTypes(uint64(m.Family)) + } + l = len(m.Subnet) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Range) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Gateway) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Reserved) > 0 { + for k, v := range m.Reserved { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + return n +} + +func (m *PortConfig) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Protocol != 0 { + n += 1 + sovTypes(uint64(m.Protocol)) + } + if m.TargetPort != 0 { + n += 1 + sovTypes(uint64(m.TargetPort)) + } + if m.PublishedPort != 0 { + n += 1 + sovTypes(uint64(m.PublishedPort)) + } + if m.PublishMode != 0 { + n += 1 + sovTypes(uint64(m.PublishMode)) + } + return n +} + +func (m *Driver) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Options) > 0 { + for k, v := range m.Options { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + return n +} + +func (m *IPAMOptions) Size() (n int) { + var l int + _ = l + if m.Driver != nil { + l = m.Driver.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Configs) > 0 { + for _, e := range m.Configs { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *Peer) Size() (n int) { + var l int + _ = l + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *WeightedPeer) Size() (n int) { + var l int + _ = l + if m.Peer != nil { + l = m.Peer.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Weight != 0 { + n += 1 + sovTypes(uint64(m.Weight)) + } + return n +} + +func (m *IssuanceStatus) Size() (n int) { + var l int + _ = l + if m.State != 0 { + n += 1 + sovTypes(uint64(m.State)) + } + l = len(m.Err) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *AcceptancePolicy) Size() (n int) { + var l int + _ = l + if len(m.Policies) > 0 { + for _, e := range m.Policies { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy) Size() (n int) { + var l int + _ = l + if m.Role != 0 { + n += 1 + sovTypes(uint64(m.Role)) + } + if m.Autoaccept { + n += 2 + } + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) Size() (n int) { + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Alg) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ExternalCA) Size() (n int) { + var l int + _ = l + if m.Protocol != 0 { + n += 1 + sovTypes(uint64(m.Protocol)) + } + l = len(m.URL) + if l > 0 { + n 
+= 1 + l + sovTypes(uint64(l)) + } + if len(m.Options) > 0 { + for k, v := range m.Options { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + l = len(m.CACert) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *CAConfig) Size() (n int) { + var l int + _ = l + if m.NodeCertExpiry != nil { + l = m.NodeCertExpiry.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.ExternalCAs) > 0 { + for _, e := range m.ExternalCAs { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.SigningCACert) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.SigningCAKey) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.ForceRotate != 0 { + n += 1 + sovTypes(uint64(m.ForceRotate)) + } + return n +} + +func (m *OrchestrationConfig) Size() (n int) { + var l int + _ = l + if m.TaskHistoryRetentionLimit != 0 { + n += 1 + sovTypes(uint64(m.TaskHistoryRetentionLimit)) + } + return n +} + +func (m *TaskDefaults) Size() (n int) { + var l int + _ = l + if m.LogDriver != nil { + l = m.LogDriver.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *DispatcherConfig) Size() (n int) { + var l int + _ = l + if m.HeartbeatPeriod != nil { + l = m.HeartbeatPeriod.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RaftConfig) Size() (n int) { + var l int + _ = l + if m.SnapshotInterval != 0 { + n += 1 + sovTypes(uint64(m.SnapshotInterval)) + } + if m.KeepOldSnapshots != 0 { + n += 1 + sovTypes(uint64(m.KeepOldSnapshots)) + } + if m.LogEntriesForSlowFollowers != 0 { + n += 1 + sovTypes(uint64(m.LogEntriesForSlowFollowers)) + } + if m.HeartbeatTick != 0 { + n += 1 + sovTypes(uint64(m.HeartbeatTick)) + } + if m.ElectionTick != 0 { + n += 1 + sovTypes(uint64(m.ElectionTick)) + } + return n +} + +func (m *EncryptionConfig) Size() (n int) { + var l int + _ = l + if m.AutoLockManagers { + n += 2 + } + return n +} + +func (m *SpreadOver) Size() (n int) { + var l int + _ = l + l = len(m.SpreadDescriptor) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *PlacementPreference) Size() (n int) { + var l int + _ = l + if m.Preference != nil { + n += m.Preference.Size() + } + return n +} + +func (m *PlacementPreference_Spread) Size() (n int) { + var l int + _ = l + if m.Spread != nil { + l = m.Spread.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Placement) Size() (n int) { + var l int + _ = l + if len(m.Constraints) > 0 { + for _, s := range m.Constraints { + l = len(s) + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Preferences) > 0 { + for _, e := range m.Preferences { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Platforms) > 0 { + for _, e := range m.Platforms { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *JoinTokens) Size() (n int) { + var l int + _ = l + l = len(m.Worker) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Manager) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RootCA) Size() (n int) { + var l int + _ = l + l = len(m.CAKey) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CACert) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CACertHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = m.JoinTokens.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.RootRotation != nil { + l = 
m.RootRotation.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.LastForcedRotation != 0 { + n += 1 + sovTypes(uint64(m.LastForcedRotation)) + } + return n +} + +func (m *Certificate) Size() (n int) { + var l int + _ = l + if m.Role != 0 { + n += 1 + sovTypes(uint64(m.Role)) + } + l = len(m.CSR) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = m.Status.Size() + n += 1 + l + sovTypes(uint64(l)) + l = len(m.Certificate) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CN) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *EncryptionKey) Size() (n int) { + var l int + _ = l + l = len(m.Subsystem) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Algorithm != 0 { + n += 1 + sovTypes(uint64(m.Algorithm)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.LamportTime != 0 { + n += 1 + sovTypes(uint64(m.LamportTime)) + } + return n +} + +func (m *ManagerStatus) Size() (n int) { + var l int + _ = l + if m.RaftID != 0 { + n += 1 + sovTypes(uint64(m.RaftID)) + } + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Leader { + n += 2 + } + if m.Reachability != 0 { + n += 1 + sovTypes(uint64(m.Reachability)) + } + return n +} + +func (m *FileTarget) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.UID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.GID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Mode != 0 { + n += 1 + sovTypes(uint64(m.Mode)) + } + return n +} + +func (m *SecretReference) Size() (n int) { + var l int + _ = l + l = len(m.SecretID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.SecretName) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Target != nil { + n += m.Target.Size() + } + return n +} + +func (m *SecretReference_File) Size() (n int) { + var l int + _ = l + if m.File != nil { + l = m.File.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *ConfigReference) Size() (n int) { + var l int + _ = l + l = len(m.ConfigID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ConfigName) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Target != nil { + n += m.Target.Size() + } + return n +} + +func (m *ConfigReference_File) Size() (n int) { + var l int + _ = l + if m.File != nil { + l = m.File.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *BlacklistedCertificate) Size() (n int) { + var l int + _ = l + if m.Expiry != nil { + l = m.Expiry.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *HealthConfig) Size() (n int) { + var l int + _ = l + if len(m.Test) > 0 { + for _, s := range m.Test { + l = len(s) + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.Interval != nil { + l = m.Interval.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Timeout != nil { + l = m.Timeout.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Retries != 0 { + n += 1 + sovTypes(uint64(m.Retries)) + } + if m.StartPeriod != nil { + l = m.StartPeriod.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *MaybeEncryptedRecord) Size() (n int) { + var l int + _ = l + if m.Algorithm != 0 { + n += 1 + sovTypes(uint64(m.Algorithm)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Nonce) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RootRotation) Size() (n int) { + var l int + _ = l + l = len(m.CACert) + if l > 0 { 
+ n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CAKey) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CrossSignedCACert) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Privileges) Size() (n int) { + var l int + _ = l + if m.CredentialSpec != nil { + l = m.CredentialSpec.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.SELinuxContext != nil { + l = m.SELinuxContext.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Privileges_CredentialSpec) Size() (n int) { + var l int + _ = l + if m.Source != nil { + n += m.Source.Size() + } + return n +} + +func (m *Privileges_CredentialSpec_File) Size() (n int) { + var l int + _ = l + l = len(m.File) + n += 1 + l + sovTypes(uint64(l)) + return n +} +func (m *Privileges_CredentialSpec_Registry) Size() (n int) { + var l int + _ = l + l = len(m.Registry) + n += 1 + l + sovTypes(uint64(l)) + return n +} +func (m *Privileges_SELinuxContext) Size() (n int) { + var l int + _ = l + if m.Disable { + n += 2 + } + l = len(m.User) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Role) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Type) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Level) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func sovTypes(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Version) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Version{`, + `Index:` + fmt.Sprintf("%v", this.Index) + `,`, + `}`, + }, "") + return s +} +func (this *IndexEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IndexEntry{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Val:` + fmt.Sprintf("%v", this.Val) + `,`, + `}`, + }, "") + return s +} +func (this *Annotations) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&Annotations{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Indices:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Indices), "IndexEntry", "IndexEntry", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NamedGenericResource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamedGenericResource{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *DiscreteGenericResource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DiscreteGenericResource{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *GenericResource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GenericResource{`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `}`, + }, "") + return s +} +func (this *GenericResource_NamedResourceSpec) String() string { + if this 
== nil { + return "nil" + } + s := strings.Join([]string{`&GenericResource_NamedResourceSpec{`, + `NamedResourceSpec:` + strings.Replace(fmt.Sprintf("%v", this.NamedResourceSpec), "NamedGenericResource", "NamedGenericResource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GenericResource_DiscreteResourceSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GenericResource_DiscreteResourceSpec{`, + `DiscreteResourceSpec:` + strings.Replace(fmt.Sprintf("%v", this.DiscreteResourceSpec), "DiscreteGenericResource", "DiscreteGenericResource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Resources) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Resources{`, + `NanoCPUs:` + fmt.Sprintf("%v", this.NanoCPUs) + `,`, + `MemoryBytes:` + fmt.Sprintf("%v", this.MemoryBytes) + `,`, + `Generic:` + strings.Replace(fmt.Sprintf("%v", this.Generic), "GenericResource", "GenericResource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceRequirements) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceRequirements{`, + `Limits:` + strings.Replace(fmt.Sprintf("%v", this.Limits), "Resources", "Resources", 1) + `,`, + `Reservations:` + strings.Replace(fmt.Sprintf("%v", this.Reservations), "Resources", "Resources", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Platform) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Platform{`, + `Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`, + `OS:` + fmt.Sprintf("%v", this.OS) + `,`, + `}`, + }, "") + return s +} +func (this *PluginDescription) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PluginDescription{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *EngineDescription) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&EngineDescription{`, + `EngineVersion:` + fmt.Sprintf("%v", this.EngineVersion) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Plugins:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Plugins), "PluginDescription", "PluginDescription", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeDescription) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeDescription{`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Platform:` + strings.Replace(fmt.Sprintf("%v", this.Platform), "Platform", "Platform", 1) + `,`, + `Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "Resources", "Resources", 1) + `,`, + `Engine:` + strings.Replace(fmt.Sprintf("%v", this.Engine), "EngineDescription", "EngineDescription", 1) + `,`, + `TLSInfo:` + strings.Replace(fmt.Sprintf("%v", this.TLSInfo), "NodeTLSInfo", "NodeTLSInfo", 1) + `,`, + `FIPS:` + fmt.Sprintf("%v", this.FIPS) + `,`, + `}`, + }, "") + return s +} +func (this *NodeTLSInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeTLSInfo{`, + `TrustRoot:` + fmt.Sprintf("%v", 
this.TrustRoot) + `,`, + `CertIssuerSubject:` + fmt.Sprintf("%v", this.CertIssuerSubject) + `,`, + `CertIssuerPublicKey:` + fmt.Sprintf("%v", this.CertIssuerPublicKey) + `,`, + `}`, + }, "") + return s +} +func (this *RaftMemberStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RaftMemberStatus{`, + `Leader:` + fmt.Sprintf("%v", this.Leader) + `,`, + `Reachability:` + fmt.Sprintf("%v", this.Reachability) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *NodeStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeStatus{`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, + `}`, + }, "") + return s +} +func (this *Image) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Image{`, + `Reference:` + fmt.Sprintf("%v", this.Reference) + `,`, + `}`, + }, "") + return s +} +func (this *Mount) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Mount{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Source:` + fmt.Sprintf("%v", this.Source) + `,`, + `Target:` + fmt.Sprintf("%v", this.Target) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `BindOptions:` + strings.Replace(fmt.Sprintf("%v", this.BindOptions), "Mount_BindOptions", "Mount_BindOptions", 1) + `,`, + `VolumeOptions:` + strings.Replace(fmt.Sprintf("%v", this.VolumeOptions), "Mount_VolumeOptions", "Mount_VolumeOptions", 1) + `,`, + `TmpfsOptions:` + strings.Replace(fmt.Sprintf("%v", this.TmpfsOptions), "Mount_TmpfsOptions", "Mount_TmpfsOptions", 1) + `,`, + `Consistency:` + fmt.Sprintf("%v", this.Consistency) + `,`, + `}`, + }, "") + return s +} +func (this *Mount_BindOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Mount_BindOptions{`, + `Propagation:` + fmt.Sprintf("%v", this.Propagation) + `,`, + `}`, + }, "") + return s +} +func (this *Mount_VolumeOptions) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&Mount_VolumeOptions{`, + `NoCopy:` + fmt.Sprintf("%v", this.NoCopy) + `,`, + `Labels:` + mapStringForLabels + `,`, + `DriverConfig:` + strings.Replace(fmt.Sprintf("%v", this.DriverConfig), "Driver", "Driver", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Mount_TmpfsOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Mount_TmpfsOptions{`, + `SizeBytes:` + fmt.Sprintf("%v", this.SizeBytes) + `,`, + `Mode:` + fmt.Sprintf("%v", this.Mode) + `,`, + `}`, + }, "") + return s +} +func (this *RestartPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RestartPolicy{`, + `Condition:` + fmt.Sprintf("%v", this.Condition) + `,`, + `Delay:` + strings.Replace(fmt.Sprintf("%v", this.Delay), "Duration", "google_protobuf1.Duration", 1) + `,`, + `MaxAttempts:` + fmt.Sprintf("%v", this.MaxAttempts) + `,`, + `Window:` + strings.Replace(fmt.Sprintf("%v", this.Window), "Duration", "google_protobuf1.Duration", 1) + 
`,`, + `}`, + }, "") + return s +} +func (this *UpdateConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateConfig{`, + `Parallelism:` + fmt.Sprintf("%v", this.Parallelism) + `,`, + `Delay:` + strings.Replace(strings.Replace(this.Delay.String(), "Duration", "google_protobuf1.Duration", 1), `&`, ``, 1) + `,`, + `FailureAction:` + fmt.Sprintf("%v", this.FailureAction) + `,`, + `Monitor:` + strings.Replace(fmt.Sprintf("%v", this.Monitor), "Duration", "google_protobuf1.Duration", 1) + `,`, + `MaxFailureRatio:` + fmt.Sprintf("%v", this.MaxFailureRatio) + `,`, + `Order:` + fmt.Sprintf("%v", this.Order) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateStatus{`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `StartedAt:` + strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `CompletedAt:` + strings.Replace(fmt.Sprintf("%v", this.CompletedAt), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStatus{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `PID:` + fmt.Sprintf("%v", this.PID) + `,`, + `ExitCode:` + fmt.Sprintf("%v", this.ExitCode) + `,`, + `}`, + }, "") + return s +} +func (this *PortStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PortStatus{`, + `Ports:` + strings.Replace(fmt.Sprintf("%v", this.Ports), "PortConfig", "PortConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TaskStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskStatus{`, + `Timestamp:` + strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Err:` + fmt.Sprintf("%v", this.Err) + `,`, + `RuntimeStatus:` + fmt.Sprintf("%v", this.RuntimeStatus) + `,`, + `PortStatus:` + strings.Replace(fmt.Sprintf("%v", this.PortStatus), "PortStatus", "PortStatus", 1) + `,`, + `AppliedBy:` + fmt.Sprintf("%v", this.AppliedBy) + `,`, + `AppliedAt:` + strings.Replace(fmt.Sprintf("%v", this.AppliedAt), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TaskStatus_Container) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskStatus_Container{`, + `Container:` + strings.Replace(fmt.Sprintf("%v", this.Container), "ContainerStatus", "ContainerStatus", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkAttachmentConfig) String() string { + if this == nil { + return "nil" + } + keysForDriverAttachmentOpts := make([]string, 0, len(this.DriverAttachmentOpts)) + for k, _ := range this.DriverAttachmentOpts { + keysForDriverAttachmentOpts = append(keysForDriverAttachmentOpts, k) + } + sortkeys.Strings(keysForDriverAttachmentOpts) + mapStringForDriverAttachmentOpts := "map[string]string{" + for _, k := range keysForDriverAttachmentOpts { + mapStringForDriverAttachmentOpts += fmt.Sprintf("%v: %v,", k, this.DriverAttachmentOpts[k]) + } + mapStringForDriverAttachmentOpts += "}" + s := strings.Join([]string{`&NetworkAttachmentConfig{`, + `Target:` + fmt.Sprintf("%v", this.Target) + 
`,`, + `Aliases:` + fmt.Sprintf("%v", this.Aliases) + `,`, + `Addresses:` + fmt.Sprintf("%v", this.Addresses) + `,`, + `DriverAttachmentOpts:` + mapStringForDriverAttachmentOpts + `,`, + `}`, + }, "") + return s +} +func (this *IPAMConfig) String() string { + if this == nil { + return "nil" + } + keysForReserved := make([]string, 0, len(this.Reserved)) + for k, _ := range this.Reserved { + keysForReserved = append(keysForReserved, k) + } + sortkeys.Strings(keysForReserved) + mapStringForReserved := "map[string]string{" + for _, k := range keysForReserved { + mapStringForReserved += fmt.Sprintf("%v: %v,", k, this.Reserved[k]) + } + mapStringForReserved += "}" + s := strings.Join([]string{`&IPAMConfig{`, + `Family:` + fmt.Sprintf("%v", this.Family) + `,`, + `Subnet:` + fmt.Sprintf("%v", this.Subnet) + `,`, + `Range:` + fmt.Sprintf("%v", this.Range) + `,`, + `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, + `Reserved:` + mapStringForReserved + `,`, + `}`, + }, "") + return s +} +func (this *PortConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PortConfig{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `TargetPort:` + fmt.Sprintf("%v", this.TargetPort) + `,`, + `PublishedPort:` + fmt.Sprintf("%v", this.PublishedPort) + `,`, + `PublishMode:` + fmt.Sprintf("%v", this.PublishMode) + `,`, + `}`, + }, "") + return s +} +func (this *Driver) String() string { + if this == nil { + return "nil" + } + keysForOptions := make([]string, 0, len(this.Options)) + for k, _ := range this.Options { + keysForOptions = append(keysForOptions, k) + } + sortkeys.Strings(keysForOptions) + mapStringForOptions := "map[string]string{" + for _, k := range keysForOptions { + mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + } + mapStringForOptions += "}" + s := strings.Join([]string{`&Driver{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Options:` + mapStringForOptions + `,`, + `}`, + }, "") + return s +} +func (this *IPAMOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IPAMOptions{`, + `Driver:` + strings.Replace(fmt.Sprintf("%v", this.Driver), "Driver", "Driver", 1) + `,`, + `Configs:` + strings.Replace(fmt.Sprintf("%v", this.Configs), "IPAMConfig", "IPAMConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Peer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Peer{`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, + `}`, + }, "") + return s +} +func (this *WeightedPeer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WeightedPeer{`, + `Peer:` + strings.Replace(fmt.Sprintf("%v", this.Peer), "Peer", "Peer", 1) + `,`, + `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, + `}`, + }, "") + return s +} +func (this *IssuanceStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IssuanceStatus{`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `Err:` + fmt.Sprintf("%v", this.Err) + `,`, + `}`, + }, "") + return s +} +func (this *AcceptancePolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AcceptancePolicy{`, + `Policies:` + strings.Replace(fmt.Sprintf("%v", this.Policies), "AcceptancePolicy_RoleAdmissionPolicy", "AcceptancePolicy_RoleAdmissionPolicy", 1) + `,`, + `}`, + }, "") + return s +} +func (this 
*AcceptancePolicy_RoleAdmissionPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AcceptancePolicy_RoleAdmissionPolicy{`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `Autoaccept:` + fmt.Sprintf("%v", this.Autoaccept) + `,`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "AcceptancePolicy_RoleAdmissionPolicy_Secret", "AcceptancePolicy_RoleAdmissionPolicy_Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AcceptancePolicy_RoleAdmissionPolicy_Secret) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AcceptancePolicy_RoleAdmissionPolicy_Secret{`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `Alg:` + fmt.Sprintf("%v", this.Alg) + `,`, + `}`, + }, "") + return s +} +func (this *ExternalCA) String() string { + if this == nil { + return "nil" + } + keysForOptions := make([]string, 0, len(this.Options)) + for k, _ := range this.Options { + keysForOptions = append(keysForOptions, k) + } + sortkeys.Strings(keysForOptions) + mapStringForOptions := "map[string]string{" + for _, k := range keysForOptions { + mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + } + mapStringForOptions += "}" + s := strings.Join([]string{`&ExternalCA{`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `Options:` + mapStringForOptions + `,`, + `CACert:` + fmt.Sprintf("%v", this.CACert) + `,`, + `}`, + }, "") + return s +} +func (this *CAConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CAConfig{`, + `NodeCertExpiry:` + strings.Replace(fmt.Sprintf("%v", this.NodeCertExpiry), "Duration", "google_protobuf1.Duration", 1) + `,`, + `ExternalCAs:` + strings.Replace(fmt.Sprintf("%v", this.ExternalCAs), "ExternalCA", "ExternalCA", 1) + `,`, + `SigningCACert:` + fmt.Sprintf("%v", this.SigningCACert) + `,`, + `SigningCAKey:` + fmt.Sprintf("%v", this.SigningCAKey) + `,`, + `ForceRotate:` + fmt.Sprintf("%v", this.ForceRotate) + `,`, + `}`, + }, "") + return s +} +func (this *OrchestrationConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OrchestrationConfig{`, + `TaskHistoryRetentionLimit:` + fmt.Sprintf("%v", this.TaskHistoryRetentionLimit) + `,`, + `}`, + }, "") + return s +} +func (this *TaskDefaults) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskDefaults{`, + `LogDriver:` + strings.Replace(fmt.Sprintf("%v", this.LogDriver), "Driver", "Driver", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DispatcherConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DispatcherConfig{`, + `HeartbeatPeriod:` + strings.Replace(fmt.Sprintf("%v", this.HeartbeatPeriod), "Duration", "google_protobuf1.Duration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RaftConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RaftConfig{`, + `SnapshotInterval:` + fmt.Sprintf("%v", this.SnapshotInterval) + `,`, + `KeepOldSnapshots:` + fmt.Sprintf("%v", this.KeepOldSnapshots) + `,`, + `LogEntriesForSlowFollowers:` + fmt.Sprintf("%v", this.LogEntriesForSlowFollowers) + `,`, + `HeartbeatTick:` + fmt.Sprintf("%v", this.HeartbeatTick) + `,`, + `ElectionTick:` + fmt.Sprintf("%v", this.ElectionTick) + `,`, + `}`, + }, "") + return s +} +func (this *EncryptionConfig) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&EncryptionConfig{`, + `AutoLockManagers:` + fmt.Sprintf("%v", this.AutoLockManagers) + `,`, + `}`, + }, "") + return s +} +func (this *SpreadOver) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SpreadOver{`, + `SpreadDescriptor:` + fmt.Sprintf("%v", this.SpreadDescriptor) + `,`, + `}`, + }, "") + return s +} +func (this *PlacementPreference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PlacementPreference{`, + `Preference:` + fmt.Sprintf("%v", this.Preference) + `,`, + `}`, + }, "") + return s +} +func (this *PlacementPreference_Spread) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PlacementPreference_Spread{`, + `Spread:` + strings.Replace(fmt.Sprintf("%v", this.Spread), "SpreadOver", "SpreadOver", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Placement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Placement{`, + `Constraints:` + fmt.Sprintf("%v", this.Constraints) + `,`, + `Preferences:` + strings.Replace(fmt.Sprintf("%v", this.Preferences), "PlacementPreference", "PlacementPreference", 1) + `,`, + `Platforms:` + strings.Replace(fmt.Sprintf("%v", this.Platforms), "Platform", "Platform", 1) + `,`, + `}`, + }, "") + return s +} +func (this *JoinTokens) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JoinTokens{`, + `Worker:` + fmt.Sprintf("%v", this.Worker) + `,`, + `Manager:` + fmt.Sprintf("%v", this.Manager) + `,`, + `}`, + }, "") + return s +} +func (this *RootCA) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RootCA{`, + `CAKey:` + fmt.Sprintf("%v", this.CAKey) + `,`, + `CACert:` + fmt.Sprintf("%v", this.CACert) + `,`, + `CACertHash:` + fmt.Sprintf("%v", this.CACertHash) + `,`, + `JoinTokens:` + strings.Replace(strings.Replace(this.JoinTokens.String(), "JoinTokens", "JoinTokens", 1), `&`, ``, 1) + `,`, + `RootRotation:` + strings.Replace(fmt.Sprintf("%v", this.RootRotation), "RootRotation", "RootRotation", 1) + `,`, + `LastForcedRotation:` + fmt.Sprintf("%v", this.LastForcedRotation) + `,`, + `}`, + }, "") + return s +} +func (this *Certificate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Certificate{`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `CSR:` + fmt.Sprintf("%v", this.CSR) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IssuanceStatus", "IssuanceStatus", 1), `&`, ``, 1) + `,`, + `Certificate:` + fmt.Sprintf("%v", this.Certificate) + `,`, + `CN:` + fmt.Sprintf("%v", this.CN) + `,`, + `}`, + }, "") + return s +} +func (this *EncryptionKey) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EncryptionKey{`, + `Subsystem:` + fmt.Sprintf("%v", this.Subsystem) + `,`, + `Algorithm:` + fmt.Sprintf("%v", this.Algorithm) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `LamportTime:` + fmt.Sprintf("%v", this.LamportTime) + `,`, + `}`, + }, "") + return s +} +func (this *ManagerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ManagerStatus{`, + `RaftID:` + fmt.Sprintf("%v", this.RaftID) + `,`, + `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, + `Leader:` + fmt.Sprintf("%v", this.Leader) + `,`, + `Reachability:` + fmt.Sprintf("%v", this.Reachability) + `,`, + `}`, + }, "") + return s +} +func (this *FileTarget) String() string { + if this == nil { + return 
"nil" + } + s := strings.Join([]string{`&FileTarget{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `GID:` + fmt.Sprintf("%v", this.GID) + `,`, + `Mode:` + fmt.Sprintf("%v", this.Mode) + `,`, + `}`, + }, "") + return s +} +func (this *SecretReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretReference{`, + `SecretID:` + fmt.Sprintf("%v", this.SecretID) + `,`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `Target:` + fmt.Sprintf("%v", this.Target) + `,`, + `}`, + }, "") + return s +} +func (this *SecretReference_File) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretReference_File{`, + `File:` + strings.Replace(fmt.Sprintf("%v", this.File), "FileTarget", "FileTarget", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigReference{`, + `ConfigID:` + fmt.Sprintf("%v", this.ConfigID) + `,`, + `ConfigName:` + fmt.Sprintf("%v", this.ConfigName) + `,`, + `Target:` + fmt.Sprintf("%v", this.Target) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigReference_File) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigReference_File{`, + `File:` + strings.Replace(fmt.Sprintf("%v", this.File), "FileTarget", "FileTarget", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BlacklistedCertificate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BlacklistedCertificate{`, + `Expiry:` + strings.Replace(fmt.Sprintf("%v", this.Expiry), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `}`, + }, "") + return s +} +func (this *HealthConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HealthConfig{`, + `Test:` + fmt.Sprintf("%v", this.Test) + `,`, + `Interval:` + strings.Replace(fmt.Sprintf("%v", this.Interval), "Duration", "google_protobuf1.Duration", 1) + `,`, + `Timeout:` + strings.Replace(fmt.Sprintf("%v", this.Timeout), "Duration", "google_protobuf1.Duration", 1) + `,`, + `Retries:` + fmt.Sprintf("%v", this.Retries) + `,`, + `StartPeriod:` + strings.Replace(fmt.Sprintf("%v", this.StartPeriod), "Duration", "google_protobuf1.Duration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MaybeEncryptedRecord) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MaybeEncryptedRecord{`, + `Algorithm:` + fmt.Sprintf("%v", this.Algorithm) + `,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `Nonce:` + fmt.Sprintf("%v", this.Nonce) + `,`, + `}`, + }, "") + return s +} +func (this *RootRotation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RootRotation{`, + `CACert:` + fmt.Sprintf("%v", this.CACert) + `,`, + `CAKey:` + fmt.Sprintf("%v", this.CAKey) + `,`, + `CrossSignedCACert:` + fmt.Sprintf("%v", this.CrossSignedCACert) + `,`, + `}`, + }, "") + return s +} +func (this *Privileges) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Privileges{`, + `CredentialSpec:` + strings.Replace(fmt.Sprintf("%v", this.CredentialSpec), "Privileges_CredentialSpec", "Privileges_CredentialSpec", 1) + `,`, + `SELinuxContext:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxContext), "Privileges_SELinuxContext", "Privileges_SELinuxContext", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Privileges_CredentialSpec) 
String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Privileges_CredentialSpec{`, + `Source:` + fmt.Sprintf("%v", this.Source) + `,`, + `}`, + }, "") + return s +} +func (this *Privileges_CredentialSpec_File) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Privileges_CredentialSpec_File{`, + `File:` + fmt.Sprintf("%v", this.File) + `,`, + `}`, + }, "") + return s +} +func (this *Privileges_CredentialSpec_Registry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Privileges_CredentialSpec_Registry{`, + `Registry:` + fmt.Sprintf("%v", this.Registry) + `,`, + `}`, + }, "") + return s +} +func (this *Privileges_SELinuxContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Privileges_SELinuxContext{`, + `Disable:` + fmt.Sprintf("%v", this.Disable) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Level:` + fmt.Sprintf("%v", this.Level) + `,`, + `}`, + }, "") + return s +} +func valueToStringTypes(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Version) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Version: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Version: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IndexEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IndexEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IndexEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Val = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Annotations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Annotations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Annotations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 
0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Indices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Indices = append(m.Indices, IndexEntry{}) + if err := m.Indices[len(m.Indices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedGenericResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedGenericResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedGenericResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DiscreteGenericResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DiscreteGenericResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DiscreteGenericResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GenericResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenericResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenericResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamedResourceSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &NamedGenericResource{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Resource = &GenericResource_NamedResourceSpec{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DiscreteResourceSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &DiscreteGenericResource{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Resource = &GenericResource_DiscreteResourceSpec{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Resources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Resources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Resources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NanoCPUs", wireType) + } + m.NanoCPUs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NanoCPUs |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryBytes", wireType) + } + m.MemoryBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + m.MemoryBytes |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Generic", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Generic = append(m.Generic, &GenericResource{}) + if err := m.Generic[len(m.Generic)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceRequirements: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceRequirements: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Limits == nil { + m.Limits = &Resources{} + } + if err := m.Limits.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reservations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Reservations == nil { + m.Reservations = &Resources{} + } + if err := m.Reservations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Platform) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + 
preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Platform: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Platform: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Architecture = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OS", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OS = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PluginDescription) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginDescription: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginDescription: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EngineDescription) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EngineDescription: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EngineDescription: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EngineVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EngineVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + 
postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Plugins", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Plugins = append(m.Plugins, PluginDescription{}) + if err := m.Plugins[len(m.Plugins)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeDescription) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeDescription: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeDescription: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Platform", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Platform == nil { + m.Platform = &Platform{} + } + if err := m.Platform.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resources == nil { + m.Resources = &Resources{} + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Engine", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Engine == nil { + m.Engine = &EngineDescription{} + } + if err := m.Engine.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TLSInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TLSInfo == nil { + m.TLSInfo = &NodeTLSInfo{} + } + if err := m.TLSInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FIPS", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.FIPS = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeTLSInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { 
+ return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeTLSInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeTLSInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrustRoot", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TrustRoot = append(m.TrustRoot[:0], dAtA[iNdEx:postIndex]...) + if m.TrustRoot == nil { + m.TrustRoot = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CertIssuerSubject", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CertIssuerSubject = append(m.CertIssuerSubject[:0], dAtA[iNdEx:postIndex]...) + if m.CertIssuerSubject == nil { + m.CertIssuerSubject = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CertIssuerPublicKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CertIssuerPublicKey = append(m.CertIssuerPublicKey[:0], dAtA[iNdEx:postIndex]...) 
+ if m.CertIssuerPublicKey == nil { + m.CertIssuerPublicKey = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RaftMemberStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RaftMemberStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RaftMemberStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Leader = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Reachability", wireType) + } + m.Reachability = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Reachability |= (RaftMemberStatus_Reachability(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= (NodeStatus_State(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Image) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Image: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Image: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reference = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mount) Unmarshal(dAtA []byte) error 
{ + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Mount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Mount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (Mount_MountType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Target = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BindOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BindOptions == nil { + m.BindOptions = &Mount_BindOptions{} + } + if err := m.BindOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VolumeOptions == nil { + m.VolumeOptions = &Mount_VolumeOptions{} + } + if err := m.VolumeOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TmpfsOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TmpfsOptions == nil { + m.TmpfsOptions = &Mount_TmpfsOptions{} + } + if err := m.TmpfsOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Consistency", wireType) + } + m.Consistency = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Consistency |= (Mount_MountConsistency(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mount_BindOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BindOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BindOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Propagation", wireType) + } + m.Propagation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Propagation |= (Mount_BindOptions_MountPropagation(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mount_VolumeOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoCopy", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NoCopy = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DriverConfig == nil { + m.DriverConfig = &Driver{} + } + if err := m.DriverConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mount_TmpfsOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TmpfsOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TmpfsOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SizeBytes", wireType) + } + m.SizeBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SizeBytes |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + m.Mode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Mode |= (os.FileMode(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RestartPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RestartPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RestartPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Condition", wireType) + } + m.Condition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Condition |= 
(RestartPolicy_RestartCondition(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Delay", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Delay == nil { + m.Delay = &google_protobuf1.Duration{} + } + if err := m.Delay.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxAttempts", wireType) + } + m.MaxAttempts = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxAttempts |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Window", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Window == nil { + m.Window = &google_protobuf1.Duration{} + } + if err := m.Window.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType) + } + m.Parallelism = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Parallelism |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Delay", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 
0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := types.StdDurationUnmarshal(&m.Delay, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailureAction", wireType) + } + m.FailureAction = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FailureAction |= (UpdateConfig_FailureAction(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Monitor", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Monitor == nil { + m.Monitor = &google_protobuf1.Duration{} + } + if err := m.Monitor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxFailureRatio", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.MaxFailureRatio = float32(math.Float32frombits(v)) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Order", wireType) + } + m.Order = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Order |= (UpdateConfig_UpdateOrder(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= (UpdateStatus_UpdateState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
StartedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartedAt == nil { + m.StartedAt = &google_protobuf.Timestamp{} + } + if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CompletedAt == nil { + m.CompletedAt = &google_protobuf.Timestamp{} + } + if err := m.CompletedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + 
iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PID", wireType) + } + m.PID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PID |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType) + } + m.ExitCode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExitCode |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PortStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PortStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, &PortConfig{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Timestamp == nil { + m.Timestamp = &google_protobuf.Timestamp{} + } + if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= (TaskState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Err = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ContainerStatus{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.RuntimeStatus = &TaskStatus_Container{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PortStatus == nil { + m.PortStatus = &PortStatus{} + } + if err := m.PortStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { 
+ return fmt.Errorf("proto: wrong wireType = %d for field AppliedBy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppliedBy = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppliedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AppliedAt == nil { + m.AppliedAt = &google_protobuf.Timestamp{} + } + if err := m.AppliedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkAttachmentConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkAttachmentConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkAttachmentConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Target = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Aliases = append(m.Aliases, 
string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverAttachmentOpts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DriverAttachmentOpts == nil { + m.DriverAttachmentOpts = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.DriverAttachmentOpts[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + 
return io.ErrUnexpectedEOF + } + return nil +} +func (m *IPAMConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAMConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPAMConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Family", wireType) + } + m.Family = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Family |= (IPAMConfig_AddressFamily(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subnet", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subnet = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Range = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gateway", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Gateway = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reserved", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Reserved == nil { + m.Reserved = 
make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Reserved[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PortConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PortConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + m.Protocol = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Protocol |= (PortConfig_Protocol(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetPort", wireType) + } + m.TargetPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TargetPort |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PublishedPort", wireType) + } + m.PublishedPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PublishedPort |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PublishMode", wireType) + } + m.PublishMode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PublishMode |= (PortConfig_PublishMode(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Driver) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Driver: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Driver: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Options[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IPAMOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAMOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPAMOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + 
} + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Driver == nil { + m.Driver = &Driver{} + } + if err := m.Driver.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Configs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Configs = append(m.Configs, &IPAMConfig{}) + if err := m.Configs[len(m.Configs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Peer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Peer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Peer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WeightedPeer) Unmarshal(dAtA []byte) 
error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WeightedPeer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WeightedPeer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Peer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Peer == nil { + m.Peer = &Peer{} + } + if err := m.Peer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) + } + m.Weight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Weight |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IssuanceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IssuanceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IssuanceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= (IssuanceStatus_State(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Err = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AcceptancePolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AcceptancePolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AcceptancePolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Policies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Policies = append(m.Policies, &AcceptancePolicy_RoleAdmissionPolicy{}) + if err := m.Policies[len(m.Policies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AcceptancePolicy_RoleAdmissionPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleAdmissionPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleAdmissionPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + m.Role = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Role |= (NodeRole(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Autoaccept", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift 
+ if b < 0x80 { + break + } + } + m.Autoaccept = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &AcceptancePolicy_RoleAdmissionPolicy_Secret{} + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Secret: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Secret: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Alg", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Alg = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExternalCA) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalCA: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalCA: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + m.Protocol = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Protocol |= (ExternalCA_CAProtocol(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.URL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Options[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CACert", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CACert = append(m.CACert[:0], dAtA[iNdEx:postIndex]...) 
+ if m.CACert == nil { + m.CACert = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CAConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CAConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CAConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeCertExpiry", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeCertExpiry == nil { + m.NodeCertExpiry = &google_protobuf1.Duration{} + } + if err := m.NodeCertExpiry.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalCAs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalCAs = append(m.ExternalCAs, &ExternalCA{}) + if err := m.ExternalCAs[len(m.ExternalCAs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SigningCACert", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SigningCACert = append(m.SigningCACert[:0], dAtA[iNdEx:postIndex]...) 
[(*CAConfig).Unmarshal, cont.: SigningCAKey []byte (field 4); ForceRotate uint64 (field 5, varint)]
[(*OrchestrationConfig).Unmarshal: TaskHistoryRetentionLimit int64 (field 1, varint)]
[(*TaskDefaults).Unmarshal: LogDriver *Driver (field 1, embedded, allocated if nil)]
[(*DispatcherConfig).Unmarshal: HeartbeatPeriod *google_protobuf1.Duration (field 1, embedded, allocated if nil)]
[(*RaftConfig).Unmarshal: SnapshotInterval uint64 (field 1, varint)]
[(*RaftConfig).Unmarshal, cont.: KeepOldSnapshots uint64 (field 2); LogEntriesForSlowFollowers uint64 (field 3); HeartbeatTick uint32 (field 4); ElectionTick uint32 (field 5); all varints]
[(*EncryptionConfig).Unmarshal: AutoLockManagers bool (field 1, varint stored as v != 0)]
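RaftConfig and EncryptionConfig decode entirely from one primitive, the base-128 varint read that the generator inlines at every call site. A standalone version of that loop with the same overflow and truncation guards:

```go
package main

import (
	"errors"
	"fmt"
)

// decodeUvarint mirrors the inlined loop above: each byte contributes its
// low seven bits, and a byte below 0x80 terminates the value. The
// shift >= 64 test guards overflow (ErrIntOverflowTypes in the generated code).
func decodeUvarint(data []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows uint64")
		}
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[n]
		n++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, n, nil
		}
	}
}

func main() {
	// 300 encodes as 0xAC 0x02: 44 | (2 << 7).
	v, n, err := decodeUvarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}
```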
[(*SpreadOver).Unmarshal: SpreadDescriptor string (field 1)]
[(*PlacementPreference).Unmarshal: Spread *SpreadOver (field 1, oneof case decoded into &PlacementPreference_Spread{v} and stored in m.Preference)]
[(*Placement).Unmarshal: Constraints []string (field 1, repeated); Preferences []*PlacementPreference (field 2, repeated embedded); Platforms []*Platform (field 3, repeated embedded)]
[(*JoinTokens).Unmarshal: Worker string (field 1); Manager string (field 2)]
[(*RootCA).Unmarshal: CAKey []byte (field 1); CACert []byte (field 2)]
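String and bytes fields such as JoinTokens.Worker or RootCA.CACert are wire type 2: a varint length followed by that many bytes, bounds-checked before slicing. A compact sketch, substituting the stdlib varint reader for the inlined loop; the payload bytes are invented:

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// readLengthDelimited mirrors the wire-type-2 pattern: read a varint
// length, check the end offset against the buffer, then slice the payload.
func readLengthDelimited(data []byte, i int) (payload []byte, next int, err error) {
	l, n := binary.Uvarint(data[i:])
	if n <= 0 {
		return nil, 0, errors.New("bad length varint")
	}
	i += n
	if l > uint64(len(data)-i) { // the generated "postIndex > l" check
		return nil, 0, errors.New("unexpected EOF")
	}
	return data[i : i+int(l)], i + int(l), nil
}

func main() {
	// JoinTokens field 1 (Worker) on the wire: tag 0x0A, length 2, "hi".
	buf := []byte{0x0A, 0x02, 'h', 'i'}
	payload, next, err := readLengthDelimited(buf, 1) // 1 skips the tag byte
	fmt.Println(string(payload), next, err)           // hi 4 <nil>
}
```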
[(*RootCA).Unmarshal, cont.: CACertHash string (field 3); JoinTokens (field 4, embedded non-pointer, unmarshalled in place); RootRotation *RootRotation (field 5, allocated if nil); LastForcedRotation uint64 (field 6, varint)]
[(*Certificate).Unmarshal: Role NodeRole (field 1, enum varint); CSR []byte (field 2); Status (field 3, embedded non-pointer); Certificate []byte (field 4)]
[(*Certificate).Unmarshal, cont.: CN string (field 5)]
[(*EncryptionKey).Unmarshal: Subsystem string (field 1); Algorithm EncryptionKey_Algorithm (field 2, enum varint); Key []byte (field 3)]
[(*EncryptionKey).Unmarshal, cont.: LamportTime uint64 (field 4, varint)]
[(*ManagerStatus).Unmarshal: RaftID uint64 (field 1, varint); Addr string (field 2); Leader bool (field 3, varint); Reachability RaftMemberStatus_Reachability (field 4, enum varint)]
[(*FileTarget).Unmarshal: Name string (field 1); UID string (field 2); GID string (field 3); Mode os.FileMode (field 4, varint)]
[(*SecretReference).Unmarshal: SecretID string (field 1); SecretName string (field 2); File *FileTarget (field 3, oneof case stored in m.Target as &SecretReference_File{v})]
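The File cases in SecretReference and ConfigReference show how gogo models a proto oneof: an interface-typed field on the parent plus one single-field wrapper struct per case; the decoder unmarshals the embedded message and stores the wrapper. A trimmed model of that shape (fields reduced to what the example needs; the real marker method is unexported and differently named):

```go
package main

import "fmt"

// Trimmed model of the generated oneof shape.
type FileTarget struct{ Name string }

type isSecretReferenceTarget interface{ isSecretReferenceTarget() }

type SecretReference struct {
	SecretName string
	Target     isSecretReferenceTarget // oneof: only the File case modeled here
}

type SecretReference_File struct{ File *FileTarget }

func (*SecretReference_File) isSecretReferenceTarget() {}

func main() {
	// What the File case does after unmarshalling the embedded FileTarget:
	v := &FileTarget{Name: "ca.crt"}
	m := &SecretReference{SecretName: "swarm-ca", Target: &SecretReference_File{v}}

	// Callers recover the active case with a type assertion or switch.
	if f, ok := m.Target.(*SecretReference_File); ok {
		fmt.Println(f.File.Name) // ca.crt
	}
}
```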
[(*ConfigReference).Unmarshal: ConfigID string (field 1); ConfigName string (field 2); File *FileTarget (field 3, oneof case stored in m.Target as &ConfigReference_File{v})]
[(*BlacklistedCertificate).Unmarshal: Expiry *google_protobuf.Timestamp (field 1, embedded, allocated if nil)]
[(*HealthConfig).Unmarshal: Test []string (field 1, repeated); Interval *google_protobuf1.Duration (field 2); Timeout *google_protobuf1.Duration (field 3); Retries int32 (field 4, varint); StartPeriod *google_protobuf1.Duration (field 5); embedded messages allocated if nil]
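HealthConfig's Interval, Timeout, and StartPeriod all use the embedded-message pattern: read a length, allocate the target only if it is still nil, then recurse into its Unmarshal on exactly that sub-slice. A reduced sketch with a stand-in Duration type that decodes only the seconds varint; bounds checks are elided:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// Stand-in for google_protobuf1.Duration: field 1 is a varint of seconds.
type Duration struct{ Seconds int64 }

func (d *Duration) Unmarshal(b []byte) error {
	if len(b) >= 2 && b[0] == 0x08 { // key 0x08: field 1, wire type 0
		v, _ := binary.Uvarint(b[1:])
		d.Seconds = int64(v)
	}
	return nil
}

type HealthConfig struct{ Interval *Duration }

// unmarshalInterval mirrors one embedded-message arm: length, lazy
// allocation, then a recursive Unmarshal over exactly msglen bytes.
func (m *HealthConfig) unmarshalInterval(data []byte, i int) (int, error) {
	msglen, n := binary.Uvarint(data[i:])
	i += n
	if m.Interval == nil { // allocated only on first occurrence
		m.Interval = &Duration{}
	}
	if err := m.Interval.Unmarshal(data[i : i+int(msglen)]); err != nil {
		return 0, err
	}
	return i + int(msglen), nil
}

func main() {
	// Embedded Duration{Seconds: 5}: length prefix 2, then bytes 0x08 0x05.
	m := &HealthConfig{}
	if _, err := m.unmarshalInterval([]byte{0x02, 0x08, 0x05}, 0); err != nil {
		panic(err)
	}
	fmt.Println(m.Interval.Seconds) // 5
}
```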
[(*MaybeEncryptedRecord).Unmarshal: Algorithm MaybeEncryptedRecord_Algorithm (field 1, enum varint); Data []byte (field 2); Nonce []byte (field 3)]
[(*RootRotation).Unmarshal: CACert []byte (field 1); CAKey []byte (field 2); CrossSignedCACert []byte (field 3)]
[(*Privileges).Unmarshal: CredentialSpec *Privileges_CredentialSpec (field 1); SELinuxContext *Privileges_SELinuxContext (field 2); both embedded, allocated if nil]
fmt.Errorf("proto: wrong wireType = %d for field File", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = &Privileges_CredentialSpec_File{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Registry", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = &Privileges_CredentialSpec_Registry{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Privileges_SELinuxContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SELinuxContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SELinuxContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Disable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Disable = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Level = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthTypes + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTypes(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx 
+= 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/types.proto", fileDescriptorTypes) } + +var fileDescriptorTypes = []byte{ + // 5116 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x7a, 0x5d, 0x6c, 0x23, 0x59, + 0x56, 0x7f, 0xec, 0xd8, 0x8e, 0x7d, 0xec, 0x24, 0xd5, 0xb7, 0xb3, 0x3d, 0x69, 0x6f, 0x4f, 0xe2, + 0xa9, 0x99, 0xde, 0x99, 0xed, 0x9d, 0xbf, 0xfb, 0x6b, 0x77, 0xd5, 0x33, 0xf3, 0xdf, 0x9d, 0xb1, + 0xcb, 0x95, 0x8e, 0xb7, 0xd3, 0xb6, 0x75, 0xed, 0x74, 0xef, 0x22, 0x41, 0x51, 0xa9, 0xba, 0x71, + 0x6a, 0x52, 0xae, 0x6b, 0xaa, 0xca, 0xe9, 0x36, 0x0b, 0x62, 0xc4, 0x03, 0xa0, 0x3c, 0xc1, 0x0b, + 0x2c, 0x42, 0x41, 0x48, 0xf0, 0xc6, 0x03, 0x0f, 0x20, 0x21, 0x78, 0x1a, 0x24, 0x84, 0x56, 0xbc, + 0xc0, 0x82, 0x84, 0x56, 0x20, 0x05, 0x36, 0x0f, 0xbc, 0xad, 0xe0, 0x05, 0xf1, 0xc2, 0x03, 0xba, + 0x1f, 0x55, 0xae, 0xb8, 0x2b, 0xc9, 0x0c, 0xbb, 0x2f, 0x89, 0xef, 0x39, 0xbf, 0x73, 0xee, 0xbd, + 0xe7, 0xde, 0x7b, 0xee, 0x39, 0xe7, 0x16, 0xdc, 0x19, 0x3a, 0xe1, 0xc1, 0x64, 0xaf, 0x6e, 0xd1, + 0xd1, 0x5d, 0x9b, 0x5a, 0x87, 0xc4, 0xbf, 0x1b, 0xbc, 0x30, 0xfd, 0xd1, 0xa1, 0x13, 0xde, 0x35, + 0xc7, 0xce, 0xdd, 0x70, 0x3a, 0x26, 0x41, 0x7d, 0xec, 0xd3, 0x90, 0x22, 0x24, 0x00, 0xf5, 0x08, + 0x50, 0x3f, 0xba, 0x5f, 0xdd, 0x1c, 0x52, 0x3a, 0x74, 0xc9, 0x5d, 0x8e, 0xd8, 0x9b, 0xec, 0xdf, + 0x0d, 0x9d, 0x11, 0x09, 0x42, 0x73, 0x34, 0x16, 0x42, 0xd5, 0x8d, 0x79, 0x80, 0x3d, 0xf1, 0xcd, + 0xd0, 0xa1, 0x9e, 0xe4, 0xaf, 0x0d, 0xe9, 0x90, 0xf2, 0x9f, 0x77, 0xd9, 0x2f, 0x41, 0x55, 0x37, + 0x61, 0xe9, 0x19, 0xf1, 0x03, 0x87, 0x7a, 0x68, 0x0d, 0xf2, 0x8e, 0x67, 0x93, 0x97, 0xeb, 0x99, + 0x5a, 0xe6, 0x9d, 0x1c, 0x16, 0x0d, 0xf5, 0x1e, 0x40, 0x9b, 0xfd, 0xd0, 0xbd, 0xd0, 0x9f, 0x22, + 0x05, 0x16, 0x0f, 0xc9, 0x94, 0x23, 0x4a, 0x98, 0xfd, 0x64, 0x94, 0x23, 0xd3, 0x5d, 0xcf, 0x0a, + 0xca, 0x91, 0xe9, 0xaa, 0x3f, 0xca, 0x40, 0xb9, 0xe1, 0x79, 0x34, 0xe4, 0xbd, 0x07, 0x08, 0x41, + 0xce, 0x33, 0x47, 0x44, 0x0a, 0xf1, 0xdf, 0x48, 0x83, 0x82, 0x6b, 0xee, 0x11, 0x37, 0x58, 0xcf, + 0xd6, 0x16, 0xdf, 0x29, 0x3f, 0xf8, 0x4a, 0xfd, 0xd5, 0x29, 0xd7, 0x13, 0x4a, 0xea, 0x3b, 0x1c, + 0xcd, 0x07, 0x81, 0xa5, 0x28, 0xfa, 0x26, 0x2c, 0x39, 0x9e, 0xed, 0x58, 0x24, 0x58, 0xcf, 0x71, + 0x2d, 0x1b, 0x69, 0x5a, 0x66, 0xa3, 0x6f, 0xe6, 0xbe, 0x7f, 0xba, 0xb9, 0x80, 0x23, 0xa1, 0xea, + 0x7b, 0x50, 0x4e, 0xa8, 0x4d, 0x99, 0xdb, 0x1a, 0xe4, 0x8f, 0x4c, 0x77, 0x42, 0xe4, 0xec, 0x44, + 0xe3, 0xfd, 0xec, 0xa3, 0x8c, 0xfa, 0x11, 0xac, 0x75, 0xcc, 0x11, 0xb1, 0x1f, 0x13, 0x8f, 0xf8, + 0x8e, 0x85, 0x49, 0x40, 0x27, 0xbe, 0x45, 0xd8, 0x5c, 0x0f, 0x1d, 0xcf, 0x8e, 0xe6, 0xca, 0x7e, + 0xa7, 0x6b, 0x51, 0x35, 0x78, 0xad, 0xe5, 0x04, 0x96, 0x4f, 0x42, 0xf2, 0xb9, 0x95, 0x2c, 0x46, + 0x4a, 0x4e, 0x33, 0xb0, 0x3a, 0x2f, 0xfd, 0x33, 0x70, 0x9d, 0x99, 0xd8, 0x36, 0x7c, 0x49, 0x31, + 0x82, 0x31, 0xb1, 0xb8, 0xb2, 0xf2, 0x83, 0x77, 0xd2, 0x2c, 0x94, 0x36, 0x93, 0xed, 0x05, 0x7c, + 0x8d, 0xab, 0x89, 0x08, 0xfd, 0x31, 0xb1, 0x90, 0x05, 0x37, 0x6c, 0x39, 0xe8, 0x39, 0xf5, 0x59, + 0xae, 0x3e, 0x75, 0x19, 0x2f, 0x98, 0xe6, 0xf6, 0x02, 0x5e, 0x8b, 0x94, 0x25, 0x3b, 0x69, 0x02, + 0x14, 0x23, 0xdd, 0xea, 0xf7, 0x32, 0x50, 0x8a, 0x98, 0x01, 0xfa, 0x32, 0x94, 0x3c, 0xd3, 0xa3, + 0x86, 0x35, 0x9e, 0x04, 
0x7c, 0x42, 0x8b, 0xcd, 0xca, 0xd9, 0xe9, 0x66, 0xb1, 0x63, 0x7a, 0x54, + 0xeb, 0xed, 0x06, 0xb8, 0xc8, 0xd8, 0xda, 0x78, 0x12, 0xa0, 0x37, 0xa0, 0x32, 0x22, 0x23, 0xea, + 0x4f, 0x8d, 0xbd, 0x69, 0x48, 0x02, 0x69, 0xb6, 0xb2, 0xa0, 0x35, 0x19, 0x09, 0x7d, 0x03, 0x96, + 0x86, 0x62, 0x48, 0xeb, 0x8b, 0x7c, 0xfb, 0xbc, 0x99, 0x36, 0xfa, 0xb9, 0x51, 0xe3, 0x48, 0x46, + 0xfd, 0xcd, 0x0c, 0xac, 0xc5, 0x54, 0xf2, 0x0b, 0x13, 0xc7, 0x27, 0x23, 0xe2, 0x85, 0x01, 0xfa, + 0x1a, 0x14, 0x5c, 0x67, 0xe4, 0x84, 0x81, 0xb4, 0xf9, 0xeb, 0x69, 0x6a, 0xe3, 0x49, 0x61, 0x09, + 0x46, 0x0d, 0xa8, 0xf8, 0x24, 0x20, 0xfe, 0x91, 0xd8, 0xf1, 0xd2, 0xa2, 0x57, 0x08, 0x9f, 0x13, + 0x51, 0xb7, 0xa0, 0xd8, 0x73, 0xcd, 0x70, 0x9f, 0xfa, 0x23, 0xa4, 0x42, 0xc5, 0xf4, 0xad, 0x03, + 0x27, 0x24, 0x56, 0x38, 0xf1, 0xa3, 0xd3, 0x77, 0x8e, 0x86, 0x6e, 0x40, 0x96, 0x8a, 0x8e, 0x4a, + 0xcd, 0xc2, 0xd9, 0xe9, 0x66, 0xb6, 0xdb, 0xc7, 0x59, 0x1a, 0xa8, 0x1f, 0xc0, 0xb5, 0x9e, 0x3b, + 0x19, 0x3a, 0x5e, 0x8b, 0x04, 0x96, 0xef, 0x8c, 0x99, 0x76, 0xb6, 0x2b, 0x99, 0x8f, 0x8a, 0x76, + 0x25, 0xfb, 0x1d, 0x1f, 0xed, 0xec, 0xec, 0x68, 0xab, 0xbf, 0x9e, 0x85, 0x6b, 0xba, 0x37, 0x74, + 0x3c, 0x92, 0x94, 0xbe, 0x0d, 0x2b, 0x84, 0x13, 0x8d, 0x23, 0xe1, 0x6e, 0xa4, 0x9e, 0x65, 0x41, + 0x8d, 0x7c, 0x50, 0x7b, 0xce, 0x2f, 0xdc, 0x4f, 0x9b, 0xfe, 0x2b, 0xda, 0x53, 0xbd, 0x83, 0x0e, + 0x4b, 0x63, 0x3e, 0x89, 0x40, 0x2e, 0xef, 0xed, 0x34, 0x5d, 0xaf, 0xcc, 0x33, 0x72, 0x12, 0x52, + 0xf6, 0x27, 0x71, 0x12, 0x7f, 0x9b, 0x85, 0xd5, 0x0e, 0xb5, 0xcf, 0xd9, 0xa1, 0x0a, 0xc5, 0x03, + 0x1a, 0x84, 0x09, 0x87, 0x18, 0xb7, 0xd1, 0x23, 0x28, 0x8e, 0xe5, 0xf2, 0xc9, 0xd5, 0xbf, 0x95, + 0x3e, 0x64, 0x81, 0xc1, 0x31, 0x1a, 0x7d, 0x00, 0xa5, 0xe8, 0xc8, 0xb0, 0xd9, 0x7e, 0x86, 0x8d, + 0x33, 0xc3, 0xa3, 0x6f, 0x40, 0x41, 0x2c, 0xc2, 0x7a, 0x8e, 0x4b, 0xde, 0xfe, 0x4c, 0x36, 0xc7, + 0x52, 0x08, 0x3d, 0x86, 0x62, 0xe8, 0x06, 0x86, 0xe3, 0xed, 0xd3, 0xf5, 0x3c, 0x57, 0xb0, 0x99, + 0xea, 0x64, 0xa8, 0x4d, 0x06, 0x3b, 0xfd, 0xb6, 0xb7, 0x4f, 0x9b, 0xe5, 0xb3, 0xd3, 0xcd, 0x25, + 0xd9, 0xc0, 0x4b, 0xa1, 0x1b, 0xb0, 0x1f, 0xe8, 0x16, 0xe4, 0xf6, 0x9d, 0x71, 0xb0, 0x5e, 0xa8, + 0x65, 0xde, 0x29, 0x36, 0x8b, 0x67, 0xa7, 0x9b, 0xb9, 0xad, 0x76, 0xaf, 0x8f, 0x39, 0x55, 0xfd, + 0xad, 0x0c, 0x94, 0x13, 0x3a, 0xd0, 0xeb, 0x00, 0xa1, 0x3f, 0x09, 0x42, 0xc3, 0xa7, 0x34, 0xe4, + 0xa6, 0xac, 0xe0, 0x12, 0xa7, 0x60, 0x4a, 0x43, 0x54, 0x87, 0xeb, 0x16, 0xf1, 0x43, 0xc3, 0x09, + 0x82, 0x09, 0xf1, 0x8d, 0x60, 0xb2, 0xf7, 0x31, 0xb1, 0x42, 0x6e, 0xd6, 0x0a, 0xbe, 0xc6, 0x58, + 0x6d, 0xce, 0xe9, 0x0b, 0x06, 0x7a, 0x08, 0x37, 0x92, 0xf8, 0xf1, 0x64, 0xcf, 0x75, 0x2c, 0x83, + 0x2d, 0xf5, 0x22, 0x17, 0xb9, 0x3e, 0x13, 0xe9, 0x71, 0xde, 0x13, 0x32, 0x55, 0x7f, 0x98, 0x01, + 0x05, 0x9b, 0xfb, 0xe1, 0x53, 0x32, 0xda, 0x23, 0x7e, 0x3f, 0x34, 0xc3, 0x49, 0x80, 0x6e, 0x40, + 0xc1, 0x25, 0xa6, 0x4d, 0x7c, 0x3e, 0xa8, 0x22, 0x96, 0x2d, 0xb4, 0xcb, 0xce, 0xb7, 0x69, 0x1d, + 0x98, 0x7b, 0x8e, 0xeb, 0x84, 0x53, 0x3e, 0x94, 0x95, 0xf4, 0x0d, 0x3e, 0xaf, 0xb3, 0x8e, 0x13, + 0x82, 0xf8, 0x9c, 0x1a, 0xb4, 0x0e, 0x4b, 0x23, 0x12, 0x04, 0xe6, 0x90, 0xf0, 0x91, 0x96, 0x70, + 0xd4, 0x54, 0x3f, 0x80, 0x4a, 0x52, 0x0e, 0x95, 0x61, 0x69, 0xb7, 0xf3, 0xa4, 0xd3, 0x7d, 0xde, + 0x51, 0x16, 0xd0, 0x2a, 0x94, 0x77, 0x3b, 0x58, 0x6f, 0x68, 0xdb, 0x8d, 0xe6, 0x8e, 0xae, 0x64, + 0xd0, 0x32, 0x94, 0x66, 0xcd, 0xac, 0xfa, 0xa7, 0x19, 0x00, 0x66, 0x6e, 0x39, 0xa9, 0xf7, 0x21, + 0x1f, 0x84, 0x66, 0x28, 0xf6, 0xec, 0xca, 0x83, 0xb7, 0x2e, 0x5a, 0x61, 0x39, 0x5e, 0xf6, 0x8f, + 0x60, 0x21, 0x92, 0x1c, 0x61, 0xf6, 0xdc, 0x08, 
0x99, 0xfb, 0x30, 0x6d, 0xdb, 0x97, 0x03, 0xe7, + 0xbf, 0xd5, 0x0f, 0x20, 0xcf, 0xa5, 0xcf, 0x0f, 0xb7, 0x08, 0xb9, 0x16, 0xfb, 0x95, 0x41, 0x25, + 0xc8, 0x63, 0xbd, 0xd1, 0xfa, 0x8e, 0x92, 0x45, 0x0a, 0x54, 0x5a, 0xed, 0xbe, 0xd6, 0xed, 0x74, + 0x74, 0x6d, 0xa0, 0xb7, 0x94, 0x45, 0xf5, 0x36, 0xe4, 0xdb, 0x23, 0xa6, 0xf9, 0x16, 0x3b, 0x10, + 0xfb, 0xc4, 0x27, 0x9e, 0x15, 0x9d, 0xb3, 0x19, 0x41, 0xfd, 0x71, 0x19, 0xf2, 0x4f, 0xe9, 0xc4, + 0x0b, 0xd1, 0x83, 0x84, 0x53, 0x5b, 0x49, 0x8f, 0x1f, 0x38, 0xb0, 0x3e, 0x98, 0x8e, 0x89, 0x74, + 0x7a, 0x37, 0xa0, 0x20, 0x8e, 0x8e, 0x9c, 0x8e, 0x6c, 0x31, 0x7a, 0x68, 0xfa, 0x43, 0x12, 0xca, + 0xf9, 0xc8, 0x16, 0x7a, 0x87, 0xdd, 0x67, 0xa6, 0x4d, 0x3d, 0x77, 0xca, 0x4f, 0x58, 0x51, 0x5c, + 0x5a, 0x98, 0x98, 0x76, 0xd7, 0x73, 0xa7, 0x38, 0xe6, 0xa2, 0x6d, 0xa8, 0xec, 0x39, 0x9e, 0x6d, + 0xd0, 0xb1, 0xb8, 0x02, 0xf2, 0x17, 0x9f, 0x47, 0x31, 0xaa, 0xa6, 0xe3, 0xd9, 0x5d, 0x01, 0xc6, + 0xe5, 0xbd, 0x59, 0x03, 0x75, 0x60, 0xe5, 0x88, 0xba, 0x93, 0x11, 0x89, 0x75, 0x15, 0xb8, 0xae, + 0xb7, 0x2f, 0xd6, 0xf5, 0x8c, 0xe3, 0x23, 0x6d, 0xcb, 0x47, 0xc9, 0x26, 0x7a, 0x02, 0xcb, 0xe1, + 0x68, 0xbc, 0x1f, 0xc4, 0xea, 0x96, 0xb8, 0xba, 0x2f, 0x5d, 0x62, 0x30, 0x06, 0x8f, 0xb4, 0x55, + 0xc2, 0x44, 0x0b, 0x3d, 0x86, 0xb2, 0x45, 0xbd, 0xc0, 0x09, 0x42, 0xe2, 0x59, 0xd3, 0xf5, 0x22, + 0xb7, 0xfd, 0x25, 0xb3, 0xd4, 0x66, 0x60, 0x9c, 0x94, 0xac, 0xfe, 0xea, 0x22, 0x94, 0x13, 0x26, + 0x40, 0x7d, 0x28, 0x8f, 0x7d, 0x3a, 0x36, 0x87, 0xfc, 0x3e, 0x94, 0x8b, 0x7a, 0xff, 0x33, 0x99, + 0xaf, 0xde, 0x9b, 0x09, 0xe2, 0xa4, 0x16, 0xf5, 0x24, 0x0b, 0xe5, 0x04, 0x13, 0xdd, 0x81, 0x22, + 0xee, 0xe1, 0xf6, 0xb3, 0xc6, 0x40, 0x57, 0x16, 0xaa, 0xb7, 0x8e, 0x4f, 0x6a, 0xeb, 0x5c, 0x5b, + 0x52, 0x41, 0xcf, 0x77, 0x8e, 0xd8, 0x1e, 0x7e, 0x07, 0x96, 0x22, 0x68, 0xa6, 0xfa, 0xc5, 0xe3, + 0x93, 0xda, 0x6b, 0xf3, 0xd0, 0x04, 0x12, 0xf7, 0xb7, 0x1b, 0x58, 0x6f, 0x29, 0xd9, 0x74, 0x24, + 0xee, 0x1f, 0x98, 0x3e, 0xb1, 0xd1, 0x97, 0xa0, 0x20, 0x81, 0x8b, 0xd5, 0xea, 0xf1, 0x49, 0xed, + 0xc6, 0x3c, 0x70, 0x86, 0xc3, 0xfd, 0x9d, 0xc6, 0x33, 0x5d, 0xc9, 0xa5, 0xe3, 0x70, 0xdf, 0x35, + 0x8f, 0x08, 0x7a, 0x0b, 0xf2, 0x02, 0x96, 0xaf, 0xde, 0x3c, 0x3e, 0xa9, 0x7d, 0xe1, 0x15, 0x75, + 0x0c, 0x55, 0x5d, 0xff, 0x8d, 0x3f, 0xdc, 0x58, 0xf8, 0xcb, 0x3f, 0xda, 0x50, 0xe6, 0xd9, 0xd5, + 0xff, 0xc9, 0xc0, 0xf2, 0xb9, 0xbd, 0x83, 0x54, 0x28, 0x78, 0xd4, 0xa2, 0x63, 0x71, 0x4d, 0x16, + 0x9b, 0x70, 0x76, 0xba, 0x59, 0xe8, 0x50, 0x8d, 0x8e, 0xa7, 0x58, 0x72, 0xd0, 0x93, 0xb9, 0x8b, + 0xfe, 0xe1, 0x67, 0xdc, 0x98, 0xa9, 0x57, 0xfd, 0x87, 0xb0, 0x6c, 0xfb, 0xce, 0x11, 0xf1, 0x0d, + 0x8b, 0x7a, 0xfb, 0xce, 0x50, 0x5e, 0x81, 0xd5, 0xd4, 0x68, 0x94, 0x03, 0x71, 0x45, 0x08, 0x68, + 0x1c, 0xff, 0x13, 0x5c, 0xf2, 0xd5, 0x67, 0x50, 0x49, 0x6e, 0x75, 0x76, 0x2f, 0x05, 0xce, 0x2f, + 0x12, 0x19, 0x76, 0xf2, 0x20, 0x15, 0x97, 0x18, 0x45, 0x04, 0x9d, 0x6f, 0x43, 0x6e, 0x44, 0x6d, + 0xa1, 0x67, 0xb9, 0x79, 0x9d, 0xc5, 0x1a, 0xff, 0x7c, 0xba, 0x59, 0xa6, 0x41, 0x7d, 0xcb, 0x71, + 0xc9, 0x53, 0x6a, 0x13, 0xcc, 0x01, 0xea, 0x11, 0xe4, 0x98, 0xcf, 0x41, 0x5f, 0x84, 0x5c, 0xb3, + 0xdd, 0x69, 0x29, 0x0b, 0xd5, 0x6b, 0xc7, 0x27, 0xb5, 0x65, 0x6e, 0x12, 0xc6, 0x60, 0x7b, 0x17, + 0x6d, 0x42, 0xe1, 0x59, 0x77, 0x67, 0xf7, 0x29, 0xdb, 0x5e, 0xd7, 0x8f, 0x4f, 0x6a, 0xab, 0x31, + 0x5b, 0x18, 0x0d, 0xbd, 0x0e, 0xf9, 0xc1, 0xd3, 0xde, 0x56, 0x5f, 0xc9, 0x56, 0xd1, 0xf1, 0x49, + 0x6d, 0x25, 0xe6, 0xf3, 0x31, 0x57, 0xaf, 0xc9, 0x55, 0x2d, 0xc5, 0x74, 0xf5, 0x07, 0x19, 0x28, + 0x27, 0x0e, 0x1c, 0xdb, 0x98, 0x2d, 0x7d, 0xab, 0xb1, 0xbb, 0x33, 0x50, 
0x16, 0x12, 0x1b, 0x33, + 0x01, 0x69, 0x91, 0x7d, 0x73, 0xe2, 0x32, 0x3f, 0x07, 0x5a, 0xb7, 0xd3, 0x6f, 0xf7, 0x07, 0x7a, + 0x67, 0xa0, 0x64, 0xaa, 0xeb, 0xc7, 0x27, 0xb5, 0xb5, 0x79, 0xf0, 0xd6, 0xc4, 0x75, 0xd9, 0xd6, + 0xd4, 0x1a, 0xda, 0x36, 0xdf, 0xeb, 0xb3, 0xad, 0x99, 0x40, 0x69, 0xa6, 0x75, 0x40, 0x6c, 0xf4, + 0x2e, 0x94, 0x5a, 0xfa, 0x8e, 0xfe, 0xb8, 0xc1, 0xbd, 0x7b, 0xf5, 0xf5, 0xe3, 0x93, 0xda, 0xcd, + 0x57, 0x7b, 0x77, 0xc9, 0xd0, 0x0c, 0x89, 0x3d, 0xb7, 0x45, 0x13, 0x10, 0xf5, 0xbf, 0xb2, 0xb0, + 0x8c, 0x59, 0xb2, 0xec, 0x87, 0x3d, 0xea, 0x3a, 0xd6, 0x14, 0xf5, 0xa0, 0x64, 0x51, 0xcf, 0x76, + 0x12, 0x7e, 0xe2, 0xc1, 0x05, 0x01, 0xd3, 0x4c, 0x2a, 0x6a, 0x69, 0x91, 0x24, 0x9e, 0x29, 0x41, + 0x77, 0x21, 0x6f, 0x13, 0xd7, 0x9c, 0xca, 0xc8, 0xed, 0x66, 0x5d, 0xa4, 0xe3, 0xf5, 0x28, 0x1d, + 0xaf, 0xb7, 0x64, 0x3a, 0x8e, 0x05, 0x8e, 0x67, 0x28, 0xe6, 0x4b, 0xc3, 0x0c, 0x43, 0x32, 0x1a, + 0x87, 0x22, 0x6c, 0xcb, 0xe1, 0xf2, 0xc8, 0x7c, 0xd9, 0x90, 0x24, 0x74, 0x1f, 0x0a, 0x2f, 0x1c, + 0xcf, 0xa6, 0x2f, 0x64, 0x64, 0x76, 0x89, 0x52, 0x09, 0x54, 0x8f, 0x59, 0x48, 0x32, 0x37, 0x4c, + 0xb6, 0x87, 0x3a, 0xdd, 0x8e, 0x1e, 0xed, 0x21, 0xc9, 0xef, 0x7a, 0x1d, 0xea, 0xb1, 0xf3, 0x0f, + 0xdd, 0x8e, 0xb1, 0xd5, 0x68, 0xef, 0xec, 0x62, 0xb6, 0x8f, 0xd6, 0x8e, 0x4f, 0x6a, 0x4a, 0x0c, + 0xd9, 0x32, 0x1d, 0x97, 0xa5, 0x0a, 0x37, 0x61, 0xb1, 0xd1, 0xf9, 0x8e, 0x92, 0xad, 0x2a, 0xc7, + 0x27, 0xb5, 0x4a, 0xcc, 0x6e, 0x78, 0xd3, 0x99, 0xdd, 0xe7, 0xfb, 0x55, 0xff, 0x6e, 0x11, 0x2a, + 0xbb, 0x63, 0xdb, 0x0c, 0x89, 0x38, 0x67, 0xa8, 0x06, 0xe5, 0xb1, 0xe9, 0x9b, 0xae, 0x4b, 0x5c, + 0x27, 0x18, 0xc9, 0x42, 0x43, 0x92, 0x84, 0xde, 0xfb, 0xac, 0x66, 0x6c, 0x16, 0xd9, 0xd9, 0xf9, + 0xde, 0xbf, 0x6e, 0x66, 0x22, 0x83, 0xee, 0xc2, 0xca, 0xbe, 0x18, 0xad, 0x61, 0x5a, 0x7c, 0x61, + 0x17, 0xf9, 0xc2, 0xd6, 0xd3, 0x16, 0x36, 0x39, 0xac, 0xba, 0x9c, 0x64, 0x83, 0x4b, 0xe1, 0xe5, + 0xfd, 0x64, 0x13, 0x3d, 0x84, 0xa5, 0x11, 0xf5, 0x9c, 0x90, 0xfa, 0x57, 0xaf, 0x42, 0x84, 0x44, + 0x77, 0xe0, 0x1a, 0x5b, 0xdc, 0x68, 0x3c, 0x9c, 0xcd, 0xaf, 0xf3, 0x2c, 0x5e, 0x1d, 0x99, 0x2f, + 0x65, 0x87, 0x98, 0x91, 0x51, 0x13, 0xf2, 0xd4, 0x67, 0xf1, 0x62, 0x81, 0x0f, 0xf7, 0xdd, 0x2b, + 0x87, 0x2b, 0x1a, 0x5d, 0x26, 0x83, 0x85, 0xa8, 0xfa, 0x75, 0x58, 0x3e, 0x37, 0x09, 0x16, 0x26, + 0xf5, 0x1a, 0xbb, 0x7d, 0x5d, 0x59, 0x40, 0x15, 0x28, 0x6a, 0xdd, 0xce, 0xa0, 0xdd, 0xd9, 0x65, + 0x71, 0x5e, 0x05, 0x8a, 0xb8, 0xbb, 0xb3, 0xd3, 0x6c, 0x68, 0x4f, 0x94, 0xac, 0x5a, 0x87, 0x72, + 0x42, 0x1b, 0x5a, 0x01, 0xe8, 0x0f, 0xba, 0x3d, 0x63, 0xab, 0x8d, 0xfb, 0x03, 0x11, 0x25, 0xf6, + 0x07, 0x0d, 0x3c, 0x90, 0x84, 0x8c, 0xfa, 0x1f, 0xd9, 0x68, 0x45, 0x65, 0x60, 0xd8, 0x3c, 0x1f, + 0x18, 0x5e, 0x32, 0x78, 0x19, 0x1a, 0xce, 0x1a, 0x71, 0x80, 0xf8, 0x1e, 0x00, 0xdf, 0x38, 0xc4, + 0x36, 0xcc, 0x50, 0x2e, 0x7c, 0xf5, 0x15, 0x23, 0x0f, 0xa2, 0x7a, 0x17, 0x2e, 0x49, 0x74, 0x23, + 0x44, 0xdf, 0x80, 0x8a, 0x45, 0x47, 0x63, 0x97, 0x48, 0xe1, 0xc5, 0x2b, 0x85, 0xcb, 0x31, 0xbe, + 0x11, 0x26, 0x43, 0xd3, 0xdc, 0xf9, 0xe0, 0xf9, 0xd7, 0x32, 0x91, 0x65, 0x52, 0xa2, 0xd1, 0x0a, + 0x14, 0x77, 0x7b, 0xad, 0xc6, 0xa0, 0xdd, 0x79, 0xac, 0x64, 0x10, 0x40, 0x81, 0x9b, 0xba, 0xa5, + 0x64, 0x59, 0x14, 0xad, 0x75, 0x9f, 0xf6, 0x76, 0x74, 0xee, 0xb1, 0xd0, 0x1a, 0x28, 0x91, 0xb1, + 0x0d, 0x6e, 0x48, 0xbd, 0xa5, 0xe4, 0xd0, 0x75, 0x58, 0x8d, 0xa9, 0x52, 0x32, 0x8f, 0x6e, 0x00, + 0x8a, 0x89, 0x33, 0x15, 0x05, 0xf5, 0x97, 0x61, 0x55, 0xa3, 0x5e, 0x68, 0x3a, 0x5e, 0x9c, 0x61, + 0x3c, 0x60, 0x93, 0x96, 0x24, 0xc3, 0x91, 0x75, 0xa2, 0xe6, 0xea, 0xd9, 0xe9, 0x66, 0x39, 0x86, + 
0xb6, 0x5b, 0x3c, 0x54, 0x92, 0x0d, 0x9b, 0x9d, 0xdf, 0xb1, 0x63, 0x73, 0xe3, 0xe6, 0x9b, 0x4b, + 0x67, 0xa7, 0x9b, 0x8b, 0xbd, 0x76, 0x0b, 0x33, 0x1a, 0xfa, 0x22, 0x94, 0xc8, 0x4b, 0x27, 0x34, + 0x2c, 0x76, 0x2f, 0x31, 0x03, 0xe6, 0x71, 0x91, 0x11, 0x34, 0x76, 0x0d, 0x35, 0x01, 0x7a, 0xd4, + 0x0f, 0x65, 0xcf, 0x5f, 0x85, 0xfc, 0x98, 0xfa, 0xbc, 0xb2, 0x71, 0x61, 0xbd, 0x8d, 0xc1, 0xc5, + 0x46, 0xc5, 0x02, 0xac, 0xfe, 0xee, 0x22, 0xc0, 0xc0, 0x0c, 0x0e, 0xa5, 0x92, 0x47, 0x50, 0x8a, + 0x6b, 0x97, 0xb2, 0x44, 0x72, 0xe9, 0x6a, 0xc7, 0x60, 0xf4, 0x30, 0xda, 0x6c, 0x22, 0x77, 0x4a, + 0x4d, 0x71, 0xa3, 0x8e, 0xd2, 0xd2, 0x8f, 0xf3, 0x09, 0x12, 0xbb, 0xe6, 0x89, 0xef, 0xcb, 0x95, + 0x67, 0x3f, 0x91, 0xc6, 0xaf, 0x05, 0x61, 0x34, 0x19, 0x7d, 0xa7, 0x16, 0x85, 0xe6, 0x56, 0x64, + 0x7b, 0x01, 0xcf, 0xe4, 0xd0, 0x87, 0x50, 0x66, 0xf3, 0x36, 0x02, 0xce, 0x93, 0x81, 0xf7, 0x85, + 0xa6, 0x12, 0x1a, 0x30, 0x8c, 0x67, 0x56, 0x7e, 0x1d, 0xc0, 0x1c, 0x8f, 0x5d, 0x87, 0xd8, 0xc6, + 0xde, 0x94, 0x47, 0xda, 0x25, 0x5c, 0x92, 0x94, 0xe6, 0x94, 0x1d, 0x97, 0x88, 0x6d, 0x86, 0x3c, + 0x7a, 0xbe, 0xc2, 0x80, 0x12, 0xdd, 0x08, 0x9b, 0x0a, 0xac, 0xf8, 0x13, 0x8f, 0x19, 0x54, 0x8e, + 0x4e, 0xfd, 0x93, 0x2c, 0xbc, 0xd6, 0x21, 0xe1, 0x0b, 0xea, 0x1f, 0x36, 0xc2, 0xd0, 0xb4, 0x0e, + 0x46, 0xc4, 0x93, 0xcb, 0x97, 0x48, 0x68, 0x32, 0xe7, 0x12, 0x9a, 0x75, 0x58, 0x32, 0x5d, 0xc7, + 0x0c, 0x88, 0x08, 0xde, 0x4a, 0x38, 0x6a, 0xb2, 0xb4, 0x8b, 0x25, 0x71, 0x24, 0x08, 0x88, 0xa8, + 0xba, 0xb0, 0x81, 0x47, 0x04, 0xf4, 0x5d, 0xb8, 0x21, 0xc3, 0x34, 0x33, 0xee, 0x8a, 0x25, 0x14, + 0x51, 0xf9, 0x56, 0x4f, 0xcd, 0x2a, 0xd3, 0x07, 0x27, 0xe3, 0xb8, 0x19, 0xb9, 0x3b, 0x0e, 0x65, + 0x54, 0xb8, 0x66, 0xa7, 0xb0, 0xaa, 0x8f, 0xe1, 0xe6, 0x85, 0x22, 0x9f, 0xab, 0xaa, 0xf3, 0x8f, + 0x59, 0x80, 0x76, 0xaf, 0xf1, 0x54, 0x1a, 0xa9, 0x05, 0x85, 0x7d, 0x73, 0xe4, 0xb8, 0xd3, 0xcb, + 0x3c, 0xe0, 0x0c, 0x5f, 0x6f, 0x08, 0x73, 0x6c, 0x71, 0x19, 0x2c, 0x65, 0x79, 0x4e, 0x39, 0xd9, + 0xf3, 0x48, 0x18, 0xe7, 0x94, 0xbc, 0xc5, 0x86, 0xe1, 0x9b, 0x5e, 0xbc, 0x75, 0x45, 0x83, 0x2d, + 0x00, 0x0b, 0x79, 0x5e, 0x98, 0xd3, 0xc8, 0x6d, 0xc9, 0x26, 0xda, 0xe6, 0xb5, 0x53, 0xe2, 0x1f, + 0x11, 0x7b, 0x3d, 0xcf, 0x8d, 0x7a, 0xd5, 0x78, 0xb0, 0x84, 0x0b, 0xdb, 0xc5, 0xd2, 0xd5, 0x0f, + 0x78, 0xc8, 0x34, 0x63, 0x7d, 0x2e, 0x1b, 0xdd, 0x83, 0xe5, 0x73, 0xf3, 0x7c, 0x25, 0x99, 0x6f, + 0xf7, 0x9e, 0x7d, 0x55, 0xc9, 0xc9, 0x5f, 0x5f, 0x57, 0x0a, 0xea, 0xdf, 0x2c, 0x0a, 0x47, 0x23, + 0xad, 0x9a, 0xfe, 0x66, 0x50, 0xe4, 0xbb, 0xdb, 0xa2, 0xae, 0x74, 0x00, 0x6f, 0x5f, 0xee, 0x7f, + 0x58, 0x4e, 0xc7, 0xe1, 0x38, 0x16, 0x44, 0x9b, 0x50, 0x16, 0xbb, 0xd8, 0x60, 0x07, 0x8e, 0x9b, + 0x75, 0x19, 0x83, 0x20, 0x31, 0x49, 0x74, 0x1b, 0x56, 0x78, 0xf1, 0x27, 0x38, 0x20, 0xb6, 0xc0, + 0xe4, 0x38, 0x66, 0x39, 0xa6, 0x72, 0xd8, 0x53, 0xa8, 0x48, 0x82, 0xc1, 0xe3, 0xf9, 0x3c, 0x1f, + 0xd0, 0x9d, 0xab, 0x06, 0x24, 0x44, 0x78, 0x98, 0x5f, 0x1e, 0xcf, 0x1a, 0xea, 0xcf, 0x43, 0x31, + 0x1a, 0x2c, 0x5a, 0x87, 0xc5, 0x81, 0xd6, 0x53, 0x16, 0xaa, 0xab, 0xc7, 0x27, 0xb5, 0x72, 0x44, + 0x1e, 0x68, 0x3d, 0xc6, 0xd9, 0x6d, 0xf5, 0x94, 0xcc, 0x79, 0xce, 0x6e, 0xab, 0x87, 0xaa, 0x90, + 0xeb, 0x6b, 0x83, 0x5e, 0x14, 0x9f, 0x45, 0x2c, 0x46, 0xab, 0xe6, 0x58, 0x7c, 0xa6, 0xee, 0x43, + 0x39, 0xd1, 0x3b, 0x7a, 0x13, 0x96, 0xda, 0x9d, 0xc7, 0x58, 0xef, 0xf7, 0x95, 0x85, 0xea, 0x8d, + 0xe3, 0x93, 0x1a, 0x4a, 0x70, 0xdb, 0xde, 0x90, 0xad, 0x1d, 0x7a, 0x1d, 0x72, 0xdb, 0x5d, 0x76, + 0xef, 0x8b, 0xe4, 0x22, 0x81, 0xd8, 0xa6, 0x41, 0x58, 0xbd, 0x2e, 0x03, 0xbf, 0xa4, 0x62, 0xf5, + 0xf7, 0x32, 0x50, 0x10, 
0x07, 0x2d, 0x75, 0x11, 0x1b, 0xb0, 0x14, 0x95, 0x10, 0x44, 0xe2, 0xf7, + 0xf6, 0xc5, 0x49, 0x5a, 0x5d, 0xe6, 0x54, 0x62, 0x6b, 0x46, 0x72, 0xd5, 0xf7, 0xa1, 0x92, 0x64, + 0x7c, 0xae, 0x8d, 0xf9, 0x5d, 0x28, 0xb3, 0xbd, 0x1f, 0x25, 0x6b, 0x0f, 0xa0, 0x20, 0x9c, 0x45, + 0x7c, 0x0f, 0x5d, 0x9c, 0x31, 0x4a, 0x24, 0x7a, 0x04, 0x4b, 0x22, 0xcb, 0x8c, 0xea, 0xca, 0x1b, + 0x97, 0x9f, 0x30, 0x1c, 0xc1, 0xd5, 0x0f, 0x21, 0xd7, 0x23, 0xc4, 0x67, 0xb6, 0xf7, 0xa8, 0x4d, + 0x66, 0x57, 0xb7, 0x4c, 0x90, 0x6d, 0xd2, 0x6e, 0xb1, 0x04, 0xd9, 0x26, 0x6d, 0x3b, 0xae, 0x8d, + 0x65, 0x13, 0xb5, 0xb1, 0x01, 0x54, 0x9e, 0x13, 0x67, 0x78, 0x10, 0x12, 0x9b, 0x2b, 0x7a, 0x17, + 0x72, 0x63, 0x12, 0x0f, 0x7e, 0x3d, 0x75, 0xf3, 0x11, 0xe2, 0x63, 0x8e, 0x62, 0x3e, 0xe6, 0x05, + 0x97, 0x96, 0x8f, 0x21, 0xb2, 0xa5, 0xfe, 0x43, 0x16, 0x56, 0xda, 0x41, 0x30, 0x31, 0x3d, 0x2b, + 0x8a, 0xea, 0xbe, 0x79, 0x3e, 0xaa, 0x4b, 0x7d, 0x35, 0x3a, 0x2f, 0x72, 0xbe, 0xe4, 0x27, 0x6f, + 0xd6, 0x6c, 0x7c, 0xb3, 0xaa, 0x3f, 0xce, 0x44, 0x75, 0xbd, 0xdb, 0x09, 0x57, 0x20, 0x72, 0xc4, + 0xa4, 0x26, 0xb2, 0xeb, 0x1d, 0x7a, 0xf4, 0x85, 0x87, 0xde, 0x80, 0x3c, 0xd6, 0x3b, 0xfa, 0x73, + 0x25, 0x23, 0xb6, 0xe7, 0x39, 0x10, 0x26, 0x1e, 0x79, 0xc1, 0x34, 0xf5, 0xf4, 0x4e, 0x8b, 0x45, + 0x61, 0xd9, 0x14, 0x4d, 0x3d, 0xe2, 0xd9, 0x8e, 0x37, 0x44, 0x6f, 0x42, 0xa1, 0xdd, 0xef, 0xef, + 0xf2, 0x14, 0xf2, 0xb5, 0xe3, 0x93, 0xda, 0xf5, 0x73, 0x28, 0x5e, 0xd3, 0xb5, 0x19, 0x88, 0xa5, + 0x40, 0x2c, 0x3e, 0x4b, 0x01, 0xb1, 0xd8, 0x5a, 0x80, 0x70, 0x77, 0xd0, 0x18, 0xe8, 0x4a, 0x3e, + 0x05, 0x84, 0x29, 0xfb, 0x2b, 0x8f, 0xdb, 0xbf, 0x64, 0x41, 0x69, 0x58, 0x16, 0x19, 0x87, 0x8c, + 0x2f, 0xb3, 0xce, 0x01, 0x14, 0xc7, 0xec, 0x97, 0x43, 0xa2, 0x08, 0xea, 0x51, 0xea, 0xbb, 0xe7, + 0x9c, 0x5c, 0x1d, 0x53, 0x97, 0x34, 0xec, 0x91, 0x13, 0x04, 0x0e, 0xf5, 0x04, 0x0d, 0xc7, 0x9a, + 0xaa, 0xff, 0x99, 0x81, 0xeb, 0x29, 0x08, 0x74, 0x0f, 0x72, 0x3e, 0x75, 0xa3, 0x35, 0xbc, 0x75, + 0x51, 0xc9, 0x96, 0x89, 0x62, 0x8e, 0x44, 0x1b, 0x00, 0xe6, 0x24, 0xa4, 0x26, 0xef, 0x9f, 0xaf, + 0x5e, 0x11, 0x27, 0x28, 0xe8, 0x39, 0x14, 0x02, 0x62, 0xf9, 0x24, 0x8a, 0xb3, 0x3f, 0xfc, 0xbf, + 0x8e, 0xbe, 0xde, 0xe7, 0x6a, 0xb0, 0x54, 0x57, 0xad, 0x43, 0x41, 0x50, 0xd8, 0xb6, 0xb7, 0xcd, + 0xd0, 0x94, 0x05, 0x7d, 0xfe, 0x9b, 0xed, 0x26, 0xd3, 0x1d, 0x46, 0xbb, 0xc9, 0x74, 0x87, 0xea, + 0x5f, 0x67, 0x01, 0xf4, 0x97, 0x21, 0xf1, 0x3d, 0xd3, 0xd5, 0x1a, 0x48, 0x4f, 0xdc, 0x0c, 0x62, + 0xb6, 0x5f, 0x4e, 0x7d, 0xc3, 0x88, 0x25, 0xea, 0x5a, 0x23, 0xe5, 0x6e, 0xb8, 0x09, 0x8b, 0x13, + 0x5f, 0x3e, 0x65, 0x8b, 0x18, 0x79, 0x17, 0xef, 0x60, 0x46, 0x43, 0xfa, 0xcc, 0x6d, 0x2d, 0x5e, + 0xfc, 0x60, 0x9d, 0xe8, 0x20, 0xd5, 0x75, 0xb1, 0x93, 0x6f, 0x99, 0x86, 0x45, 0xe4, 0xad, 0x52, + 0x11, 0x27, 0x5f, 0x6b, 0x68, 0xc4, 0x0f, 0x71, 0xc1, 0x32, 0xd9, 0xff, 0x9f, 0xc8, 0xbf, 0xbd, + 0x0b, 0x30, 0x9b, 0x1a, 0xda, 0x80, 0xbc, 0xb6, 0xd5, 0xef, 0xef, 0x28, 0x0b, 0xc2, 0x81, 0xcf, + 0x58, 0x9c, 0xac, 0xfe, 0x45, 0x16, 0x8a, 0x5a, 0x43, 0x5e, 0xb9, 0x1a, 0x28, 0xdc, 0x2b, 0xf1, + 0x67, 0x10, 0xf2, 0x72, 0xec, 0xf8, 0x53, 0xe9, 0x58, 0x2e, 0x49, 0x78, 0x57, 0x98, 0x08, 0x1b, + 0xb5, 0xce, 0x05, 0x10, 0x86, 0x0a, 0x91, 0x46, 0x30, 0x2c, 0x33, 0xf2, 0xf1, 0x1b, 0x97, 0x1b, + 0x4b, 0xa4, 0x2e, 0xb3, 0x76, 0x80, 0xcb, 0x91, 0x12, 0xcd, 0x0c, 0xd0, 0x7b, 0xb0, 0x1a, 0x38, + 0x43, 0xcf, 0xf1, 0x86, 0x46, 0x64, 0x3c, 0xfe, 0x26, 0xd3, 0xbc, 0x76, 0x76, 0xba, 0xb9, 0xdc, + 0x17, 0x2c, 0x69, 0xc3, 0x65, 0x89, 0xd4, 0xb8, 0x29, 0xd1, 0xd7, 0x61, 0x25, 0x21, 0xca, 0xac, + 0x28, 0xcc, 0xae, 0x9c, 0x9d, 0x6e, 0x56, 0x62, 
0xc9, 0x27, 0x64, 0x8a, 0x2b, 0xb1, 0xe0, 0x13, + 0xc2, 0x6b, 0x33, 0xfb, 0xd4, 0xb7, 0x88, 0xe1, 0xf3, 0x33, 0xcd, 0x6f, 0xf7, 0x1c, 0x2e, 0x73, + 0x9a, 0x38, 0xe6, 0xea, 0x33, 0xb8, 0xde, 0xf5, 0xad, 0x03, 0x12, 0x84, 0xc2, 0x14, 0xd2, 0x8a, + 0x1f, 0xc2, 0xad, 0xd0, 0x0c, 0x0e, 0x8d, 0x03, 0x27, 0x08, 0xa9, 0x3f, 0x35, 0x7c, 0x12, 0x12, + 0x8f, 0xf1, 0x0d, 0xfe, 0xcc, 0x2b, 0x0b, 0x82, 0x37, 0x19, 0x66, 0x5b, 0x40, 0x70, 0x84, 0xd8, + 0x61, 0x00, 0xb5, 0x0d, 0x15, 0x96, 0xc2, 0xc8, 0xa2, 0x1a, 0x9b, 0x3d, 0xb8, 0x74, 0x68, 0x7c, + 0xe6, 0x6b, 0xaa, 0xe4, 0xd2, 0xa1, 0xf8, 0xa9, 0x7e, 0x1b, 0x94, 0x96, 0x13, 0x8c, 0xcd, 0xd0, + 0x3a, 0x88, 0x2a, 0x9d, 0xa8, 0x05, 0xca, 0x01, 0x31, 0xfd, 0x70, 0x8f, 0x98, 0xa1, 0x31, 0x26, + 0xbe, 0x43, 0xed, 0xab, 0x57, 0x79, 0x35, 0x16, 0xe9, 0x71, 0x09, 0xf5, 0xbf, 0x33, 0x00, 0xd8, + 0xdc, 0x8f, 0xa2, 0xb5, 0xaf, 0xc0, 0xb5, 0xc0, 0x33, 0xc7, 0xc1, 0x01, 0x0d, 0x0d, 0xc7, 0x0b, + 0x89, 0x7f, 0x64, 0xba, 0xb2, 0xb8, 0xa3, 0x44, 0x8c, 0xb6, 0xa4, 0xa3, 0x77, 0x01, 0x1d, 0x12, + 0x32, 0x36, 0xa8, 0x6b, 0x1b, 0x11, 0x53, 0x3c, 0x42, 0xe7, 0xb0, 0xc2, 0x38, 0x5d, 0xd7, 0xee, + 0x47, 0x74, 0xd4, 0x84, 0x0d, 0x36, 0x7d, 0xe2, 0x85, 0xbe, 0x43, 0x02, 0x63, 0x9f, 0xfa, 0x46, + 0xe0, 0xd2, 0x17, 0xc6, 0x3e, 0x75, 0x5d, 0xfa, 0x82, 0xf8, 0x51, 0xdd, 0xac, 0xea, 0xd2, 0xa1, + 0x2e, 0x40, 0x5b, 0xd4, 0xef, 0xbb, 0xf4, 0xc5, 0x56, 0x84, 0x60, 0x21, 0xdd, 0x6c, 0xce, 0xa1, + 0x63, 0x1d, 0x46, 0x21, 0x5d, 0x4c, 0x1d, 0x38, 0xd6, 0x21, 0x7a, 0x13, 0x96, 0x89, 0x4b, 0x78, + 0xf9, 0x44, 0xa0, 0xf2, 0x1c, 0x55, 0x89, 0x88, 0x0c, 0xa4, 0x7e, 0x04, 0x8a, 0xee, 0x59, 0xfe, + 0x74, 0x9c, 0x58, 0xf3, 0x77, 0x01, 0x31, 0x27, 0x69, 0xb8, 0xd4, 0x3a, 0x34, 0x46, 0xa6, 0x67, + 0x0e, 0xd9, 0xb8, 0xc4, 0xeb, 0x9f, 0xc2, 0x38, 0x3b, 0xd4, 0x3a, 0x7c, 0x2a, 0xe9, 0xea, 0x7b, + 0x00, 0xfd, 0xb1, 0x4f, 0x4c, 0xbb, 0xcb, 0xa2, 0x09, 0x66, 0x3a, 0xde, 0x32, 0x6c, 0xf9, 0xb6, + 0x4a, 0x7d, 0x79, 0xd4, 0x15, 0xc1, 0x68, 0xc5, 0x74, 0xf5, 0x67, 0xe1, 0x7a, 0xcf, 0x35, 0x2d, + 0xfe, 0x9d, 0x41, 0x2f, 0x7e, 0xce, 0x42, 0x8f, 0xa0, 0x20, 0xa0, 0x72, 0x25, 0x53, 0x8f, 0xdb, + 0xac, 0xcf, 0xed, 0x05, 0x2c, 0xf1, 0xcd, 0x0a, 0xc0, 0x4c, 0x8f, 0xfa, 0x67, 0x19, 0x28, 0xc5, + 0xfa, 0x51, 0x4d, 0xbc, 0xd2, 0x84, 0xbe, 0xe9, 0x78, 0x32, 0xe3, 0x2f, 0xe1, 0x24, 0x09, 0xb5, + 0xa1, 0x3c, 0x8e, 0xa5, 0x2f, 0x8d, 0xe7, 0x52, 0x46, 0x8d, 0x93, 0xb2, 0xe8, 0x7d, 0x28, 0x45, + 0x8f, 0xd9, 0x91, 0x87, 0xbd, 0xfc, 0xed, 0x7b, 0x06, 0x57, 0xbf, 0x09, 0xf0, 0x2d, 0xea, 0x78, + 0x03, 0x7a, 0x48, 0x3c, 0xfe, 0xfc, 0xca, 0xf2, 0x45, 0x12, 0x59, 0x51, 0xb6, 0x78, 0x19, 0x40, + 0x2c, 0x41, 0xfc, 0x0a, 0x29, 0x9a, 0xea, 0x5f, 0x65, 0xa1, 0x80, 0x29, 0x0d, 0xb5, 0x06, 0xaa, + 0x41, 0x41, 0xfa, 0x09, 0x7e, 0xff, 0x34, 0x4b, 0x67, 0xa7, 0x9b, 0x79, 0xe1, 0x20, 0xf2, 0x16, + 0xf7, 0x0c, 0x09, 0x0f, 0x9e, 0xbd, 0xc8, 0x83, 0xa3, 0x7b, 0x50, 0x91, 0x20, 0xe3, 0xc0, 0x0c, + 0x0e, 0x44, 0xf2, 0xd6, 0x5c, 0x39, 0x3b, 0xdd, 0x04, 0x81, 0xdc, 0x36, 0x83, 0x03, 0x0c, 0x02, + 0xcd, 0x7e, 0x23, 0x1d, 0xca, 0x1f, 0x53, 0xc7, 0x33, 0x42, 0x3e, 0x09, 0x59, 0x68, 0x4c, 0x5d, + 0xc7, 0xd9, 0x54, 0xe5, 0x97, 0x0a, 0xf0, 0xf1, 0x6c, 0xf2, 0x3a, 0x2c, 0xfb, 0x94, 0x86, 0xc2, + 0x6d, 0x39, 0xd4, 0x93, 0x35, 0x8c, 0x5a, 0x6a, 0x69, 0x9b, 0xd2, 0x10, 0x4b, 0x1c, 0xae, 0xf8, + 0x89, 0x16, 0xba, 0x07, 0x6b, 0xae, 0x19, 0x84, 0x06, 0xf7, 0x77, 0xf6, 0x4c, 0x5b, 0x81, 0x1f, + 0x35, 0xc4, 0x78, 0x5b, 0x9c, 0x15, 0x49, 0xa8, 0xff, 0x94, 0x81, 0x32, 0x9b, 0x8c, 0xb3, 0xef, + 0x58, 0x2c, 0xc8, 0xfb, 0xfc, 0xb1, 0xc7, 0x4d, 0x58, 0xb4, 0x02, 0x5f, 
0x1a, 0x95, 0x5f, 0xbe, + 0x5a, 0x1f, 0x63, 0x46, 0x43, 0x1f, 0x41, 0x41, 0xd6, 0x52, 0x44, 0xd8, 0xa1, 0x5e, 0x1d, 0x8e, + 0x4a, 0xdb, 0x48, 0x39, 0xbe, 0x97, 0x67, 0xa3, 0x13, 0x97, 0x00, 0x4e, 0x92, 0xd0, 0x0d, 0xc8, + 0x5a, 0xc2, 0x5c, 0xf2, 0x53, 0x18, 0xad, 0x83, 0xb3, 0x96, 0xa7, 0xfe, 0x20, 0x03, 0xcb, 0xb3, + 0x03, 0xcf, 0x76, 0xc0, 0x2d, 0x28, 0x05, 0x93, 0xbd, 0x60, 0x1a, 0x84, 0x64, 0x14, 0x3d, 0x2d, + 0xc7, 0x04, 0xd4, 0x86, 0x92, 0xe9, 0x0e, 0xa9, 0xef, 0x84, 0x07, 0x23, 0x99, 0xa5, 0xa6, 0x87, + 0x0a, 0x49, 0x9d, 0xf5, 0x46, 0x24, 0x82, 0x67, 0xd2, 0xd1, 0xbd, 0x2f, 0xbe, 0x3f, 0xe0, 0xf7, + 0xfe, 0x1b, 0x50, 0x71, 0xcd, 0x11, 0x2f, 0x2e, 0x85, 0xce, 0x48, 0xcc, 0x23, 0x87, 0xcb, 0x92, + 0x36, 0x70, 0x46, 0x44, 0x55, 0xa1, 0x14, 0x2b, 0x43, 0xab, 0x50, 0x6e, 0xe8, 0x7d, 0xe3, 0xfe, + 0x83, 0x47, 0xc6, 0x63, 0xed, 0xa9, 0xb2, 0x20, 0x63, 0xd3, 0x3f, 0xcf, 0xc0, 0xb2, 0x74, 0x47, + 0x32, 0xde, 0x7f, 0x13, 0x96, 0x7c, 0x73, 0x3f, 0x8c, 0x32, 0x92, 0x9c, 0xd8, 0xd5, 0xcc, 0xc3, + 0xb3, 0x8c, 0x84, 0xb1, 0xd2, 0x33, 0x92, 0xc4, 0xc7, 0x0e, 0x8b, 0x97, 0x7e, 0xec, 0x90, 0xfb, + 0xa9, 0x7c, 0xec, 0xa0, 0xfe, 0x0a, 0xc0, 0x96, 0xe3, 0x92, 0x81, 0xa8, 0x43, 0xa5, 0xe5, 0x97, + 0x2c, 0x86, 0x93, 0x75, 0xce, 0x28, 0x86, 0x6b, 0xb7, 0x30, 0xa3, 0x31, 0xd6, 0xd0, 0xb1, 0xe5, + 0x61, 0xe4, 0xac, 0xc7, 0x8c, 0x35, 0x74, 0xec, 0xf8, 0x55, 0x2e, 0x77, 0xd5, 0xab, 0xdc, 0x49, + 0x06, 0x56, 0x65, 0xec, 0x1a, 0xbb, 0xdf, 0x2f, 0x43, 0x49, 0x84, 0xb1, 0xb3, 0x84, 0x8e, 0x3f, + 0xf0, 0x0b, 0x5c, 0xbb, 0x85, 0x8b, 0x82, 0xdd, 0xb6, 0xd1, 0x26, 0x94, 0x25, 0x34, 0xf1, 0xd9, + 0x14, 0x08, 0x52, 0x87, 0x0d, 0xff, 0xab, 0x90, 0xdb, 0x77, 0x5c, 0x22, 0x37, 0x7a, 0xaa, 0x03, + 0x98, 0x19, 0x60, 0x7b, 0x01, 0x73, 0x74, 0xb3, 0x18, 0x15, 0xea, 0xf8, 0xf8, 0x64, 0xda, 0x99, + 0x1c, 0x9f, 0xc8, 0x40, 0xe7, 0xc6, 0x27, 0x70, 0x6c, 0x7c, 0x82, 0x2d, 0xc6, 0x27, 0xa1, 0xc9, + 0xf1, 0x09, 0xd2, 0x4f, 0x65, 0x7c, 0x3b, 0x70, 0xa3, 0xe9, 0x9a, 0xd6, 0xa1, 0xeb, 0x04, 0x21, + 0xb1, 0x93, 0x1e, 0xe3, 0x01, 0x14, 0xce, 0x05, 0x9d, 0x97, 0x55, 0x34, 0x25, 0x52, 0xfd, 0xf7, + 0x0c, 0x54, 0xb6, 0x89, 0xe9, 0x86, 0x07, 0xb3, 0xb2, 0x51, 0x48, 0x82, 0x50, 0x5e, 0x56, 0xfc, + 0x37, 0xfa, 0x1a, 0x14, 0xe3, 0x98, 0xe4, 0xca, 0xb7, 0xb9, 0x18, 0x8a, 0x1e, 0xc2, 0x12, 0x3b, + 0x63, 0x74, 0x12, 0x25, 0x3b, 0x97, 0x3d, 0xfb, 0x48, 0x24, 0xbb, 0x64, 0x7c, 0xc2, 0x83, 0x10, + 0xbe, 0x95, 0xf2, 0x38, 0x6a, 0xa2, 0xff, 0x0f, 0x15, 0xfe, 0x6a, 0x11, 0xc5, 0x5c, 0xf9, 0xab, + 0x74, 0x96, 0xc5, 0xc3, 0xa3, 0x88, 0xb7, 0xfe, 0x38, 0x0b, 0x6b, 0x4f, 0xcd, 0xe9, 0x1e, 0x91, + 0x6e, 0x83, 0xd8, 0x98, 0x58, 0xd4, 0xb7, 0x51, 0x2f, 0xe9, 0x6e, 0x2e, 0x79, 0xc7, 0x4c, 0x13, + 0x4e, 0xf7, 0x3a, 0x51, 0x02, 0x96, 0x4d, 0x24, 0x60, 0x6b, 0x90, 0xf7, 0xa8, 0x67, 0x11, 0xe9, + 0x8b, 0x44, 0x43, 0xfd, 0xed, 0x4c, 0xd2, 0xd7, 0x54, 0xe3, 0x37, 0x46, 0x5e, 0x81, 0xea, 0xd0, + 0x30, 0xee, 0x0e, 0x7d, 0x04, 0xd5, 0xbe, 0xae, 0x61, 0x7d, 0xd0, 0xec, 0x7e, 0xdb, 0xe8, 0x37, + 0x76, 0xfa, 0x8d, 0x07, 0xf7, 0x8c, 0x5e, 0x77, 0xe7, 0x3b, 0xf7, 0x1f, 0xde, 0xfb, 0x9a, 0x92, + 0xa9, 0xd6, 0x8e, 0x4f, 0x6a, 0xb7, 0x3a, 0x0d, 0x6d, 0x47, 0x1c, 0x99, 0x3d, 0xfa, 0xb2, 0x6f, + 0xba, 0x81, 0xf9, 0xe0, 0x5e, 0x8f, 0xba, 0x53, 0x86, 0x41, 0x5f, 0x01, 0xb4, 0xa5, 0xe3, 0x8e, + 0x3e, 0x30, 0x22, 0x87, 0xa6, 0x35, 0x35, 0x25, 0x2b, 0xd2, 0x9a, 0x2d, 0xe2, 0x7b, 0x24, 0x6c, + 0xe8, 0xfd, 0xfb, 0x0f, 0x1e, 0x69, 0x4d, 0x8d, 0x1d, 0x82, 0x4a, 0xf2, 0x76, 0x4b, 0x5e, 0xda, + 0x99, 0x0b, 0x2f, 0xed, 0xd9, 0xdd, 0x9f, 0xbd, 0xe0, 0xee, 0xdf, 0x82, 0x35, 0xcb, 0xa7, 0x41, + 
0x60, 0xb0, 0x5c, 0x81, 0xd8, 0x73, 0xd9, 0xc8, 0x17, 0xce, 0x4e, 0x37, 0xaf, 0x69, 0x8c, 0xdf, + 0xe7, 0x6c, 0xa9, 0xfe, 0x9a, 0x95, 0x20, 0xf1, 0x9e, 0xd4, 0xdf, 0x5f, 0x64, 0x61, 0x97, 0x73, + 0xe4, 0xb8, 0x64, 0x48, 0x02, 0xf4, 0x0c, 0x56, 0x2d, 0x9f, 0xd8, 0x2c, 0x09, 0x30, 0xdd, 0xe4, + 0xc7, 0xba, 0xff, 0x2f, 0x35, 0x02, 0x8a, 0x05, 0xeb, 0x5a, 0x2c, 0xd5, 0x1f, 0x13, 0x0b, 0xaf, + 0x58, 0xe7, 0xda, 0xe8, 0x63, 0x58, 0x0d, 0x88, 0xeb, 0x78, 0x93, 0x97, 0x86, 0x45, 0xbd, 0x90, + 0xbc, 0x8c, 0xde, 0xd6, 0xae, 0xd2, 0xdb, 0xd7, 0x77, 0x98, 0x94, 0x26, 0x84, 0x9a, 0xe8, 0xec, + 0x74, 0x73, 0xe5, 0x3c, 0x0d, 0xaf, 0x48, 0xcd, 0xb2, 0x5d, 0xed, 0xc0, 0xca, 0xf9, 0xd1, 0xa0, + 0x35, 0xe9, 0x29, 0xb8, 0xc3, 0x89, 0x3c, 0x01, 0xba, 0x05, 0x45, 0x9f, 0x0c, 0x9d, 0x20, 0xf4, + 0x85, 0x99, 0x19, 0x27, 0xa6, 0x30, 0x3f, 0x21, 0xbe, 0xa5, 0xaa, 0xfe, 0x12, 0xcc, 0xf5, 0xc8, + 0x8e, 0x96, 0xed, 0x04, 0xe6, 0x9e, 0x54, 0x59, 0xc4, 0x51, 0x93, 0xed, 0xd8, 0x49, 0x10, 0x87, + 0x75, 0xfc, 0x37, 0xa3, 0xf1, 0xf8, 0x43, 0x7e, 0x59, 0xc6, 0x23, 0x8c, 0xe8, 0x03, 0xd6, 0x5c, + 0xe2, 0x03, 0xd6, 0x35, 0xc8, 0xbb, 0xe4, 0x88, 0xb8, 0xe2, 0xe6, 0xc7, 0xa2, 0x71, 0xe7, 0x1e, + 0x54, 0xa2, 0x2f, 0x25, 0xf9, 0x37, 0x18, 0x45, 0xc8, 0x0d, 0x1a, 0xfd, 0x27, 0xca, 0x02, 0x02, + 0x28, 0x88, 0x9d, 0x2c, 0xde, 0xfd, 0xb4, 0x6e, 0x67, 0xab, 0xfd, 0x58, 0xc9, 0xde, 0xf9, 0x9d, + 0x1c, 0x94, 0xe2, 0x97, 0x27, 0x76, 0xd3, 0x74, 0xf4, 0xe7, 0xd1, 0x51, 0x88, 0xe9, 0x1d, 0xf2, + 0x02, 0xbd, 0x31, 0xab, 0x59, 0x7d, 0x24, 0x9e, 0xda, 0x63, 0x76, 0x54, 0xaf, 0x7a, 0x0b, 0x8a, + 0x8d, 0x7e, 0xbf, 0xfd, 0xb8, 0xa3, 0xb7, 0x94, 0x4f, 0x33, 0xd5, 0x2f, 0x1c, 0x9f, 0xd4, 0xae, + 0xc5, 0xa0, 0x46, 0x20, 0x36, 0x1f, 0x47, 0x69, 0x9a, 0xde, 0x1b, 0xe8, 0x2d, 0xe5, 0x93, 0xec, + 0x3c, 0x8a, 0xd7, 0x60, 0xf8, 0x47, 0x40, 0xa5, 0x1e, 0xd6, 0x7b, 0x0d, 0xcc, 0x3a, 0xfc, 0x34, + 0x2b, 0x4a, 0x69, 0xb3, 0x1e, 0x7d, 0x32, 0x36, 0x7d, 0xd6, 0xe7, 0x46, 0xf4, 0x55, 0xdd, 0x27, + 0x8b, 0xe2, 0x43, 0x91, 0xd9, 0x33, 0x1a, 0x31, 0xed, 0x29, 0xeb, 0x8d, 0xbf, 0x5f, 0x72, 0x35, + 0x8b, 0x73, 0xbd, 0xf5, 0x99, 0xa7, 0x62, 0x5a, 0x54, 0x58, 0xc2, 0xbb, 0x9d, 0x0e, 0x03, 0x7d, + 0x92, 0x9b, 0x9b, 0x1d, 0x9e, 0x78, 0x2c, 0xbf, 0x46, 0xb7, 0xa1, 0x18, 0x3d, 0x6f, 0x2a, 0x9f, + 0xe6, 0xe6, 0x06, 0xa4, 0x45, 0x6f, 0xb3, 0xbc, 0xc3, 0xed, 0xdd, 0x01, 0xff, 0xe8, 0xef, 0x93, + 0xfc, 0x7c, 0x87, 0x07, 0x93, 0xd0, 0xa6, 0x2f, 0x3c, 0x76, 0x66, 0x65, 0xd5, 0xee, 0xd3, 0xbc, + 0xf0, 0x05, 0x31, 0x46, 0x96, 0xec, 0xde, 0x82, 0x22, 0xd6, 0xbf, 0x25, 0xbe, 0x0f, 0xfc, 0xa4, + 0x30, 0xa7, 0x07, 0x93, 0x8f, 0x89, 0xc5, 0x7a, 0xab, 0x41, 0x01, 0xeb, 0x4f, 0xbb, 0xcf, 0x74, + 0xe5, 0x0f, 0x0a, 0x73, 0x7a, 0x30, 0x19, 0x51, 0xfe, 0x95, 0x54, 0xb1, 0x8b, 0x7b, 0xdb, 0x0d, + 0xbe, 0x28, 0xf3, 0x7a, 0xba, 0xfe, 0xf8, 0xc0, 0xf4, 0x88, 0x3d, 0xfb, 0x9e, 0x26, 0x66, 0xdd, + 0xf9, 0x39, 0x28, 0x46, 0x91, 0x2e, 0xda, 0x80, 0xc2, 0xf3, 0x2e, 0x7e, 0xa2, 0x63, 0x65, 0x41, + 0x58, 0x39, 0xe2, 0x3c, 0x17, 0x39, 0x4a, 0x0d, 0x96, 0x9e, 0x36, 0x3a, 0x8d, 0xc7, 0x3a, 0x8e, + 0x4a, 0xee, 0x11, 0x40, 0x86, 0x6b, 0x55, 0x45, 0x76, 0x10, 0xeb, 0x6c, 0xae, 0x7f, 0xff, 0x47, + 0x1b, 0x0b, 0x3f, 0xfc, 0xd1, 0xc6, 0xc2, 0x27, 0x67, 0x1b, 0x99, 0xef, 0x9f, 0x6d, 0x64, 0xfe, + 0xfe, 0x6c, 0x23, 0xf3, 0x6f, 0x67, 0x1b, 0x99, 0xbd, 0x02, 0xbf, 0x54, 0x1e, 0xfe, 0x6f, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x80, 0x94, 0x1b, 0x9c, 0x7a, 0x32, 0x00, 0x00, +} diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/types.proto b/extender/code/vendor/github.com/docker/swarmkit/api/types.proto new 
file mode 100644 index 000000000..d25d6625e --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/api/types.proto @@ -0,0 +1,1086 @@ +syntax = "proto3"; + +package docker.swarmkit.v1; + +import "google/protobuf/timestamp.proto"; +import "google/protobuf/duration.proto"; +import "gogoproto/gogo.proto"; + +// This file contains types that are common to objects and spec or that are not +// considered first-class within the cluster object-model. + +// Version tracks the last time an object in the store was updated. +message Version { + uint64 index = 1; +} + +message IndexEntry { + string key = 1; + string val = 2; +} + +// Annotations provide useful information to identify API objects. They are +// common to all API specs. +message Annotations { + string name = 1; + map<string, string> labels = 2; + + // Indices provides keys and values for indexing this object. + // A single key may have multiple values. + repeated IndexEntry indices = 4 [(gogoproto.nullable) = false]; +} + +// NamedGenericResource represents a "user defined" resource which is defined +// as a string. +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...) +message NamedGenericResource { + string kind = 1; + string value = 2; +} + +// DiscreteGenericResource represents a "user defined" resource which is defined +// as an integer +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to count the resource (SSD=5, HDD=3, ...) +message DiscreteGenericResource { + string kind = 1; + int64 value = 2; +} + +// GenericResource represents a "user defined" resource which can +// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1) +message GenericResource { + oneof resource { + NamedGenericResource named_resource_spec = 1; + DiscreteGenericResource discrete_resource_spec = 2; + } +} + +enum ResourceType { + TASK = 0; + SECRET = 1; + CONFIG = 2; +} + +message Resources { + // Amount of CPUs (e.g. 2000000000 = 2 CPU cores) + int64 nano_cpus = 1 [(gogoproto.customname) = "NanoCPUs"]; + + // Amount of memory in bytes. + int64 memory_bytes = 2; + + // User specified resource (e.g: bananas=2;apple={red,yellow,green}) + repeated GenericResource generic = 3; +} + +message ResourceRequirements { + Resources limits = 1; + Resources reservations = 2; +} + +message Platform { + // Architecture (e.g. x86_64) + string architecture = 1; + + // Operating System (e.g. linux) + string os = 2 [(gogoproto.customname) = "OS"]; +} + +// PluginDescription describes an engine plugin. +message PluginDescription { + // Type of plugin. Canonical values for existing types are + // Volume, Network, and Authorization. More types could be + // supported in the future. + string type = 1; + + // Name of the plugin + string name = 2; +} + +message EngineDescription { + // Docker daemon version running on the node. + string engine_version = 1; + + // Labels attached to the engine. + map<string, string> labels = 2; + + // Volume, Network, and Auth plugins + repeated PluginDescription plugins = 3 [(gogoproto.nullable) = false]; +} + +message NodeDescription { + // Hostname of the node as reported by the agent. + // This is different from spec.meta.name which is user-defined. + string hostname = 1; + + // Platform of the node. + Platform platform = 2; + + // Total resources on the node. + Resources resources = 3; + + // Information about the Docker Engine on the node.
+ EngineDescription engine = 4; + + // Information on the node's TLS setup + NodeTLSInfo tls_info = 5 [(gogoproto.customname) = "TLSInfo"]; + + // FIPS indicates whether the node has FIPS enabled + bool fips = 6 [(gogoproto.customname) = "FIPS"]; +} + +message NodeTLSInfo { + // Information about which root certs the node trusts + bytes trust_root = 1; + + // Information about the node's current TLS certificate + bytes cert_issuer_subject = 2; + bytes cert_issuer_public_key = 3; +} + +message RaftMemberStatus { + bool leader = 1; + + enum Reachability { + // Unknown indicates that the manager state cannot be resolved + UNKNOWN = 0; + + // Unreachable indicates that the node cannot be contacted by other + // raft cluster members. + UNREACHABLE = 1; + + // Reachable indicates that the node is healthy and reachable + // by other members. + REACHABLE = 2; + } + + Reachability reachability = 2; + string message = 3; +} + +message NodeStatus { + // TODO(aluzzardi) These should be using `gogoproto.enumvalue_customname`. + enum State { + // Unknown indicates the node state cannot be resolved. + UNKNOWN = 0; + + // Down indicates the node is down. + DOWN = 1; + + // Ready indicates the node is ready to accept tasks. + READY = 2; + + // Disconnected indicates the node is currently trying to find a new manager. + DISCONNECTED = 3; + } + + State state = 1; + string message = 2; + // Addr is the node's IP address as observed by the manager + string addr = 3; +} + +message Image { + // reference is a docker image reference. This can include a repository, tag + // or be fully qualified with a digest. The format is specified in the + // distribution/reference package. + string reference = 1; +} + +// Mount describes volume mounts for a container. +// +// The Mount type follows the structure of the mount syscall, including a type, +// source, target. Top-level flags, such as writable, are common to all kinds +// of mounts, where we also provide options that are specific to a type of +// mount. This corresponds to flags and data, respectively, in the syscall. +message Mount { + enum Type { + option (gogoproto.goproto_enum_prefix) = false; + option (gogoproto.enum_customname) = "MountType"; + + BIND = 0 [(gogoproto.enumvalue_customname) = "MountTypeBind"]; // Bind mount host dir + VOLUME = 1 [(gogoproto.enumvalue_customname) = "MountTypeVolume"]; // Remote storage volumes + TMPFS = 2 [(gogoproto.enumvalue_customname) = "MountTypeTmpfs"]; // Mount a tmpfs + } + + // Type defines the nature of the mount. + Type type = 1; + + // Source specifies the name of the mount. Depending on mount type, this + // may be a volume name or a host path, or even ignored. + string source = 2; + + // Target path in container + string target = 3; + + // ReadOnly should be set to true if the mount should not be writable. + bool readonly = 4 [(gogoproto.customname) = "ReadOnly"]; + + // Consistency indicates the tolerable level of file system consistency + enum Consistency { + option (gogoproto.goproto_enum_prefix) = false; + option (gogoproto.enum_customname) = "MountConsistency"; + + DEFAULT = 0 [(gogoproto.enumvalue_customname) = "MountConsistencyDefault"]; + CONSISTENT = 1 [(gogoproto.enumvalue_customname) = "MountConsistencyFull"]; + CACHED = 2 [(gogoproto.enumvalue_customname) = "MountConsistencyCached"]; + DELEGATED = 3 [(gogoproto.enumvalue_customname) = "MountConsistencyDelegated"]; + } + Consistency consistency = 8; + + // BindOptions specifies options that are specific to a bind mount.
+ message BindOptions { + enum Propagation { + option (gogoproto.goproto_enum_prefix) = false; + option (gogoproto.enum_customname) = "MountPropagation"; + + RPRIVATE = 0 [(gogoproto.enumvalue_customname) = "MountPropagationRPrivate"]; + PRIVATE = 1 [(gogoproto.enumvalue_customname) = "MountPropagationPrivate"]; + RSHARED = 2 [(gogoproto.enumvalue_customname) = "MountPropagationRShared"]; + SHARED = 3 [(gogoproto.enumvalue_customname) = "MountPropagationShared"]; + RSLAVE = 4 [(gogoproto.enumvalue_customname) = "MountPropagationRSlave"]; + SLAVE = 5 [(gogoproto.enumvalue_customname) = "MountPropagationSlave"]; + } + + // Propagation mode of mount. + Propagation propagation = 1; + } + + // VolumeOptions contains parameters for mounting the volume. + message VolumeOptions { + // nocopy prevents automatic copying of data to the volume with data from target + bool nocopy = 1 [(gogoproto.customname) = "NoCopy"]; + + // labels to apply to the volume if creating + map<string, string> labels = 2; + + // DriverConfig specifies the options that may be passed to the driver + // if the volume is created. + // + // If this is empty, no volume will be created if the volume is missing. + Driver driver_config = 3; + } + + message TmpfsOptions { + // Size sets the size of the tmpfs, in bytes. + // + // This will be converted to an operating system specific value + // depending on the host. For example, on linux, it will be converted to + // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with + // docker, uses a straight byte value. + // + // Percentages are not supported. + int64 size_bytes = 1; + + // Mode of the tmpfs upon creation + uint32 mode = 2 [(gogoproto.customtype) = "os.FileMode", (gogoproto.nullable) = false]; + + // TODO(stevvooe): There are several more tmpfs flags, specified in the + // daemon, that are accepted. Only the most basic are added for now. + // + // From docker/docker/pkg/mount/flags.go: + // + // var validFlags = map[string]bool{ + // "": true, + // "size": true, X + // "mode": true, X + // "uid": true, + // "gid": true, + // "nr_inodes": true, + // "nr_blocks": true, + // "mpol": true, + // } + // + // Some of these may be straightforward to add, but others, such as + // uid/gid have implications in a clustered system. + } + + // Depending on type, one of bind_options or volume_options will be set. + + // BindOptions configures properties of a bind mount type. + // + // For mounts of type bind, the source must be an absolute host path. + BindOptions bind_options = 5; + + // VolumeOptions configures the properties specific to a volume mount type. + // + // For mounts of type volume, the source will be used as the volume name. + VolumeOptions volume_options = 6; + + // TmpfsOptions allows one to set options for mounting a temporary + // filesystem. + // + // The source field will be ignored when using mounts of type tmpfs. + TmpfsOptions tmpfs_options = 7; + + // TODO(stevvooe): It would be better to use a oneof field above, although the + // type is enough to make the decision, while being primary to the + // datastructure.
+} + +message RestartPolicy { + enum RestartCondition { + option (gogoproto.goproto_enum_prefix) = false; + option (gogoproto.enum_customname) = "RestartCondition"; + NONE = 0 [(gogoproto.enumvalue_customname) = "RestartOnNone"]; + ON_FAILURE = 1 [(gogoproto.enumvalue_customname) = "RestartOnFailure"]; + ANY = 2 [(gogoproto.enumvalue_customname) = "RestartOnAny"]; + } + + RestartCondition condition = 1; + + // Delay between restart attempts + // Note: can't use stdduration because this field needs to be nullable. + google.protobuf.Duration delay = 2; + + // MaxAttempts is the maximum number of restarts to attempt on an + // instance before giving up. Ignored if 0. + uint64 max_attempts = 3; + + // Window is the time window used to evaluate the restart policy. + // The time window is unbounded if this is 0. + // Note: can't use stdduration because this field needs to be nullable. + google.protobuf.Duration window = 4; +} + +// UpdateConfig specifies the rate and policy of updates. +// TODO(aluzzardi): Consider making this a oneof with RollingStrategy and LockstepStrategy. +message UpdateConfig { + // Maximum number of tasks to be updated in one iteration. + // 0 means unlimited parallelism. + uint64 parallelism = 1; + + // Amount of time between updates. + google.protobuf.Duration delay = 2 [(gogoproto.stdduration) = true, (gogoproto.nullable) = false]; + + enum FailureAction { + PAUSE = 0; + CONTINUE = 1; + ROLLBACK = 2; + } + + // FailureAction is the action to take when an update fails. + FailureAction failure_action = 3; + + // Monitor indicates how long to monitor a task for failure after it is + // created. If the task fails by ending up in one of the states + // REJECTED, COMPLETED, or FAILED, within Monitor from its creation, + // this counts as a failure. If it fails after Monitor, it does not + // count as a failure. If Monitor is unspecified, a default value will + // be used. + // Note: can't use stdduration because this field needs to be nullable. + google.protobuf.Duration monitor = 4; + + // MaxFailureRatio is the fraction of tasks that may fail during + // an update before the failure action is invoked. Any task created by + // the current update which ends up in one of the states REJECTED, + // COMPLETED or FAILED within Monitor from its creation counts as a + // failure. The number of failures is divided by the number of tasks + // being updated, and if this fraction is greater than + // MaxFailureRatio, the failure action is invoked. + // + // If the failure action is CONTINUE, there is no effect. + // If the failure action is PAUSE, no more tasks will be updated until + // another update is started. + // If the failure action is ROLLBACK, the orchestrator will attempt to + // roll back to the previous service spec. If the MaxFailureRatio + // threshold is hit during the rollback, the rollback will pause. + float max_failure_ratio = 5; + + // UpdateOrder controls the order of operations when rolling out an + // updated task. Either the old task is shut down before the new task + // is started, or the new task is started before the old task is shut + // down. + enum UpdateOrder { + STOP_FIRST = 0; + START_FIRST = 1; + } + + UpdateOrder order = 6; +} + +// UpdateStatus is the status of an update in progress. +message UpdateStatus { + enum UpdateState { + UNKNOWN = 0; + UPDATING = 1; + PAUSED = 2; + COMPLETED = 3; + ROLLBACK_STARTED = 4; + ROLLBACK_PAUSED = 5; // if a rollback fails + ROLLBACK_COMPLETED = 6; + } + + // State is the state of this update.
It indicates whether the + // update is in progress, completed, paused, rolling back, or + // finished rolling back. + UpdateState state = 1; + + // StartedAt is the time at which the update was started. + // Note: can't use stdtime because this field is nullable. + google.protobuf.Timestamp started_at = 2; + + // CompletedAt is the time at which the update completed successfully, + // paused, or finished rolling back. + // Note: can't use stdtime because this field is nullable. + google.protobuf.Timestamp completed_at = 3; + + // TODO(aaronl): Consider adding a timestamp showing when the most + // recent task update took place. Currently, this is nontrivial + // because each service update kicks off a replacement update, so + // updating the service object with a timestamp at every step along + // the rolling update would cause the rolling update to be constantly + // restarted. + + // Message explains how the update got into its current state. For + // example, if the update is paused, it will explain what is preventing + // the update from proceeding (typically the failure of a task to start up + // when OnFailure is PAUSE). + string message = 4; +} + +// TaskState enumerates the states that a task progresses through within an +// agent. States are designed to be monotonically increasing, such that if two +// states are seen by a task, the greater of the two represents the true state. + +// Only the manager creates a NEW task, and moves the task to PENDING and ASSIGNED. +// Afterward, the manager must rely on the agent to update the task status +// (pre-run: preparing, ready, starting; +// running; +// end-state: complete, shutdown, failed, rejected) +enum TaskState { + // TODO(aluzzardi): Move it back into `TaskStatus` because of the naming + // collisions of enums. + + option (gogoproto.goproto_enum_prefix) = false; + option (gogoproto.enum_customname) = "TaskState"; + NEW = 0 [(gogoproto.enumvalue_customname)="TaskStateNew"]; + PENDING = 64 [(gogoproto.enumvalue_customname)="TaskStatePending"]; // waiting for scheduling decision + ASSIGNED = 192 [(gogoproto.enumvalue_customname)="TaskStateAssigned"]; + ACCEPTED = 256 [(gogoproto.enumvalue_customname)="TaskStateAccepted"]; // task has been accepted by an agent. + PREPARING = 320 [(gogoproto.enumvalue_customname)="TaskStatePreparing"]; + READY = 384 [(gogoproto.enumvalue_customname)="TaskStateReady"]; + STARTING = 448 [(gogoproto.enumvalue_customname)="TaskStateStarting"]; + RUNNING = 512 [(gogoproto.enumvalue_customname)="TaskStateRunning"]; + COMPLETE = 576 [(gogoproto.enumvalue_customname)="TaskStateCompleted"]; // successful completion of task (not error code, just ran) + SHUTDOWN = 640 [(gogoproto.enumvalue_customname)="TaskStateShutdown"]; // orchestrator requested shutdown + FAILED = 704 [(gogoproto.enumvalue_customname)="TaskStateFailed"]; // task execution failed with error + // TaskStateRejected means a task never ran, for instance if something about + // the environment failed (e.g. setting up a port on that node failed). + REJECTED = 768 [(gogoproto.enumvalue_customname)="TaskStateRejected"]; // task could not be executed here. + // TaskStateRemove is used to correctly handle service deletions and scale + // downs. This allows us to keep track of tasks that have been marked for + // deletion, but can't yet be removed because the agent is in the process of + // shutting them down. Once the agent has shut down tasks with desired state + // REMOVE, the task reaper is responsible for removing them.
+ REMOVE = 800 [(gogoproto.enumvalue_customname)="TaskStateRemove"]; + // TaskStateOrphaned is used to free up resources associated with service + // tasks on unresponsive nodes without having to delete those tasks. This + // state is directly assigned to the task by the orchestrator. + ORPHANED = 832 [(gogoproto.enumvalue_customname)="TaskStateOrphaned"]; + + // NOTE(stevvooe): The state of a task is actually a lamport clock, in that + // given two observations, the greater of the two can be considered + // correct. To enforce this, we only allow tasks to proceed to a greater + // state. + // + // A byproduct of this design decision is that we must also maintain this + // invariant in the protobuf enum values, such that when comparing two + // values, the one with the greater value is also the greater state. + // + // Because we may want to add intervening states at a later date, we've left + // 64 spaces between each one. This should allow us to make 5 or 6 + // insertions between each state if we find that we made a mistake and need + // another state. + // + // Remove this message when the states are deemed perfect. +} + +// Container specific status. +message ContainerStatus { + string container_id = 1; + + int32 pid = 2 [(gogoproto.customname) = "PID"]; + int32 exit_code = 3; +} + +// PortStatus specifies the actual allocated runtime state of a list +// of port configs. +message PortStatus { + repeated PortConfig ports = 1; +} + +message TaskStatus { + // Note: can't use stdtime because this field is nullable. + google.protobuf.Timestamp timestamp = 1; + + // State expresses the current state of the task. + TaskState state = 2; + + // Message reports a message for the task status. This should provide a + // human readable message that can point to how the task actually arrived + // at a current state. + // + // As a convention, we place a small message here that led to the + // current state. For example, if the task is in ready, because it was + // prepared, we'd place "prepared" in this field. If we skipped preparation + // because the task is prepared, we would put "already prepared" in this + // field. + string message = 3; + + // Err is set if the task is in an error state, or is unable to + // progress from an earlier state because a precondition is + // unsatisfied. + // + // The following states should report a companion error: + // + // FAILED, REJECTED + // + // In general, messages that should be surfaced to users belong in the + // Err field, and notes on routine state transitions belong in Message. + // + // TODO(stevvooe) Integrate this field with the error interface. + string err = 4; + + // Container status contains container specific status information. + oneof runtime_status { + ContainerStatus container = 5; + } + + // HostPorts provides a list of ports allocated at the host + // level. + PortStatus port_status = 6; + + // AppliedBy gives the node ID of the manager that applied this task + // status update to the Task object. + string applied_by = 7; + + // AppliedAt gives a timestamp of when this status update was applied to + // the Task object. + // Note: can't use stdtime because this field is nullable. + google.protobuf.Timestamp applied_at = 8; +} + +// NetworkAttachmentConfig specifies how a service should be attached to a particular network. +// +// For now, this is a simple struct, but this can include future information +// instructing Swarm on how this service should work on the particular +// network.
+
+// NetworkAttachmentConfig specifies how a service should be attached to a particular network.
+//
+// For now, this is a simple struct, but this can include future information
+// instructing Swarm on how this service should work on the particular
+// network.
+message NetworkAttachmentConfig {
+	// Target specifies the target network for attachment. This value must be a
+	// network ID.
+	string target = 1;
+	// Aliases specifies a list of discoverable alternate names for the service on this Target.
+	repeated string aliases = 2;
+	// Addresses specifies a list of preferred ipv4 and ipv6
+	// addresses. If these addresses are not available then the
+	// attachment might fail.
+	repeated string addresses = 3;
+	// DriverAttachmentOpts is a map of driver attachment options for the network target.
+	map<string, string> driver_attachment_opts = 4;
+}
+
+// IPAMConfig specifies parameters for IP Address Management.
+message IPAMConfig {
+	// TODO(stevvooe): It may make more sense to manage IPAM and network
+	// definitions separately. This will allow multiple networks to share IPAM
+	// instances. For now, we will follow the conventions of libnetwork and
+	// specify this as part of the network specification.
+
+	// AddressFamily specifies the network address family that
+	// this IPAMConfig belongs to.
+	enum AddressFamily {
+		UNKNOWN = 0; // satisfy proto3
+		IPV4 = 4;
+		IPV6 = 6;
+	}
+
+	AddressFamily family = 1;
+
+	// Subnet defines a network as a CIDR address (i.e. network and mask,
+	// 192.168.0.0/24).
+	string subnet = 2;
+
+	// Range defines the portion of the subnet to allocate to tasks. This is
+	// defined as a subnet within the primary subnet.
+	string range = 3;
+
+	// Gateway address within the subnet.
+	string gateway = 4;
+
+	// Reserved is a list of addresses from the master pool that should *not* be
+	// allocated. These addresses may have already been allocated or may be
+	// reserved for another allocation manager.
+	map<string, string> reserved = 5;
+}
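To make the IPAMConfig fields concrete: subnet describes the whole network, range is a smaller CIDR carved out of it for task addresses, and the gateway must fall inside the subnet. A standalone Go sketch with hypothetical values; the plain struct stands in for the generated message.

package main

import (
	"fmt"
	"net"
)

// IPAMConfig is a plain stand-in for the generated message above.
type IPAMConfig struct {
	Family  int    // 4 or 6, per the AddressFamily enum
	Subnet  string // CIDR for the whole network
	Range   string // sub-CIDR allocated to tasks
	Gateway string
}

func main() {
	cfg := IPAMConfig{Family: 4, Subnet: "10.0.0.0/16", Range: "10.0.1.0/24", Gateway: "10.0.0.1"}

	// The range must sit inside the subnet; a quick sanity check:
	_, subnet, err := net.ParseCIDR(cfg.Subnet)
	if err != nil {
		panic(err)
	}
	rangeIP, _, err := net.ParseCIDR(cfg.Range)
	if err != nil {
		panic(err)
	}
	fmt.Println("range inside subnet:", subnet.Contains(rangeIP)) // true
}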
+
+// PortConfig specifies an exposed port which can be
+// addressed using the given name. This can be later queried
+// using a service discovery api or a DNS SRV query. The node
+// port specifies a port that can be used to address this
+// service from outside the cluster by sending a connection
+// request to this port on any node in the cluster.
+message PortConfig {
+	enum Protocol {
+		option (gogoproto.goproto_enum_prefix) = false;
+
+		TCP = 0 [(gogoproto.enumvalue_customname) = "ProtocolTCP"];
+		UDP = 1 [(gogoproto.enumvalue_customname) = "ProtocolUDP"];
+		SCTP = 2 [(gogoproto.enumvalue_customname) = "ProtocolSCTP"];
+	}
+
+	// PublishMode controls how ports are published on the swarm.
+	enum PublishMode {
+		option (gogoproto.enum_customname) = "PublishMode";
+		option (gogoproto.goproto_enum_prefix) = false;
+
+		// PublishModeIngress exposes the port across the cluster on all nodes.
+		INGRESS = 0 [(gogoproto.enumvalue_customname) = "PublishModeIngress"];
+
+		// PublishModeHost exposes the port on just the target host. If the
+		// published port is undefined, an ephemeral port will be allocated. If
+		// the published port is defined, the node will attempt to allocate it,
+		// erroring the task if it fails.
+		HOST = 1 [(gogoproto.enumvalue_customname) = "PublishModeHost"];
+	}
+
+	// Name for the port. If provided, the port information can
+	// be queried using the name as in a DNS SRV query.
+	string name = 1;
+
+	// Protocol for the port which is exposed.
+	Protocol protocol = 2;
+
+	// The port which the application is exposing and is bound to.
+	uint32 target_port = 3;
+
+	// PublishedPort specifies the port on which the service is exposed. If
+	// specified, the port must be within the available range. If not specified
+	// (value is zero), an available port is automatically assigned.
+	uint32 published_port = 4;
+
+	// PublishMode controls how the port is published.
+	PublishMode publish_mode = 5;
+}
+
+// Driver is a generic driver type to be used throughout the API. For now, a
+// driver is simply a name and set of options. The field contents depend on the
+// target use case and driver application. For example, a network driver may
+// have different rules than a volume driver.
+message Driver {
+	string name = 1;
+	map<string, string> options = 2;
+}
+
+message IPAMOptions {
+	Driver driver = 1;
+	repeated IPAMConfig configs = 3;
+}
+
+// Peer should be used anywhere we are describing a remote peer.
+message Peer {
+	string node_id = 1;
+	string addr = 2;
+}
+
+// WeightedPeer should be used anywhere we are describing a remote peer
+// with a weight.
+message WeightedPeer {
+	Peer peer = 1;
+	int64 weight = 2;
+}
+
+
+message IssuanceStatus {
+	enum State {
+		option (gogoproto.goproto_enum_prefix) = false;
+
+		UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "IssuanceStateUnknown"];
+		// A new certificate should be issued
+		RENEW = 1 [(gogoproto.enumvalue_customname)="IssuanceStateRenew"];
+		// Certificate is pending acceptance
+		PENDING = 2 [(gogoproto.enumvalue_customname)="IssuanceStatePending"];
+		// Certificate issuance completed successfully
+		ISSUED = 3 [(gogoproto.enumvalue_customname)="IssuanceStateIssued"];
+		// Certificate issuance failed
+		FAILED = 4 [(gogoproto.enumvalue_customname)="IssuanceStateFailed"];
+		// Signals workers to renew their certificate. From the CA's perspective
+		// this is equivalent to IssuanceStateIssued: a noop.
+		ROTATE = 5 [(gogoproto.enumvalue_customname)="IssuanceStateRotate"];
+	}
+	State state = 1;
+
+	// Err is set if the Certificate Issuance is in an error state.
+	// The following states should report a companion error:
+	//	FAILED
+	string err = 2;
+}
+
+message AcceptancePolicy {
+	message RoleAdmissionPolicy {
+		message Secret {
+			// The actual content (possibly hashed)
+			bytes data = 1;
+			// The type of hash we are using, or "plaintext"
+			string alg = 2;
+		}
+
+		NodeRole role = 1;
+		// Autoaccept controls which roles' certificates are automatically
+		// issued without administrator intervention.
+		bool autoaccept = 2;
+		// Secret represents a user-provided string that is necessary for new
+		// nodes to join the cluster.
+		Secret secret = 3;
+	}
+
+	repeated RoleAdmissionPolicy policies = 1;
+}
+
+message ExternalCA {
+	enum CAProtocol {
+		CFSSL = 0 [(gogoproto.enumvalue_customname) = "CAProtocolCFSSL"];
+	}
+
+	// Protocol is the protocol used by this external CA.
+	CAProtocol protocol = 1;
+
+	// URL is the URL where the external CA can be reached.
+	string url = 2 [(gogoproto.customname) = "URL"];
+
+	// Options is a set of additional key/value pairs whose interpretation
+	// depends on the specified CA type.
+	map<string, string> options = 3;
+
+	// CACert specifies which root CA is used by this external CA.
+	bytes ca_cert = 4 [(gogoproto.customname) = "CACert"];
+}
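The PortConfig message above encodes two publishing strategies: INGRESS exposes the port on every node, HOST exposes it only on the node running the task, and a zero published_port asks the allocator to pick one. A standalone sketch; the stand-in types and constant names mirror the gogoproto customnames shown above, everything else is illustrative.

package main

import "fmt"

type Protocol int32
type PublishMode int32

const (
	ProtocolTCP        Protocol    = 0
	PublishModeIngress PublishMode = 0
	PublishModeHost    PublishMode = 1
)

type PortConfig struct {
	Name          string
	Protocol      Protocol
	TargetPort    uint32
	PublishedPort uint32 // zero means "allocate an available port for me"
	PublishMode   PublishMode
}

func main() {
	// Cluster-wide ingress to container port 8080 on an auto-assigned port:
	web := PortConfig{Name: "http", Protocol: ProtocolTCP, TargetPort: 8080, PublishMode: PublishModeIngress}
	if web.PublishedPort == 0 {
		fmt.Println("published port will be chosen from the available range")
	}
}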
+
+message CAConfig {
+	// NodeCertExpiry is the duration certificates should be issued for.
+	// Note: can't use stdduration because this field needs to be nullable.
+	google.protobuf.Duration node_cert_expiry = 1;
+
+	// ExternalCAs is a list of CAs to which a manager node will make
+	// certificate signing requests for node certificates.
+	repeated ExternalCA external_cas = 2 [(gogoproto.customname) = "ExternalCAs"];
+
+	// SigningCACert is the desired CA certificate to be used as the root and
+	// signing CA for the swarm. If not provided, indicates that we are either happy
+	// with the current configuration, or (together with a bump in the ForceRotate value)
+	// that we want a certificate and key generated for us.
+	bytes signing_ca_cert = 3 [(gogoproto.customname) = "SigningCACert"];
+
+	// SigningCAKey is the desired private key, matching the signing CA cert, to be used
+	// to sign certificates for the swarm.
+	bytes signing_ca_key = 4 [(gogoproto.customname) = "SigningCAKey"];
+
+	// ForceRotate is a counter that triggers a root CA rotation even if no relevant
+	// parameters have been changed in the spec. This will force the manager to generate a new
+	// certificate and key, if none have been provided.
+	uint64 force_rotate = 5;
+}
+
+// OrchestrationConfig defines cluster-level orchestration settings.
+message OrchestrationConfig {
+	// TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or
+	// node. If negative, never remove completed or failed tasks.
+	int64 task_history_retention_limit = 1;
+}
+
+// TaskDefaults specifies default values for task creation.
+message TaskDefaults {
+	// LogDriver specifies the log driver to use for the cluster if not
+	// specified for each task.
+	//
+	// If this is changed, only new tasks will pick up the new log driver.
+	// Existing tasks will continue to use the previous default until rescheduled.
+	Driver log_driver = 1;
+}
+
+// DispatcherConfig defines cluster-level dispatcher settings.
+message DispatcherConfig {
+	// HeartbeatPeriod defines how often an agent should send heartbeats to
+	// the dispatcher.
+	// Note: can't use stdduration because this field needs to be nullable.
+	google.protobuf.Duration heartbeat_period = 1;
+}
+
+// RaftConfig defines raft settings for the cluster.
+message RaftConfig {
+	// SnapshotInterval is the number of log entries between snapshots.
+	uint64 snapshot_interval = 1;
+	// KeepOldSnapshots is the number of snapshots to keep beyond the
+	// current snapshot.
+	uint64 keep_old_snapshots = 2;
+	// LogEntriesForSlowFollowers is the number of log entries to keep
+	// around to sync up slow followers after a snapshot is created.
+	uint64 log_entries_for_slow_followers = 3;
+	// HeartbeatTick defines the number of ticks (in seconds) between
+	// each heartbeat message sent to other members for health-check.
+	uint32 heartbeat_tick = 4;
+	// ElectionTick defines the number of ticks (in seconds) needed
+	// without a leader to trigger a new election.
+	uint32 election_tick = 5;
+}
+
+message EncryptionConfig {
+	// AutoLockManagers specifies whether or not managers' TLS keys and raft data
+	// should be encrypted at rest in such a way that they must be unlocked
+	// before the manager node starts up again.
+	bool auto_lock_managers = 1;
+}
+
+message SpreadOver {
+	string spread_descriptor = 1; // label descriptor, such as engine.labels.az
+	// TODO: support node information beyond engine and node labels
+
+	// TODO: in the future, add a map that provides weights for weighted
+	// spreading.
+}
+
+message PlacementPreference {
+	oneof Preference {
+		SpreadOver spread = 1;
+	}
+}
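Since heartbeat_tick and election_tick in the RaftConfig message above are both expressed in ticks, their relationship matters: the usual raft guidance, which this file does not itself state, is that the election timeout should comfortably exceed the heartbeat. A standalone sketch with illustrative values (the stand-in struct and validate helper are not part of the vendored API):

package main

import "fmt"

// RaftConfig is a plain stand-in for the generated message above.
type RaftConfig struct {
	SnapshotInterval uint64
	HeartbeatTick    uint32
	ElectionTick     uint32
}

// validate applies the conventional raft rule that elections must be
// slower than heartbeats, so healthy followers never start elections.
func validate(c RaftConfig) error {
	if c.ElectionTick <= c.HeartbeatTick {
		return fmt.Errorf("election tick (%d) must exceed heartbeat tick (%d)", c.ElectionTick, c.HeartbeatTick)
	}
	return nil
}

func main() {
	cfg := RaftConfig{SnapshotInterval: 10000, HeartbeatTick: 1, ElectionTick: 10} // illustrative values
	fmt.Println(validate(cfg)) // <nil>
}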
+
+// Placement specifies task distribution constraints.
+message Placement {
+	// Constraints specifies a set of requirements a node should meet for a task.
+	repeated string constraints = 1;
+
+	// Preferences provide a way to make the scheduler aware of factors
+	// such as topology. They are provided in order from highest to lowest
+	// precedence.
+	repeated PlacementPreference preferences = 2;
+
+	// Platforms stores all the platforms that the image can run on.
+	// This field is used in the platform filter for scheduling. If empty,
+	// then the platform filter is off, meaning there are no scheduling restrictions.
+	repeated Platform platforms = 3;
+}
+
+// JoinTokens contains the join tokens for workers and managers.
+message JoinTokens {
+	// Worker is the join token workers may use to join the swarm.
+	string worker = 1;
+
+	// Manager is the join token managers may use to join the swarm.
+	string manager = 2;
+}
+
+message RootCA {
+	// CAKey is the root CA private key.
+	bytes ca_key = 1 [(gogoproto.customname) = "CAKey"];
+
+	// CACert is the root CA certificate.
+	bytes ca_cert = 2 [(gogoproto.customname) = "CACert"];
+
+	// CACertHash is the digest of the CA Certificate.
+	string ca_cert_hash = 3 [(gogoproto.customname) = "CACertHash"];
+
+	// JoinTokens contains the join tokens for workers and managers.
+	JoinTokens join_tokens = 4 [(gogoproto.nullable) = false];
+
+	// RootRotation contains the new root cert and key we want to rotate to. If this
+	// is nil, we are not in the middle of a root rotation.
+	RootRotation root_rotation = 5;
+
+	// LastForcedRotation matches the Cluster Spec's CAConfig's ForceRotate counter.
+	// It indicates when the current CA cert and key were generated (or updated).
+	uint64 last_forced_rotation = 6;
+}
+
+
+enum NodeRole {
+	option (gogoproto.enum_customname) = "NodeRole";
+	option (gogoproto.goproto_enum_prefix) = false;
+
+	WORKER = 0 [(gogoproto.enumvalue_customname) = "NodeRoleWorker"];
+	MANAGER = 1 [(gogoproto.enumvalue_customname) = "NodeRoleManager"];
+}
+
+message Certificate {
+	NodeRole role = 1;
+
+	bytes csr = 2 [(gogoproto.customname) = "CSR"];
+
+	IssuanceStatus status = 3 [(gogoproto.nullable) = false];
+
+	bytes certificate = 4;
+
+	// CN represents the node ID.
+	string cn = 5 [(gogoproto.customname) = "CN"];
+}
+
+
+// Symmetric keys to encrypt inter-agent communication.
+message EncryptionKey {
+	// Agent subsystem the key is intended for. Example:
+	// networking:gossip
+	string subsystem = 1;
+
+	// Encryption algorithm that can be implemented using this key.
+	enum Algorithm {
+		option (gogoproto.goproto_enum_prefix) = false;
+
+		AES_128_GCM = 0;
+	}
+
+	Algorithm algorithm = 2;
+
+	bytes key = 3;
+
+	// Time stamp from the lamport clock of the key allocator to
+	// identify the relative age of the key.
+	uint64 lamport_time = 4;
+}
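Tying together the Placement, PlacementPreference, and SpreadOver messages above: every constraint must hold on a candidate node, then preferences bias the scheduler in order of precedence. A standalone sketch with hypothetical label names (the stand-in structs mirror the proto field names only):

package main

import "fmt"

// Plain stand-ins for the messages above.
type SpreadOver struct{ SpreadDescriptor string }

type PlacementPreference struct{ Spread *SpreadOver }

type Placement struct {
	Constraints []string              // all must be satisfied
	Preferences []PlacementPreference // highest precedence first
}

func main() {
	p := Placement{
		// Hard requirement: only nodes carrying this label qualify.
		Constraints: []string{"node.labels.region == us-east"},
		// Soft preference: spread replicas across availability zones.
		Preferences: []PlacementPreference{{Spread: &SpreadOver{SpreadDescriptor: "node.labels.az"}}},
	}
	fmt.Println(p.Constraints[0], "-> then spread over", p.Preferences[0].Spread.SpreadDescriptor)
}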
+
+// ManagerStatus provides information about the state of a manager in the cluster.
+message ManagerStatus {
+	// RaftID specifies the internal ID used by the manager in a raft context; it can
+	// never be modified and is used only for informational purposes.
+	uint64 raft_id = 1;
+
+	// Addr is the address advertised to raft.
+	string addr = 2;
+
+	// Leader is set to true if this node is the raft leader.
+	bool leader = 3;
+
+	// Reachability specifies whether this node is reachable.
+	RaftMemberStatus.Reachability reachability = 4;
+}
+
+// FileTarget represents a specific target that is backed by a file.
+message FileTarget {
+	// Name represents the final filename in the filesystem.
+	string name = 1;
+
+	// UID represents the file UID.
+	string uid = 2 [(gogoproto.customname) = "UID"];
+
+	// GID represents the file GID.
+	string gid = 3 [(gogoproto.customname) = "GID"];
+
+	// Mode represents the FileMode of the file.
+	uint32 mode = 4 [(gogoproto.customtype) = "os.FileMode", (gogoproto.nullable) = false];
+}
+
+// SecretReference is the linkage between a service and a secret that it uses.
+message SecretReference {
+	// SecretID represents the ID of the specific Secret that we're
+	// referencing. This identifier exists so that SecretReferences don't leak
+	// any information about the secret contents.
+	string secret_id = 1;
+
+	// SecretName is the name of the secret that this references, but this is just provided for
+	// lookup/display purposes. The secret in the reference will be identified by its ID.
+	string secret_name = 2;
+
+	// Target specifies how this secret should be exposed to the task.
+	oneof target {
+		FileTarget file = 3;
+	}
+}
+
+// ConfigReference is the linkage between a service and a config that it uses.
+message ConfigReference {
+	// ConfigID represents the ID of the specific Config that we're
+	// referencing.
+	string config_id = 1;
+
+	// ConfigName is the name of the config that this references, but this is just provided for
+	// lookup/display purposes. The config in the reference will be identified by its ID.
+	string config_name = 2;
+
+	// Target specifies how this config should be exposed to the task.
+	oneof target {
+		FileTarget file = 3;
+	}
+}
+
+// BlacklistedCertificate is a record for a blacklisted certificate. It does not
+// contain the certificate's CN, because these records are indexed by CN.
+message BlacklistedCertificate {
+	// Expiry is the latest known expiration time of a certificate that
+	// was issued for the given CN.
+	// Note: can't use stdtime because this field is nullable.
+	google.protobuf.Timestamp expiry = 1;
+}
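A SecretReference, per the messages above, deliberately carries only the secret's ID and display name plus instructions for surfacing it as a file, never the payload. A standalone sketch of that shape (all values hypothetical; plain structs stand in for the generated types):

package main

import (
	"fmt"
	"os"
)

// Plain stand-ins for the FileTarget and SecretReference messages above.
type FileTarget struct {
	Name string
	UID  string
	GID  string
	Mode os.FileMode
}

type SecretReference struct {
	SecretID   string
	SecretName string
	File       *FileTarget // the oneof's file target
}

func main() {
	ref := SecretReference{
		SecretID:   "abc123",      // hypothetical ID; the ID is authoritative
		SecretName: "db-password", // display/lookup only
		File:       &FileTarget{Name: "db_password", UID: "0", GID: "0", Mode: 0o400},
	}
	fmt.Printf("mount %s as %s (mode %o)\n", ref.SecretName, ref.File.Name, ref.File.Mode)
}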
+
+// HealthConfig holds configuration settings for the HEALTHCHECK feature.
+message HealthConfig {
+	// Test is the test to perform to check that the container is healthy.
+	// An empty slice means to inherit the default.
+	// The options are:
+	// {} : inherit healthcheck
+	// {"NONE"} : disable healthcheck
+	// {"CMD", args...} : exec arguments directly
+	// {"CMD-SHELL", command} : run command with system's default shell
+	repeated string test = 1;
+
+	// Interval is the time to wait between checks. Zero means inherit.
+	// Note: can't use stdduration because this field needs to be nullable.
+	google.protobuf.Duration interval = 2;
+
+	// Timeout is the time to wait before considering the check to have hung.
+	// Zero means inherit.
+	// Note: can't use stdduration because this field needs to be nullable.
+	google.protobuf.Duration timeout = 3;
+
+	// Retries is the number of consecutive failures needed to consider a
+	// container as unhealthy. Zero means inherit.
+	int32 retries = 4;
+
+	// Start period is the period for container initialization during
+	// which health check failures will not count towards the maximum
+	// number of retries.
+	google.protobuf.Duration start_period = 5;
+}
+
+message MaybeEncryptedRecord {
+	enum Algorithm {
+		NONE = 0 [(gogoproto.enumvalue_customname) = "NotEncrypted"];
+		SECRETBOX_SALSA20_POLY1305 = 1 [(gogoproto.enumvalue_customname) = "NACLSecretboxSalsa20Poly1305"];
+		FERNET_AES_128_CBC = 2 [(gogoproto.enumvalue_customname) = "FernetAES128CBC"];
+	}
+
+	Algorithm algorithm = 1;
+	bytes data = 2;
+	bytes nonce = 3;
+}
+
+
+message RootRotation {
+	bytes ca_cert = 1 [(gogoproto.customname) = "CACert"];
+	bytes ca_key = 2 [(gogoproto.customname) = "CAKey"];
+	// CrossSignedCACert is the CACert that has been cross-signed by the previous root.
+	bytes cross_signed_ca_cert = 3 [(gogoproto.customname) = "CrossSignedCACert"];
+}
+
+// Privileges specifies security configuration/permissions.
+message Privileges {
+	// CredentialSpec for managed service account (Windows only).
+	message CredentialSpec {
+		oneof source {
+			string file = 1;
+			string registry = 2;
+		}
+	}
+	CredentialSpec credential_spec = 1;
+
+	// SELinuxContext contains the SELinux labels for the container.
+	message SELinuxContext {
+		bool disable = 1;
+
+		string user = 2;
+		string role = 3;
+		string type = 4;
+		string level = 5;
+	}
+	SELinuxContext selinux_context = 2 [(gogoproto.customname) = "SELinuxContext"];
+}
diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/watch.pb.go b/extender/code/vendor/github.com/docker/swarmkit/api/watch.pb.go
new file mode 100644
index 000000000..9d152514c
--- /dev/null
+++ b/extender/code/vendor/github.com/docker/swarmkit/api/watch.pb.go
@@ -0,0 +1,4581 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/docker/swarmkit/api/watch.proto
+
+package api
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+import _ "github.com/docker/swarmkit/protobuf/plugin"
+
+import deepcopy "github.com/docker/swarmkit/api/deepcopy"
+
+import context "golang.org/x/net/context"
+import grpc "google.golang.org/grpc"
+
+import raftselector "github.com/docker/swarmkit/manager/raftselector"
+import codes "google.golang.org/grpc/codes"
+import status "google.golang.org/grpc/status"
+import metadata "google.golang.org/grpc/metadata"
+import peer "google.golang.org/grpc/peer"
+import rafttime "time"
+
+import strings "strings"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// WatchActionKind distinguishes between creations, updates, and removals. It
+// is structured as a bitmap so multiple kinds of events can be requested with
+// a mask.
+type WatchActionKind int32 + +const ( + WatchActionKindUnknown WatchActionKind = 0 + WatchActionKindCreate WatchActionKind = 1 + WatchActionKindUpdate WatchActionKind = 2 + WatchActionKindRemove WatchActionKind = 4 +) + +var WatchActionKind_name = map[int32]string{ + 0: "WATCH_ACTION_UNKNOWN", + 1: "WATCH_ACTION_CREATE", + 2: "WATCH_ACTION_UPDATE", + 4: "WATCH_ACTION_REMOVE", +} +var WatchActionKind_value = map[string]int32{ + "WATCH_ACTION_UNKNOWN": 0, + "WATCH_ACTION_CREATE": 1, + "WATCH_ACTION_UPDATE": 2, + "WATCH_ACTION_REMOVE": 4, +} + +func (x WatchActionKind) String() string { + return proto.EnumName(WatchActionKind_name, int32(x)) +} +func (WatchActionKind) EnumDescriptor() ([]byte, []int) { return fileDescriptorWatch, []int{0} } + +type Object struct { + // Types that are valid to be assigned to Object: + // *Object_Node + // *Object_Service + // *Object_Network + // *Object_Task + // *Object_Cluster + // *Object_Secret + // *Object_Resource + // *Object_Extension + // *Object_Config + Object isObject_Object `protobuf_oneof:"Object"` +} + +func (m *Object) Reset() { *m = Object{} } +func (*Object) ProtoMessage() {} +func (*Object) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{0} } + +type isObject_Object interface { + isObject_Object() + MarshalTo([]byte) (int, error) + Size() int +} + +type Object_Node struct { + Node *Node `protobuf:"bytes,1,opt,name=node,oneof"` +} +type Object_Service struct { + Service *Service `protobuf:"bytes,2,opt,name=service,oneof"` +} +type Object_Network struct { + Network *Network `protobuf:"bytes,3,opt,name=network,oneof"` +} +type Object_Task struct { + Task *Task `protobuf:"bytes,4,opt,name=task,oneof"` +} +type Object_Cluster struct { + Cluster *Cluster `protobuf:"bytes,5,opt,name=cluster,oneof"` +} +type Object_Secret struct { + Secret *Secret `protobuf:"bytes,6,opt,name=secret,oneof"` +} +type Object_Resource struct { + Resource *Resource `protobuf:"bytes,7,opt,name=resource,oneof"` +} +type Object_Extension struct { + Extension *Extension `protobuf:"bytes,8,opt,name=extension,oneof"` +} +type Object_Config struct { + Config *Config `protobuf:"bytes,9,opt,name=config,oneof"` +} + +func (*Object_Node) isObject_Object() {} +func (*Object_Service) isObject_Object() {} +func (*Object_Network) isObject_Object() {} +func (*Object_Task) isObject_Object() {} +func (*Object_Cluster) isObject_Object() {} +func (*Object_Secret) isObject_Object() {} +func (*Object_Resource) isObject_Object() {} +func (*Object_Extension) isObject_Object() {} +func (*Object_Config) isObject_Object() {} + +func (m *Object) GetObject() isObject_Object { + if m != nil { + return m.Object + } + return nil +} + +func (m *Object) GetNode() *Node { + if x, ok := m.GetObject().(*Object_Node); ok { + return x.Node + } + return nil +} + +func (m *Object) GetService() *Service { + if x, ok := m.GetObject().(*Object_Service); ok { + return x.Service + } + return nil +} + +func (m *Object) GetNetwork() *Network { + if x, ok := m.GetObject().(*Object_Network); ok { + return x.Network + } + return nil +} + +func (m *Object) GetTask() *Task { + if x, ok := m.GetObject().(*Object_Task); ok { + return x.Task + } + return nil +} + +func (m *Object) GetCluster() *Cluster { + if x, ok := m.GetObject().(*Object_Cluster); ok { + return x.Cluster + } + return nil +} + +func (m *Object) GetSecret() *Secret { + if x, ok := m.GetObject().(*Object_Secret); ok { + return x.Secret + } + return nil +} + +func (m *Object) GetResource() *Resource { + if x, ok := 
m.GetObject().(*Object_Resource); ok { + return x.Resource + } + return nil +} + +func (m *Object) GetExtension() *Extension { + if x, ok := m.GetObject().(*Object_Extension); ok { + return x.Extension + } + return nil +} + +func (m *Object) GetConfig() *Config { + if x, ok := m.GetObject().(*Object_Config); ok { + return x.Config + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Object) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Object_OneofMarshaler, _Object_OneofUnmarshaler, _Object_OneofSizer, []interface{}{ + (*Object_Node)(nil), + (*Object_Service)(nil), + (*Object_Network)(nil), + (*Object_Task)(nil), + (*Object_Cluster)(nil), + (*Object_Secret)(nil), + (*Object_Resource)(nil), + (*Object_Extension)(nil), + (*Object_Config)(nil), + } +} + +func _Object_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Object) + // Object + switch x := m.Object.(type) { + case *Object_Node: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Node); err != nil { + return err + } + case *Object_Service: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Service); err != nil { + return err + } + case *Object_Network: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Network); err != nil { + return err + } + case *Object_Task: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Task); err != nil { + return err + } + case *Object_Cluster: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Cluster); err != nil { + return err + } + case *Object_Secret: + _ = b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Secret); err != nil { + return err + } + case *Object_Resource: + _ = b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Resource); err != nil { + return err + } + case *Object_Extension: + _ = b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Extension); err != nil { + return err + } + case *Object_Config: + _ = b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Config); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Object.Object has unexpected type %T", x) + } + return nil +} + +func _Object_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Object) + switch tag { + case 1: // Object.node + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Node) + err := b.DecodeMessage(msg) + m.Object = &Object_Node{msg} + return true, err + case 2: // Object.service + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Service) + err := b.DecodeMessage(msg) + m.Object = &Object_Service{msg} + return true, err + case 3: // Object.network + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Network) + err := b.DecodeMessage(msg) + m.Object = &Object_Network{msg} + return true, err + case 4: // Object.task + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Task) + err := b.DecodeMessage(msg) + m.Object = &Object_Task{msg} + return true, err + case 5: // Object.cluster + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Cluster) + err := 
b.DecodeMessage(msg) + m.Object = &Object_Cluster{msg} + return true, err + case 6: // Object.secret + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Secret) + err := b.DecodeMessage(msg) + m.Object = &Object_Secret{msg} + return true, err + case 7: // Object.resource + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Resource) + err := b.DecodeMessage(msg) + m.Object = &Object_Resource{msg} + return true, err + case 8: // Object.extension + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Extension) + err := b.DecodeMessage(msg) + m.Object = &Object_Extension{msg} + return true, err + case 9: // Object.config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Config) + err := b.DecodeMessage(msg) + m.Object = &Object_Config{msg} + return true, err + default: + return false, nil + } +} + +func _Object_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Object) + // Object + switch x := m.Object.(type) { + case *Object_Node: + s := proto.Size(x.Node) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Service: + s := proto.Size(x.Service) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Network: + s := proto.Size(x.Network) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Task: + s := proto.Size(x.Task) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Cluster: + s := proto.Size(x.Cluster) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Secret: + s := proto.Size(x.Secret) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Resource: + s := proto.Size(x.Resource) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Extension: + s := proto.Size(x.Extension) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Config: + s := proto.Size(x.Config) + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// FIXME(aaronl): These messages should ideally be embedded in SelectBy, but +// protoc generates bad code for that. 
+type SelectBySlot struct { + ServiceID string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + Slot uint64 `protobuf:"varint,2,opt,name=slot,proto3" json:"slot,omitempty"` +} + +func (m *SelectBySlot) Reset() { *m = SelectBySlot{} } +func (*SelectBySlot) ProtoMessage() {} +func (*SelectBySlot) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{1} } + +type SelectByCustom struct { + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + Index string `protobuf:"bytes,2,opt,name=index,proto3" json:"index,omitempty"` + Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *SelectByCustom) Reset() { *m = SelectByCustom{} } +func (*SelectByCustom) ProtoMessage() {} +func (*SelectByCustom) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{2} } + +type SelectBy struct { + // TODO(aaronl): Are all of these things we want to expose in + // the API? Exposing them may commit us to maintaining those + // internal indices going forward. + // + // Types that are valid to be assigned to By: + // *SelectBy_ID + // *SelectBy_IDPrefix + // *SelectBy_Name + // *SelectBy_NamePrefix + // *SelectBy_Custom + // *SelectBy_CustomPrefix + // *SelectBy_ServiceID + // *SelectBy_NodeID + // *SelectBy_Slot + // *SelectBy_DesiredState + // *SelectBy_Role + // *SelectBy_Membership + // *SelectBy_ReferencedNetworkID + // *SelectBy_ReferencedSecretID + // *SelectBy_ReferencedConfigID + // *SelectBy_Kind + By isSelectBy_By `protobuf_oneof:"By"` +} + +func (m *SelectBy) Reset() { *m = SelectBy{} } +func (*SelectBy) ProtoMessage() {} +func (*SelectBy) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{3} } + +type isSelectBy_By interface { + isSelectBy_By() + MarshalTo([]byte) (int, error) + Size() int +} + +type SelectBy_ID struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3,oneof"` +} +type SelectBy_IDPrefix struct { + IDPrefix string `protobuf:"bytes,2,opt,name=id_prefix,json=idPrefix,proto3,oneof"` +} +type SelectBy_Name struct { + Name string `protobuf:"bytes,3,opt,name=name,proto3,oneof"` +} +type SelectBy_NamePrefix struct { + NamePrefix string `protobuf:"bytes,4,opt,name=name_prefix,json=namePrefix,proto3,oneof"` +} +type SelectBy_Custom struct { + Custom *SelectByCustom `protobuf:"bytes,5,opt,name=custom,oneof"` +} +type SelectBy_CustomPrefix struct { + CustomPrefix *SelectByCustom `protobuf:"bytes,6,opt,name=custom_prefix,json=customPrefix,oneof"` +} +type SelectBy_ServiceID struct { + ServiceID string `protobuf:"bytes,7,opt,name=service_id,json=serviceId,proto3,oneof"` +} +type SelectBy_NodeID struct { + NodeID string `protobuf:"bytes,8,opt,name=node_id,json=nodeId,proto3,oneof"` +} +type SelectBy_Slot struct { + Slot *SelectBySlot `protobuf:"bytes,9,opt,name=slot,oneof"` +} +type SelectBy_DesiredState struct { + DesiredState TaskState `protobuf:"varint,10,opt,name=desired_state,json=desiredState,proto3,enum=docker.swarmkit.v1.TaskState,oneof"` +} +type SelectBy_Role struct { + Role NodeRole `protobuf:"varint,11,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole,oneof"` +} +type SelectBy_Membership struct { + Membership NodeSpec_Membership `protobuf:"varint,12,opt,name=membership,proto3,enum=docker.swarmkit.v1.NodeSpec_Membership,oneof"` +} +type SelectBy_ReferencedNetworkID struct { + ReferencedNetworkID string `protobuf:"bytes,13,opt,name=referenced_network_id,json=referencedNetworkId,proto3,oneof"` +} +type SelectBy_ReferencedSecretID struct { + 
ReferencedSecretID string `protobuf:"bytes,14,opt,name=referenced_secret_id,json=referencedSecretId,proto3,oneof"` +} +type SelectBy_ReferencedConfigID struct { + ReferencedConfigID string `protobuf:"bytes,16,opt,name=referenced_config_id,json=referencedConfigId,proto3,oneof"` +} +type SelectBy_Kind struct { + Kind string `protobuf:"bytes,15,opt,name=kind,proto3,oneof"` +} + +func (*SelectBy_ID) isSelectBy_By() {} +func (*SelectBy_IDPrefix) isSelectBy_By() {} +func (*SelectBy_Name) isSelectBy_By() {} +func (*SelectBy_NamePrefix) isSelectBy_By() {} +func (*SelectBy_Custom) isSelectBy_By() {} +func (*SelectBy_CustomPrefix) isSelectBy_By() {} +func (*SelectBy_ServiceID) isSelectBy_By() {} +func (*SelectBy_NodeID) isSelectBy_By() {} +func (*SelectBy_Slot) isSelectBy_By() {} +func (*SelectBy_DesiredState) isSelectBy_By() {} +func (*SelectBy_Role) isSelectBy_By() {} +func (*SelectBy_Membership) isSelectBy_By() {} +func (*SelectBy_ReferencedNetworkID) isSelectBy_By() {} +func (*SelectBy_ReferencedSecretID) isSelectBy_By() {} +func (*SelectBy_ReferencedConfigID) isSelectBy_By() {} +func (*SelectBy_Kind) isSelectBy_By() {} + +func (m *SelectBy) GetBy() isSelectBy_By { + if m != nil { + return m.By + } + return nil +} + +func (m *SelectBy) GetID() string { + if x, ok := m.GetBy().(*SelectBy_ID); ok { + return x.ID + } + return "" +} + +func (m *SelectBy) GetIDPrefix() string { + if x, ok := m.GetBy().(*SelectBy_IDPrefix); ok { + return x.IDPrefix + } + return "" +} + +func (m *SelectBy) GetName() string { + if x, ok := m.GetBy().(*SelectBy_Name); ok { + return x.Name + } + return "" +} + +func (m *SelectBy) GetNamePrefix() string { + if x, ok := m.GetBy().(*SelectBy_NamePrefix); ok { + return x.NamePrefix + } + return "" +} + +func (m *SelectBy) GetCustom() *SelectByCustom { + if x, ok := m.GetBy().(*SelectBy_Custom); ok { + return x.Custom + } + return nil +} + +func (m *SelectBy) GetCustomPrefix() *SelectByCustom { + if x, ok := m.GetBy().(*SelectBy_CustomPrefix); ok { + return x.CustomPrefix + } + return nil +} + +func (m *SelectBy) GetServiceID() string { + if x, ok := m.GetBy().(*SelectBy_ServiceID); ok { + return x.ServiceID + } + return "" +} + +func (m *SelectBy) GetNodeID() string { + if x, ok := m.GetBy().(*SelectBy_NodeID); ok { + return x.NodeID + } + return "" +} + +func (m *SelectBy) GetSlot() *SelectBySlot { + if x, ok := m.GetBy().(*SelectBy_Slot); ok { + return x.Slot + } + return nil +} + +func (m *SelectBy) GetDesiredState() TaskState { + if x, ok := m.GetBy().(*SelectBy_DesiredState); ok { + return x.DesiredState + } + return TaskStateNew +} + +func (m *SelectBy) GetRole() NodeRole { + if x, ok := m.GetBy().(*SelectBy_Role); ok { + return x.Role + } + return NodeRoleWorker +} + +func (m *SelectBy) GetMembership() NodeSpec_Membership { + if x, ok := m.GetBy().(*SelectBy_Membership); ok { + return x.Membership + } + return NodeMembershipPending +} + +func (m *SelectBy) GetReferencedNetworkID() string { + if x, ok := m.GetBy().(*SelectBy_ReferencedNetworkID); ok { + return x.ReferencedNetworkID + } + return "" +} + +func (m *SelectBy) GetReferencedSecretID() string { + if x, ok := m.GetBy().(*SelectBy_ReferencedSecretID); ok { + return x.ReferencedSecretID + } + return "" +} + +func (m *SelectBy) GetReferencedConfigID() string { + if x, ok := m.GetBy().(*SelectBy_ReferencedConfigID); ok { + return x.ReferencedConfigID + } + return "" +} + +func (m *SelectBy) GetKind() string { + if x, ok := m.GetBy().(*SelectBy_Kind); ok { + return x.Kind + } + return "" +} + +// XXX_OneofFuncs is 
for the internal use of the proto package. +func (*SelectBy) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SelectBy_OneofMarshaler, _SelectBy_OneofUnmarshaler, _SelectBy_OneofSizer, []interface{}{ + (*SelectBy_ID)(nil), + (*SelectBy_IDPrefix)(nil), + (*SelectBy_Name)(nil), + (*SelectBy_NamePrefix)(nil), + (*SelectBy_Custom)(nil), + (*SelectBy_CustomPrefix)(nil), + (*SelectBy_ServiceID)(nil), + (*SelectBy_NodeID)(nil), + (*SelectBy_Slot)(nil), + (*SelectBy_DesiredState)(nil), + (*SelectBy_Role)(nil), + (*SelectBy_Membership)(nil), + (*SelectBy_ReferencedNetworkID)(nil), + (*SelectBy_ReferencedSecretID)(nil), + (*SelectBy_ReferencedConfigID)(nil), + (*SelectBy_Kind)(nil), + } +} + +func _SelectBy_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SelectBy) + // By + switch x := m.By.(type) { + case *SelectBy_ID: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.ID) + case *SelectBy_IDPrefix: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.IDPrefix) + case *SelectBy_Name: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.Name) + case *SelectBy_NamePrefix: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.NamePrefix) + case *SelectBy_Custom: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Custom); err != nil { + return err + } + case *SelectBy_CustomPrefix: + _ = b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CustomPrefix); err != nil { + return err + } + case *SelectBy_ServiceID: + _ = b.EncodeVarint(7<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.ServiceID) + case *SelectBy_NodeID: + _ = b.EncodeVarint(8<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.NodeID) + case *SelectBy_Slot: + _ = b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Slot); err != nil { + return err + } + case *SelectBy_DesiredState: + _ = b.EncodeVarint(10<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.DesiredState)) + case *SelectBy_Role: + _ = b.EncodeVarint(11<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.Role)) + case *SelectBy_Membership: + _ = b.EncodeVarint(12<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.Membership)) + case *SelectBy_ReferencedNetworkID: + _ = b.EncodeVarint(13<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.ReferencedNetworkID) + case *SelectBy_ReferencedSecretID: + _ = b.EncodeVarint(14<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.ReferencedSecretID) + case *SelectBy_ReferencedConfigID: + _ = b.EncodeVarint(16<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.ReferencedConfigID) + case *SelectBy_Kind: + _ = b.EncodeVarint(15<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.Kind) + case nil: + default: + return fmt.Errorf("SelectBy.By has unexpected type %T", x) + } + return nil +} + +func _SelectBy_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SelectBy) + switch tag { + case 1: // By.id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_ID{x} + return true, err + case 2: // By.id_prefix + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_IDPrefix{x} + return true, err + case 3: // By.name + if wire != proto.WireBytes { + 
return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_Name{x} + return true, err + case 4: // By.name_prefix + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_NamePrefix{x} + return true, err + case 5: // By.custom + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SelectByCustom) + err := b.DecodeMessage(msg) + m.By = &SelectBy_Custom{msg} + return true, err + case 6: // By.custom_prefix + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SelectByCustom) + err := b.DecodeMessage(msg) + m.By = &SelectBy_CustomPrefix{msg} + return true, err + case 7: // By.service_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_ServiceID{x} + return true, err + case 8: // By.node_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_NodeID{x} + return true, err + case 9: // By.slot + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SelectBySlot) + err := b.DecodeMessage(msg) + m.By = &SelectBy_Slot{msg} + return true, err + case 10: // By.desired_state + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.By = &SelectBy_DesiredState{TaskState(x)} + return true, err + case 11: // By.role + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.By = &SelectBy_Role{NodeRole(x)} + return true, err + case 12: // By.membership + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.By = &SelectBy_Membership{NodeSpec_Membership(x)} + return true, err + case 13: // By.referenced_network_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_ReferencedNetworkID{x} + return true, err + case 14: // By.referenced_secret_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_ReferencedSecretID{x} + return true, err + case 16: // By.referenced_config_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_ReferencedConfigID{x} + return true, err + case 15: // By.kind + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_Kind{x} + return true, err + default: + return false, nil + } +} + +func _SelectBy_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SelectBy) + // By + switch x := m.By.(type) { + case *SelectBy_ID: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ID))) + n += len(x.ID) + case *SelectBy_IDPrefix: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.IDPrefix))) + n += len(x.IDPrefix) + case *SelectBy_Name: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Name))) + n += len(x.Name) + case *SelectBy_NamePrefix: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.NamePrefix))) + n += len(x.NamePrefix) + case *SelectBy_Custom: + s := proto.Size(x.Custom) + n += proto.SizeVarint(5<<3 
| proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SelectBy_CustomPrefix: + s := proto.Size(x.CustomPrefix) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SelectBy_ServiceID: + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ServiceID))) + n += len(x.ServiceID) + case *SelectBy_NodeID: + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.NodeID))) + n += len(x.NodeID) + case *SelectBy_Slot: + s := proto.Size(x.Slot) + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SelectBy_DesiredState: + n += proto.SizeVarint(10<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.DesiredState)) + case *SelectBy_Role: + n += proto.SizeVarint(11<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Role)) + case *SelectBy_Membership: + n += proto.SizeVarint(12<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Membership)) + case *SelectBy_ReferencedNetworkID: + n += proto.SizeVarint(13<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ReferencedNetworkID))) + n += len(x.ReferencedNetworkID) + case *SelectBy_ReferencedSecretID: + n += proto.SizeVarint(14<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ReferencedSecretID))) + n += len(x.ReferencedSecretID) + case *SelectBy_ReferencedConfigID: + n += proto.SizeVarint(16<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ReferencedConfigID))) + n += len(x.ReferencedConfigID) + case *SelectBy_Kind: + n += proto.SizeVarint(15<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Kind))) + n += len(x.Kind) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type WatchRequest struct { + // Multiple entries are combined using OR logic - i.e. if an event + // matches all of the selectors specified in any single watch entry, + // the event will be sent to the client. + Entries []*WatchRequest_WatchEntry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` + // ResumeFrom provides an version to resume the watch from, if non-nil. + // The watch will return changes since this version, and continue to + // return new changes afterwards. Watch will return an error if the + // server has compacted its log and no longer has complete history to + // this point. + ResumeFrom *Version `protobuf:"bytes,2,opt,name=resume_from,json=resumeFrom" json:"resume_from,omitempty"` + // IncludeOldObject causes WatchMessages to include a copy of the + // previous version of the object on updates. Note that only live + // changes will include the old object (not historical changes + // retrieved using ResumeFrom). + IncludeOldObject bool `protobuf:"varint,3,opt,name=include_old_object,json=includeOldObject,proto3" json:"include_old_object,omitempty"` +} + +func (m *WatchRequest) Reset() { *m = WatchRequest{} } +func (*WatchRequest) ProtoMessage() {} +func (*WatchRequest) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{4} } + +type WatchRequest_WatchEntry struct { + // Kind can contain a builtin type such as "node", "secret", etc. or + // the kind specified by a custom-defined object. 
+ Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + // Action (create/update/delete) + // This is a bitmask, so multiple actions may be OR'd together + Action WatchActionKind `protobuf:"varint,2,opt,name=action,proto3,enum=docker.swarmkit.v1.WatchActionKind" json:"action,omitempty"` + // Filters are combined using AND logic - an event must match + // all of them to pass the filter. + Filters []*SelectBy `protobuf:"bytes,3,rep,name=filters" json:"filters,omitempty"` +} + +func (m *WatchRequest_WatchEntry) Reset() { *m = WatchRequest_WatchEntry{} } +func (*WatchRequest_WatchEntry) ProtoMessage() {} +func (*WatchRequest_WatchEntry) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{4, 0} } + +// WatchMessage is the type of the stream that's returned to the client by +// Watch. Note that the first item of this stream will always be a WatchMessage +// with a nil Object, to signal that the stream has started. +type WatchMessage struct { + Events []*WatchMessage_Event `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"` + // Index versions this change to the data store. It can be used to + // resume the watch from this point. + Version *Version `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` +} + +func (m *WatchMessage) Reset() { *m = WatchMessage{} } +func (*WatchMessage) ProtoMessage() {} +func (*WatchMessage) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{5} } + +type WatchMessage_Event struct { + // Action (create/update/delete) + // Note that WatchMessage does not expose "commit" events that + // mark transaction boundaries. + Action WatchActionKind `protobuf:"varint,1,opt,name=action,proto3,enum=docker.swarmkit.v1.WatchActionKind" json:"action,omitempty"` + // Matched object + Object *Object `protobuf:"bytes,2,opt,name=object" json:"object,omitempty"` + // For updates, OldObject will optionally be included in the + // watch message, containing the previous version of the + // object, if IncludeOldObject was set in WatchRequest. 
+ OldObject *Object `protobuf:"bytes,3,opt,name=old_object,json=oldObject" json:"old_object,omitempty"` +} + +func (m *WatchMessage_Event) Reset() { *m = WatchMessage_Event{} } +func (*WatchMessage_Event) ProtoMessage() {} +func (*WatchMessage_Event) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{5, 0} } + +func init() { + proto.RegisterType((*Object)(nil), "docker.swarmkit.v1.Object") + proto.RegisterType((*SelectBySlot)(nil), "docker.swarmkit.v1.SelectBySlot") + proto.RegisterType((*SelectByCustom)(nil), "docker.swarmkit.v1.SelectByCustom") + proto.RegisterType((*SelectBy)(nil), "docker.swarmkit.v1.SelectBy") + proto.RegisterType((*WatchRequest)(nil), "docker.swarmkit.v1.WatchRequest") + proto.RegisterType((*WatchRequest_WatchEntry)(nil), "docker.swarmkit.v1.WatchRequest.WatchEntry") + proto.RegisterType((*WatchMessage)(nil), "docker.swarmkit.v1.WatchMessage") + proto.RegisterType((*WatchMessage_Event)(nil), "docker.swarmkit.v1.WatchMessage.Event") + proto.RegisterEnum("docker.swarmkit.v1.WatchActionKind", WatchActionKind_name, WatchActionKind_value) +} + +type authenticatedWrapperWatchServer struct { + local WatchServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperWatchServer(local WatchServer, authorize func(context.Context, []string) error) WatchServer { + return &authenticatedWrapperWatchServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperWatchServer) Watch(r *WatchRequest, stream Watch_WatchServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-manager"}); err != nil { + return err + } + return p.local.Watch(r, stream) +} + +func (m *Object) Copy() *Object { + if m == nil { + return nil + } + o := &Object{} + o.CopyFrom(m) + return o +} + +func (m *Object) CopyFrom(src interface{}) { + + o := src.(*Object) + *m = *o + if o.Object != nil { + switch o.Object.(type) { + case *Object_Node: + v := Object_Node{ + Node: &Node{}, + } + deepcopy.Copy(v.Node, o.GetNode()) + m.Object = &v + case *Object_Service: + v := Object_Service{ + Service: &Service{}, + } + deepcopy.Copy(v.Service, o.GetService()) + m.Object = &v + case *Object_Network: + v := Object_Network{ + Network: &Network{}, + } + deepcopy.Copy(v.Network, o.GetNetwork()) + m.Object = &v + case *Object_Task: + v := Object_Task{ + Task: &Task{}, + } + deepcopy.Copy(v.Task, o.GetTask()) + m.Object = &v + case *Object_Cluster: + v := Object_Cluster{ + Cluster: &Cluster{}, + } + deepcopy.Copy(v.Cluster, o.GetCluster()) + m.Object = &v + case *Object_Secret: + v := Object_Secret{ + Secret: &Secret{}, + } + deepcopy.Copy(v.Secret, o.GetSecret()) + m.Object = &v + case *Object_Resource: + v := Object_Resource{ + Resource: &Resource{}, + } + deepcopy.Copy(v.Resource, o.GetResource()) + m.Object = &v + case *Object_Extension: + v := Object_Extension{ + Extension: &Extension{}, + } + deepcopy.Copy(v.Extension, o.GetExtension()) + m.Object = &v + case *Object_Config: + v := Object_Config{ + Config: &Config{}, + } + deepcopy.Copy(v.Config, o.GetConfig()) + m.Object = &v + } + } + +} + +func (m *SelectBySlot) Copy() *SelectBySlot { + if m == nil { + return nil + } + o := &SelectBySlot{} + o.CopyFrom(m) + return o +} + +func (m *SelectBySlot) CopyFrom(src interface{}) { + + o := src.(*SelectBySlot) + *m = *o +} + +func (m *SelectByCustom) Copy() *SelectByCustom { + if m == nil { + return nil + } + o := &SelectByCustom{} + o.CopyFrom(m) + return o +} + +func (m *SelectByCustom) CopyFrom(src interface{}) { + + o := src.(*SelectByCustom) 
+ *m = *o +} + +func (m *SelectBy) Copy() *SelectBy { + if m == nil { + return nil + } + o := &SelectBy{} + o.CopyFrom(m) + return o +} + +func (m *SelectBy) CopyFrom(src interface{}) { + + o := src.(*SelectBy) + *m = *o + if o.By != nil { + switch o.By.(type) { + case *SelectBy_ID: + v := SelectBy_ID{ + ID: o.GetID(), + } + m.By = &v + case *SelectBy_IDPrefix: + v := SelectBy_IDPrefix{ + IDPrefix: o.GetIDPrefix(), + } + m.By = &v + case *SelectBy_Name: + v := SelectBy_Name{ + Name: o.GetName(), + } + m.By = &v + case *SelectBy_NamePrefix: + v := SelectBy_NamePrefix{ + NamePrefix: o.GetNamePrefix(), + } + m.By = &v + case *SelectBy_Custom: + v := SelectBy_Custom{ + Custom: &SelectByCustom{}, + } + deepcopy.Copy(v.Custom, o.GetCustom()) + m.By = &v + case *SelectBy_CustomPrefix: + v := SelectBy_CustomPrefix{ + CustomPrefix: &SelectByCustom{}, + } + deepcopy.Copy(v.CustomPrefix, o.GetCustomPrefix()) + m.By = &v + case *SelectBy_ServiceID: + v := SelectBy_ServiceID{ + ServiceID: o.GetServiceID(), + } + m.By = &v + case *SelectBy_NodeID: + v := SelectBy_NodeID{ + NodeID: o.GetNodeID(), + } + m.By = &v + case *SelectBy_Slot: + v := SelectBy_Slot{ + Slot: &SelectBySlot{}, + } + deepcopy.Copy(v.Slot, o.GetSlot()) + m.By = &v + case *SelectBy_DesiredState: + v := SelectBy_DesiredState{ + DesiredState: o.GetDesiredState(), + } + m.By = &v + case *SelectBy_Role: + v := SelectBy_Role{ + Role: o.GetRole(), + } + m.By = &v + case *SelectBy_Membership: + v := SelectBy_Membership{ + Membership: o.GetMembership(), + } + m.By = &v + case *SelectBy_ReferencedNetworkID: + v := SelectBy_ReferencedNetworkID{ + ReferencedNetworkID: o.GetReferencedNetworkID(), + } + m.By = &v + case *SelectBy_ReferencedSecretID: + v := SelectBy_ReferencedSecretID{ + ReferencedSecretID: o.GetReferencedSecretID(), + } + m.By = &v + case *SelectBy_ReferencedConfigID: + v := SelectBy_ReferencedConfigID{ + ReferencedConfigID: o.GetReferencedConfigID(), + } + m.By = &v + case *SelectBy_Kind: + v := SelectBy_Kind{ + Kind: o.GetKind(), + } + m.By = &v + } + } + +} + +func (m *WatchRequest) Copy() *WatchRequest { + if m == nil { + return nil + } + o := &WatchRequest{} + o.CopyFrom(m) + return o +} + +func (m *WatchRequest) CopyFrom(src interface{}) { + + o := src.(*WatchRequest) + *m = *o + if o.Entries != nil { + m.Entries = make([]*WatchRequest_WatchEntry, len(o.Entries)) + for i := range m.Entries { + m.Entries[i] = &WatchRequest_WatchEntry{} + deepcopy.Copy(m.Entries[i], o.Entries[i]) + } + } + + if o.ResumeFrom != nil { + m.ResumeFrom = &Version{} + deepcopy.Copy(m.ResumeFrom, o.ResumeFrom) + } +} + +func (m *WatchRequest_WatchEntry) Copy() *WatchRequest_WatchEntry { + if m == nil { + return nil + } + o := &WatchRequest_WatchEntry{} + o.CopyFrom(m) + return o +} + +func (m *WatchRequest_WatchEntry) CopyFrom(src interface{}) { + + o := src.(*WatchRequest_WatchEntry) + *m = *o + if o.Filters != nil { + m.Filters = make([]*SelectBy, len(o.Filters)) + for i := range m.Filters { + m.Filters[i] = &SelectBy{} + deepcopy.Copy(m.Filters[i], o.Filters[i]) + } + } + +} + +func (m *WatchMessage) Copy() *WatchMessage { + if m == nil { + return nil + } + o := &WatchMessage{} + o.CopyFrom(m) + return o +} + +func (m *WatchMessage) CopyFrom(src interface{}) { + + o := src.(*WatchMessage) + *m = *o + if o.Events != nil { + m.Events = make([]*WatchMessage_Event, len(o.Events)) + for i := range m.Events { + m.Events[i] = &WatchMessage_Event{} + deepcopy.Copy(m.Events[i], o.Events[i]) + } + } + + if o.Version != nil { + m.Version = &Version{} + 
deepcopy.Copy(m.Version, o.Version) + } +} + +func (m *WatchMessage_Event) Copy() *WatchMessage_Event { + if m == nil { + return nil + } + o := &WatchMessage_Event{} + o.CopyFrom(m) + return o +} + +func (m *WatchMessage_Event) CopyFrom(src interface{}) { + + o := src.(*WatchMessage_Event) + *m = *o + if o.Object != nil { + m.Object = &Object{} + deepcopy.Copy(m.Object, o.Object) + } + if o.OldObject != nil { + m.OldObject = &Object{} + deepcopy.Copy(m.OldObject, o.OldObject) + } +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Watch service + +type WatchClient interface { + // Watch starts a stream that returns any changes to objects that match + // the specified selectors. When the stream begins, it immediately sends + // an empty message back to the client. It is important to wait for + // this message before taking any actions that depend on an established + // stream of changes for consistency. + Watch(ctx context.Context, in *WatchRequest, opts ...grpc.CallOption) (Watch_WatchClient, error) +} + +type watchClient struct { + cc *grpc.ClientConn +} + +func NewWatchClient(cc *grpc.ClientConn) WatchClient { + return &watchClient{cc} +} + +func (c *watchClient) Watch(ctx context.Context, in *WatchRequest, opts ...grpc.CallOption) (Watch_WatchClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Watch_serviceDesc.Streams[0], c.cc, "/docker.swarmkit.v1.Watch/Watch", opts...) + if err != nil { + return nil, err + } + x := &watchWatchClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Watch_WatchClient interface { + Recv() (*WatchMessage, error) + grpc.ClientStream +} + +type watchWatchClient struct { + grpc.ClientStream +} + +func (x *watchWatchClient) Recv() (*WatchMessage, error) { + m := new(WatchMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for Watch service + +type WatchServer interface { + // Watch starts a stream that returns any changes to objects that match + // the specified selectors. When the stream begins, it immediately sends + // an empty message back to the client. It is important to wait for + // this message before taking any actions that depend on an established + // stream of changes for consistency. 
+// Server API for Watch service
+
+type WatchServer interface {
+	// Watch starts a stream that returns any changes to objects that match
+	// the specified selectors. When the stream begins, it immediately sends
+	// an empty message back to the client. It is important to wait for
+	// this message before taking any actions that depend on an established
+	// stream of changes for consistency.
+	Watch(*WatchRequest, Watch_WatchServer) error
+}
+
+func RegisterWatchServer(s *grpc.Server, srv WatchServer) {
+	s.RegisterService(&_Watch_serviceDesc, srv)
+}
+
+func _Watch_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(WatchRequest)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(WatchServer).Watch(m, &watchWatchServer{stream})
+}
+
+type Watch_WatchServer interface {
+	Send(*WatchMessage) error
+	grpc.ServerStream
+}
+
+type watchWatchServer struct {
+	grpc.ServerStream
+}
+
+func (x *watchWatchServer) Send(m *WatchMessage) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+var _Watch_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "docker.swarmkit.v1.Watch",
+	HandlerType: (*WatchServer)(nil),
+	Methods:     []grpc.MethodDesc{},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "Watch",
+			Handler:       _Watch_Watch_Handler,
+			ServerStreams: true,
+		},
+	},
+	Metadata: "github.com/docker/swarmkit/api/watch.proto",
+}
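On the server side, any type implementing WatchServer can be registered against the service descriptor above. A minimal sketch with a hypothetical demoWatchServer that sends the documented empty handshake message and then one event; the enum constant name WatchActionKindCreate is an assumption about this file's generated enum, not confirmed by the excerpt:

    package main

    import (
        "net"

        "google.golang.org/grpc"

        api "github.com/docker/swarmkit/api"
    )

    type demoWatchServer struct{}

    func (demoWatchServer) Watch(r *api.WatchRequest, stream api.Watch_WatchServer) error {
        // Empty handshake message, as documented on the interface.
        if err := stream.Send(&api.WatchMessage{}); err != nil {
            return err
        }
        // A real implementation would stream store changes matching r.Entries.
        return stream.Send(&api.WatchMessage{
            Events: []*api.WatchMessage_Event{{Action: api.WatchActionKindCreate}},
        })
    }

    func serve() error {
        lis, err := net.Listen("tcp", "127.0.0.1:0")
        if err != nil {
            return err
        }
        s := grpc.NewServer()
        api.RegisterWatchServer(s, demoWatchServer{})
        return s.Serve(lis)
    }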
+func (m *Object) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Object) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Object != nil {
+		nn1, err := m.Object.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += nn1
+	}
+	return i, nil
+}
+
+func (m *Object_Node) MarshalTo(dAtA []byte) (int, error) {
+	i := 0
+	if m.Node != nil {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintWatch(dAtA, i, uint64(m.Node.Size()))
+		n2, err := m.Node.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n2
+	}
+	return i, nil
+}

[... MarshalTo for the remaining Object oneof wrappers (Service, Network,
Task, Cluster, Secret, Resource, Extension, Config) is identical apart from
the tag byte, 0x12 through 0x4a for fields 2 through 9 ...]

+func (m *SelectBySlot) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ServiceID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintWatch(dAtA, i, uint64(len(m.ServiceID)))
+		i += copy(dAtA[i:], m.ServiceID)
+	}
+	if m.Slot != 0 {
+		dAtA[i] = 0x10
+		i++
+		i = encodeVarintWatch(dAtA, i, uint64(m.Slot))
+	}
+	return i, nil
+}

[... the Marshal/MarshalTo pairs for SelectByCustom (Kind, Index, Value as
length-prefixed strings), SelectBy and its oneof wrappers (string fields copy
the value after a length prefix; the enum fields DesiredState, Role and
Membership, tags 0x50/0x58/0x60, are bare varints; the message fields Custom,
CustomPrefix and Slot are length-delimited; ReferencedConfigID, field 16,
needs the two-byte tag 0x82 0x01), WatchRequest (repeated Entries, optional
ResumeFrom, and the IncludeOldObject bool written as field 3 with tag 0x18)
and WatchRequest_WatchEntry (Kind, Action, repeated Filters) all follow the
same scheme ...]
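The hard-coded tag bytes in the marshal code above are protobuf field keys, key = (field_number << 3) | wire_type, themselves varint-encoded. A quick self-contained check of the values that appear in this file:

    package main

    import "fmt"

    func key(field, wire uint64) uint64 { return field<<3 | wire }

    func main() {
        fmt.Printf("%#x\n", key(1, 2))  // 0xa:  field 1, length-delimited
        fmt.Printf("%#x\n", key(3, 0))  // 0x18: field 3, varint (IncludeOldObject)
        fmt.Printf("%#x\n", key(16, 2)) // 0x82: >= 0x80, so the key itself is
        // varint-encoded as the two bytes 0x82 0x01, matching the
        // ReferencedConfigID case above.
    }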
+func (m *WatchMessage) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Events) > 0 {
+		for _, msg := range m.Events {
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintWatch(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	if m.Version != nil {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintWatch(dAtA, i, uint64(m.Version.Size()))
+		n16, err := m.Version.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n16
+	}
+	return i, nil
+}

[... WatchMessage_Event.MarshalTo writes Action (varint, field 1) and the
Object/OldObject messages (length-delimited, fields 2 and 3) the same way ...]

+func encodeVarintWatch(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
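encodeVarintWatch above emits base-128 varints: seven payload bits per byte, low bits first, with the high bit set on every byte except the last. A standalone check using the same loop:

    package main

    import "fmt"

    // putVarint mirrors encodeVarintWatch.
    func putVarint(buf []byte, offset int, v uint64) int {
        for v >= 1<<7 {
            buf[offset] = uint8(v&0x7f | 0x80) // low 7 bits + continuation bit
            v >>= 7
            offset++
        }
        buf[offset] = uint8(v)
        return offset + 1
    }

    func main() {
        buf := make([]byte, 10)
        n := putVarint(buf, 0, 300)
        fmt.Printf("% x\n", buf[:n]) // ac 02: 300 = 0b10_0101100
    }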
+type raftProxyWatchServer struct {
+	local                       WatchServer
+	connSelector                raftselector.ConnProvider
+	localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error)
+}
+
+func NewRaftProxyWatchServer(local WatchServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) WatchServer {
+	redirectChecker := func(ctx context.Context) (context.Context, error) {
+		p, ok := peer.FromContext(ctx)
+		if !ok {
+			return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context")
+		}
+		addr := p.Addr.String()
+		md, ok := metadata.FromIncomingContext(ctx)
+		if ok && len(md["redirect"]) != 0 {
+			return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
+		}
+		if !ok {
+			md = metadata.New(map[string]string{})
+		}
+		md["redirect"] = append(md["redirect"], addr)
+		return metadata.NewOutgoingContext(ctx, md), nil
+	}
+	remoteMods := []func(context.Context) (context.Context, error){redirectChecker}
+	remoteMods = append(remoteMods, remoteCtxMod)
+
+	var localMods []func(context.Context) (context.Context, error)
+	if localCtxMod != nil {
+		localMods = []func(context.Context) (context.Context, error){localCtxMod}
+	}
+
+	return &raftProxyWatchServer{
+		local:         local,
+		connSelector:  connSelector,
+		localCtxMods:  localMods,
+		remoteCtxMods: remoteMods,
+	}
+}
+
+func (p *raftProxyWatchServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) {
+	var err error
+	for _, mod := range ctxMods {
+		ctx, err = mod(ctx)
+		if err != nil {
+			return ctx, err
+		}
+	}
+	return ctx, nil
+}
+
+func (p *raftProxyWatchServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) {
+	ticker := rafttime.NewTicker(500 * rafttime.Millisecond)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+			conn, err := p.connSelector.LeaderConn(ctx)
+			if err != nil {
+				return nil, err
+			}
+			client := NewHealthClient(conn)
+			resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"})
+			if err != nil || resp.Status != HealthCheckResponse_SERVING {
+				continue
+			}
+			return conn, nil
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		}
+	}
+}
+
+type Watch_WatchServerWrapper struct {
+	Watch_WatchServer
+	ctx context.Context
+}
+
+func (s Watch_WatchServerWrapper) Context() context.Context {
+	return s.ctx
+}
+
+func (p *raftProxyWatchServer) Watch(r *WatchRequest, stream Watch_WatchServer) error {
+	ctx := stream.Context()
+	conn, err := p.connSelector.LeaderConn(ctx)
+	if err != nil {
+		if err == raftselector.ErrIsLeader {
+			ctx, err = p.runCtxMods(ctx, p.localCtxMods)
+			if err != nil {
+				return err
+			}
+			streamWrapper := Watch_WatchServerWrapper{
+				Watch_WatchServer: stream,
+				ctx:               ctx,
+			}
+			return p.local.Watch(r, streamWrapper)
+		}
+		return err
+	}
+	ctx, err = p.runCtxMods(ctx, p.remoteCtxMods)
+	if err != nil {
+		return err
+	}
+	clientStream, err := NewWatchClient(conn).Watch(ctx, r)
+	if err != nil {
+		return err
+	}
+	for {
+		msg, err := clientStream.Recv()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return err
+		}
+		if err := stream.Send(msg); err != nil {
+			return err
+		}
+	}
+	return nil
+}
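The raft proxy above serves the request locally when this node is the leader, and otherwise forwards it to the leader and relays the stream back. The redirect guard it installs is worth isolating: a follower appends its own address under the "redirect" metadata key before forwarding, and refuses any request that already carries one, so a request can hop at most once. Restated as a standalone function (same logic as the generated redirectChecker):

    import (
        "context"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/metadata"
        "google.golang.org/grpc/peer"
        "google.golang.org/grpc/status"
    )

    func checkRedirect(ctx context.Context) (context.Context, error) {
        p, ok := peer.FromContext(ctx)
        if !ok {
            return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context")
        }
        md, ok := metadata.FromIncomingContext(ctx)
        if ok && len(md["redirect"]) != 0 {
            // A second hop would mean there is no stable leader to land on.
            return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
        }
        if !ok {
            md = metadata.New(map[string]string{})
        }
        md["redirect"] = append(md["redirect"], p.Addr.String())
        return metadata.NewOutgoingContext(ctx, md), nil
    }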
+func (m *Object) Size() (n int) {
+	var l int
+	_ = l
+	if m.Object != nil {
+		n += m.Object.Size()
+	}
+	return n
+}
+
+func (m *Object_Node) Size() (n int) {
+	var l int
+	_ = l
+	if m.Node != nil {
+		l = m.Node.Size()
+		n += 1 + l + sovWatch(uint64(l))
+	}
+	return n
+}

[... Size for the remaining Object oneof wrappers, SelectBySlot,
SelectByCustom, SelectBy and its oneof wrappers, WatchRequest and
WatchRequest_WatchEntry is computed the same way: one tag byte (two for
ReferencedConfigID, field 16), plus sovWatch bytes of length prefix and the
payload for length-delimited fields, or sovWatch bytes of the value itself
for varint fields; the IncludeOldObject bool counts a fixed 2 bytes ...]
[... WatchMessage.Size sums the repeated Events elements and the optional
Version pointer the same way ...]

+func (m *WatchMessage_Event) Size() (n int) {
+	var l int
+	_ = l
+	if m.Action != 0 {
+		n += 1 + sovWatch(uint64(m.Action))
+	}
+	if m.Object != nil {
+		l = m.Object.Size()
+		n += 1 + l + sovWatch(uint64(l))
+	}
+	if m.OldObject != nil {
+		l = m.OldObject.Size()
+		n += 1 + l + sovWatch(uint64(l))
+	}
+	return n
+}
+
+func sovWatch(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+
+func sozWatch(x uint64) (n int) {
+	return sovWatch(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
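sovWatch returns the number of varint bytes needed for x, and sozWatch first zigzag-encodes so that small negative values also stay small on the wire. A quick self-contained check of both:

    package main

    import "fmt"

    // sov mirrors sovWatch: one byte per 7 bits of significance.
    func sov(x uint64) (n int) {
        for {
            n++
            x >>= 7
            if x == 0 {
                return n
            }
        }
    }

    // zigzag mirrors the transform inside sozWatch.
    func zigzag(v int64) uint64 { return uint64((v << 1) ^ (v >> 63)) }

    func main() {
        fmt.Println(sov(127), sov(128))    // 1 2: 128 needs a second byte
        fmt.Println(zigzag(-1), zigzag(1)) // 1 2: negatives interleave with positives
        fmt.Println(sov(zigzag(-64)))      // 1: -64 -> 127, still one byte
    }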
+func (this *Object) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Object{`,
+		`Object:` + fmt.Sprintf("%v", this.Object) + `,`,
+		`}`,
+	}, "")
+	return s
+}

[... String() for every other message and oneof wrapper in this file
(SelectBySlot, SelectByCustom, SelectBy and its wrappers, WatchRequest,
WatchRequest_WatchEntry, WatchMessage, WatchMessage_Event) is generated the
same way, printing each field with fmt.Sprintf("%v") ...]

+func valueToStringWatch(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *Object) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowWatch
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Object: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Object: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowWatch
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthWatch
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &Node{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Object = &Object_Node{v}
+			iNdEx = postIndex

[... cases 2 through 9 (Service, Network, Task, Cluster, Secret, Resource,
Extension, Config) decode a length-delimited message the same way and store
it into the corresponding Object_* oneof wrapper ...]

+		default:
+			iNdEx = preIndex
+			skippy, err := skipWatch(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthWatch
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}

[... Unmarshal for SelectBySlot, SelectByCustom, SelectBy, WatchRequest,
WatchRequest_WatchEntry, WatchMessage and WatchMessage_Event follows the same
loop: string fields slice the input (m.Kind = string(dAtA[iNdEx:postIndex])),
enum and bool fields accumulate a bare varint, repeated message fields append
a fresh element and recurse into its Unmarshal, and SelectBy stores each
decoded field into the By oneof, e.g. m.By = &SelectBy_NodeID{...} ...]
return nil +} +func skipWatch(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWatch + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWatch + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWatch + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthWatch + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWatch + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipWatch(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthWatch = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowWatch = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/watch.proto", fileDescriptorWatch) } + +var fileDescriptorWatch = []byte{ + // 1186 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x96, 0xbd, 0x73, 0x1b, 0xc5, + 0x1b, 0xc7, 0x75, 0x8a, 0x7c, 0x92, 0x1e, 0xdb, 0x89, 0x67, 0xe3, 0x24, 0xf7, 0xd3, 0x2f, 0xc8, + 0x42, 0x0c, 0x90, 0x49, 0x82, 0x0c, 0x26, 0x24, 0x03, 0x04, 0x66, 0x2c, 0x59, 0x8c, 0x44, 0xc6, + 0x2f, 0xb3, 0xb6, 0x93, 0x52, 0x73, 0xbe, 0x7b, 0xac, 0x1c, 0xba, 0xbb, 0x15, 0x7b, 0x27, 0x39, + 0xee, 0x28, 0x28, 0x98, 0xf4, 0xcc, 0xd0, 0xa4, 0x82, 0x9a, 0x86, 0x0e, 0xfe, 0x81, 0x0c, 0x15, + 0x25, 0x34, 0x1a, 0xa2, 0x92, 0x82, 0xbf, 0x80, 0x82, 0xd9, 0x97, 0xf3, 0x8b, 0x72, 0xb2, 0x49, + 0xa5, 0xbd, 0xbd, 0xcf, 0xf7, 0xd9, 0x67, 0x9f, 0xb7, 0x13, 0xdc, 0xec, 0x7a, 0xf1, 0xe3, 0xc1, + 0x5e, 0xcd, 0x61, 0xc1, 0xb2, 0xcb, 0x9c, 0x1e, 0xf2, 0xe5, 0xe8, 0xc0, 0xe6, 0x41, 0xcf, 0x8b, + 0x97, 0xed, 0xbe, 0xb7, 0x7c, 0x60, 0xc7, 0xce, 0xe3, 0x5a, 0x9f, 0xb3, 0x98, 0x11, 0xa2, 0x80, + 0x5a, 0x02, 0xd4, 0x86, 0xef, 0x95, 0xce, 0xd3, 0x47, 0x7d, 0x74, 0x22, 0xa5, 0x2f, 0xdd, 0x3e, + 0x87, 0x65, 0x7b, 0x5f, 0xa0, 0x13, 0x27, 0xf4, 0x79, 0x96, 0xe3, 0xc3, 0x3e, 0x26, 0xec, 0x62, + 0x97, 0x75, 0x99, 0x5c, 0x2e, 0x8b, 0x95, 0xde, 0xbd, 0x77, 0x86, 0x05, 0x49, 0xec, 0x0d, 0xf6, + 0x97, 0xfb, 0xfe, 0xa0, 0xeb, 0x85, 0xfa, 0x47, 0x09, 0xab, 0x5f, 0xe7, 0xc0, 0xdc, 0x94, 0xce, + 0x90, 0x1a, 0xe4, 0x42, 0xe6, 0xa2, 0x65, 0x54, 0x8c, 0x1b, 0xb3, 0x2b, 0x56, 0xed, 0xe5, 0x10, + 0xd4, 0x36, 0x98, 0x8b, 0xad, 0x0c, 0x95, 0x1c, 0xb9, 
0x07, 0xf9, 0x08, 0xf9, 0xd0, 0x73, 0xd0, + 0xca, 0x4a, 0xc9, 0xff, 0xd3, 0x24, 0xdb, 0x0a, 0x69, 0x65, 0x68, 0x42, 0x0b, 0x61, 0x88, 0xf1, + 0x01, 0xe3, 0x3d, 0xeb, 0xc2, 0x74, 0xe1, 0x86, 0x42, 0x84, 0x50, 0xd3, 0xc2, 0xc3, 0xd8, 0x8e, + 0x7a, 0x56, 0x6e, 0xba, 0x87, 0x3b, 0x76, 0x24, 0x24, 0x92, 0x13, 0x07, 0x39, 0xfe, 0x20, 0x8a, + 0x91, 0x5b, 0x33, 0xd3, 0x0f, 0x6a, 0x28, 0x44, 0x1c, 0xa4, 0x69, 0x72, 0x07, 0xcc, 0x08, 0x1d, + 0x8e, 0xb1, 0x65, 0x4a, 0x5d, 0x29, 0xfd, 0x66, 0x82, 0x68, 0x65, 0xa8, 0x66, 0xc9, 0x47, 0x50, + 0xe0, 0x18, 0xb1, 0x01, 0x77, 0xd0, 0xca, 0x4b, 0xdd, 0xf5, 0x34, 0x1d, 0xd5, 0x4c, 0x2b, 0x43, + 0x8f, 0x78, 0xf2, 0x09, 0x14, 0xf1, 0x49, 0x8c, 0x61, 0xe4, 0xb1, 0xd0, 0x2a, 0x48, 0xf1, 0x6b, + 0x69, 0xe2, 0x66, 0x02, 0xb5, 0x32, 0xf4, 0x58, 0x21, 0x1c, 0x76, 0x58, 0xb8, 0xef, 0x75, 0xad, + 0xe2, 0x74, 0x87, 0x1b, 0x92, 0x10, 0x0e, 0x2b, 0xb6, 0x5e, 0x48, 0x72, 0x5f, 0xdd, 0x82, 0xb9, + 0x6d, 0xf4, 0xd1, 0x89, 0xeb, 0x87, 0xdb, 0x3e, 0x8b, 0xc9, 0x6d, 0x00, 0x9d, 0xad, 0x8e, 0xe7, + 0xca, 0x8a, 0x28, 0xd6, 0xe7, 0xc7, 0xa3, 0xa5, 0xa2, 0x4e, 0x67, 0x7b, 0x8d, 0x16, 0x35, 0xd0, + 0x76, 0x09, 0x81, 0x5c, 0xe4, 0xb3, 0x58, 0x96, 0x41, 0x8e, 0xca, 0x75, 0x75, 0x0b, 0x2e, 0x26, + 0x16, 0x1b, 0x83, 0x28, 0x66, 0x81, 0xa0, 0x7a, 0x5e, 0xa8, 0xad, 0x51, 0xb9, 0x26, 0x8b, 0x30, + 0xe3, 0x85, 0x2e, 0x3e, 0x91, 0xd2, 0x22, 0x55, 0x0f, 0x62, 0x77, 0x68, 0xfb, 0x03, 0x94, 0xe5, + 0x51, 0xa4, 0xea, 0xa1, 0xfa, 0x97, 0x09, 0x85, 0xc4, 0x24, 0xb1, 0x20, 0x7b, 0xe4, 0x98, 0x39, + 0x1e, 0x2d, 0x65, 0xdb, 0x6b, 0xad, 0x0c, 0xcd, 0x7a, 0x2e, 0xb9, 0x05, 0x45, 0xcf, 0xed, 0xf4, + 0x39, 0xee, 0x7b, 0xda, 0x6c, 0x7d, 0x6e, 0x3c, 0x5a, 0x2a, 0xb4, 0xd7, 0xb6, 0xe4, 0x9e, 0x08, + 0xbb, 0xe7, 0xaa, 0x35, 0x59, 0x84, 0x5c, 0x68, 0x07, 0xfa, 0x20, 0x59, 0xd9, 0x76, 0x80, 0xe4, + 0x75, 0x98, 0x15, 0xbf, 0x89, 0x91, 0x9c, 0x7e, 0x09, 0x62, 0x53, 0x0b, 0xef, 0x83, 0xe9, 0xc8, + 0x6b, 0xe9, 0xca, 0xaa, 0xa6, 0x57, 0xc8, 0xc9, 0x00, 0xc8, 0xc0, 0xab, 0x50, 0xb4, 0x61, 0x5e, + 0xad, 0x92, 0x23, 0xcc, 0x57, 0x30, 0x32, 0xa7, 0xa4, 0xda, 0x91, 0xda, 0xa9, 0x4c, 0xe5, 0x53, + 0x32, 0x25, 0x2a, 0xe5, 0x38, 0x57, 0x6f, 0x42, 0x5e, 0x74, 0xaf, 0x80, 0x0b, 0x12, 0x86, 0xf1, + 0x68, 0xc9, 0x14, 0x8d, 0x2d, 0x49, 0x53, 0xbc, 0x6c, 0xbb, 0xe4, 0xae, 0x4e, 0xa9, 0x2a, 0xa7, + 0xca, 0x59, 0x8e, 0x89, 0x82, 0x11, 0xa1, 0x13, 0x3c, 0x59, 0x83, 0x79, 0x17, 0x23, 0x8f, 0xa3, + 0xdb, 0x89, 0x62, 0x3b, 0x46, 0x0b, 0x2a, 0xc6, 0x8d, 0x8b, 0xe9, 0xb5, 0x2c, 0x7a, 0x75, 0x5b, + 0x40, 0xe2, 0x52, 0x5a, 0x25, 0x9f, 0xc9, 0x0a, 0xe4, 0x38, 0xf3, 0xd1, 0x9a, 0x95, 0xe2, 0xeb, + 0xd3, 0x46, 0x11, 0x65, 0xbe, 0x1c, 0x47, 0x82, 0x25, 0x6d, 0x80, 0x00, 0x83, 0x3d, 0xe4, 0xd1, + 0x63, 0xaf, 0x6f, 0xcd, 0x49, 0xe5, 0xdb, 0xd3, 0x94, 0xdb, 0x7d, 0x74, 0x6a, 0xeb, 0x47, 0xb8, + 0x48, 0xee, 0xb1, 0x98, 0xac, 0xc3, 0x15, 0x8e, 0xfb, 0xc8, 0x31, 0x74, 0xd0, 0xed, 0xe8, 0xe9, + 0x23, 0x22, 0x36, 0x2f, 0x23, 0x76, 0x6d, 0x3c, 0x5a, 0xba, 0x4c, 0x8f, 0x00, 0x3d, 0xa8, 0x64, + 0xf8, 0x2e, 0xf3, 0x97, 0xb6, 0x5d, 0xf2, 0x39, 0x2c, 0x9e, 0x30, 0xa7, 0x86, 0x85, 0xb0, 0x76, + 0x51, 0x5a, 0xbb, 0x3a, 0x1e, 0x2d, 0x91, 0x63, 0x6b, 0x6a, 0xaa, 0x48, 0x63, 0x84, 0x4f, 0xee, + 0x8a, 0x86, 0x51, 0x4d, 0x74, 0x29, 0x29, 0x58, 0xd9, 0x46, 0xa7, 0x4f, 0x50, 0xdd, 0x2d, 0x4e, + 0x58, 0x48, 0x3b, 0x41, 0x8d, 0x81, 0xc9, 0x13, 0xf4, 0xae, 0x5b, 0xcf, 0x41, 0xb6, 0x7e, 0x58, + 0xfd, 0x23, 0x0b, 0x73, 0x8f, 0xc4, 0x07, 0x91, 0xe2, 0x97, 0x03, 0x8c, 0x62, 0xd2, 0x84, 0x3c, + 0x86, 0x31, 0xf7, 0x30, 0xb2, 0x8c, 0xca, 0x85, 0x1b, 0xb3, 0x2b, 0xb7, 0xd2, 
0x62, 0x7b, 0x52, + 0xa2, 0x1e, 0x9a, 0x61, 0xcc, 0x0f, 0x69, 0xa2, 0x25, 0xf7, 0x61, 0x96, 0x63, 0x34, 0x08, 0xb0, + 0xb3, 0xcf, 0x59, 0x70, 0xd6, 0x87, 0xe3, 0x21, 0x72, 0x31, 0xda, 0x28, 0x28, 0xfe, 0x33, 0xce, + 0x02, 0x72, 0x1b, 0x88, 0x17, 0x3a, 0xfe, 0xc0, 0xc5, 0x0e, 0xf3, 0xdd, 0x8e, 0xfa, 0x8a, 0xca, + 0xe6, 0x2d, 0xd0, 0x05, 0xfd, 0x66, 0xd3, 0x77, 0xd5, 0x50, 0x2b, 0x7d, 0x6b, 0x00, 0x1c, 0xfb, + 0x90, 0x3a, 0x7f, 0x3e, 0x06, 0xd3, 0x76, 0x62, 0x31, 0x73, 0xb3, 0xb2, 0x60, 0xde, 0x98, 0x7a, + 0xa9, 0x55, 0x89, 0x3d, 0xf0, 0x42, 0x97, 0x6a, 0x09, 0xb9, 0x0b, 0xf9, 0x7d, 0xcf, 0x8f, 0x91, + 0x47, 0xd6, 0x05, 0x19, 0x92, 0xeb, 0x67, 0xb5, 0x09, 0x4d, 0xe0, 0xea, 0x2f, 0x49, 0x6c, 0xd7, + 0x31, 0x8a, 0xec, 0x2e, 0x92, 0x4f, 0xc1, 0xc4, 0x21, 0x86, 0x71, 0x12, 0xda, 0xb7, 0xa6, 0x7a, + 0xa1, 0x15, 0xb5, 0xa6, 0xc0, 0xa9, 0x56, 0x91, 0x0f, 0x20, 0x3f, 0x54, 0xd1, 0xfa, 0x2f, 0x01, + 0x4d, 0xd8, 0xd2, 0x4f, 0x06, 0xcc, 0x48, 0x43, 0x27, 0xc2, 0x60, 0xbc, 0x7a, 0x18, 0x56, 0xc0, + 0xd4, 0x89, 0xc8, 0x4e, 0xff, 0xf6, 0xa8, 0x94, 0x50, 0x4d, 0x92, 0x0f, 0x01, 0x26, 0x12, 0x78, + 0xb6, 0xae, 0xc8, 0x92, 0xac, 0xde, 0xfc, 0xc7, 0x80, 0x4b, 0x13, 0xae, 0x90, 0x3b, 0xb0, 0xf8, + 0x68, 0x75, 0xa7, 0xd1, 0xea, 0xac, 0x36, 0x76, 0xda, 0x9b, 0x1b, 0x9d, 0xdd, 0x8d, 0x07, 0x1b, + 0x9b, 0x8f, 0x36, 0x16, 0x32, 0xa5, 0xd2, 0xd3, 0x67, 0x95, 0xab, 0x13, 0xf8, 0x6e, 0xd8, 0x0b, + 0xd9, 0x81, 0x70, 0xfc, 0xf2, 0x29, 0x55, 0x83, 0x36, 0x57, 0x77, 0x9a, 0x0b, 0x46, 0xe9, 0x7f, + 0x4f, 0x9f, 0x55, 0xae, 0x4c, 0x88, 0x1a, 0x1c, 0xd5, 0x64, 0x3a, 0xad, 0xd9, 0xdd, 0x5a, 0x13, + 0x9a, 0x6c, 0xaa, 0x66, 0xb7, 0xef, 0xa6, 0x69, 0x68, 0x73, 0x7d, 0xf3, 0x61, 0x73, 0x21, 0x97, + 0xaa, 0xa1, 0x18, 0xb0, 0x21, 0x96, 0xae, 0x7d, 0xf3, 0x7d, 0x39, 0xf3, 0xf3, 0x0f, 0xe5, 0xc9, + 0xab, 0xae, 0x04, 0x30, 0x23, 0xb7, 0x88, 0x9b, 0x2c, 0x2a, 0xe7, 0x35, 0x62, 0xa9, 0x72, 0x5e, + 0x3d, 0x55, 0xaf, 0xfc, 0xfa, 0xe3, 0xdf, 0xdf, 0x65, 0x2f, 0xc1, 0xbc, 0x24, 0xde, 0x09, 0xec, + 0xd0, 0xee, 0x22, 0x7f, 0xd7, 0xa8, 0x5b, 0xcf, 0x5f, 0x94, 0x33, 0xbf, 0xbf, 0x28, 0x67, 0xbe, + 0x1a, 0x97, 0x8d, 0xe7, 0xe3, 0xb2, 0xf1, 0xdb, 0xb8, 0x6c, 0xfc, 0x39, 0x2e, 0x1b, 0x7b, 0xa6, + 0xfc, 0x03, 0xf9, 0xfe, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xe6, 0x76, 0x89, 0xef, 0x57, 0x0b, + 0x00, 0x00, +} diff --git a/extender/code/vendor/github.com/docker/swarmkit/api/watch.proto b/extender/code/vendor/github.com/docker/swarmkit/api/watch.proto new file mode 100644 index 000000000..d017730b8 --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/api/watch.proto @@ -0,0 +1,154 @@ +syntax = "proto3"; + +package docker.swarmkit.v1; + +import "github.com/docker/swarmkit/api/specs.proto"; +import "github.com/docker/swarmkit/api/objects.proto"; +import "github.com/docker/swarmkit/api/types.proto"; +import "gogoproto/gogo.proto"; +import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto"; + +message Object { + oneof Object { + Node node = 1; + Service service = 2; + Network network = 3; + Task task = 4; + Cluster cluster = 5; + Secret secret = 6; + Resource resource = 7; + Extension extension = 8; + Config config = 9; + } +} + +// FIXME(aaronl): These messages should ideally be embedded in SelectBy, but +// protoc generates bad code for that. 
+message SelectBySlot { + string service_id = 1 [(gogoproto.customname) = "ServiceID"]; + uint64 slot = 2; +} + +message SelectByCustom { + string kind = 1; + string index = 2; + string value = 3; +} + +message SelectBy { + // TODO(aaronl): Are all of these things we want to expose in + // the API? Exposing them may commit us to maintaining those + // internal indices going forward. + oneof By { + // supported by all object types + string id = 1 [(gogoproto.customname) = "ID"]; // not applicable for FindObjects - use GetObject instead + string id_prefix = 2 [(gogoproto.customname) = "IDPrefix"]; + string name = 3; + string name_prefix = 4; + SelectByCustom custom = 5; + SelectByCustom custom_prefix = 6; + + // supported by tasks only + string service_id = 7 [(gogoproto.customname) = "ServiceID"]; + string node_id = 8 [(gogoproto.customname) = "NodeID"]; + SelectBySlot slot = 9; + TaskState desired_state = 10; + + // supported by nodes only + NodeRole role = 11; + NodeSpec.Membership membership = 12; + + // supported by: service, task + string referenced_network_id = 13 [(gogoproto.customname) = "ReferencedNetworkID"]; + string referenced_secret_id = 14 [(gogoproto.customname) = "ReferencedSecretID"]; + string referenced_config_id = 16 [(gogoproto.customname) = "ReferencedConfigID"]; + + // supported by: resource + string kind = 15; + } +} + + +// Watch defines the RPC methods for monitoring data store changes. +service Watch { + // Watch starts a stream that returns any changes to objects that match + // the specified selectors. When the stream begins, it immediately sends + // an empty message back to the client. It is important to wait for + // this message before taking any actions that depend on an established + // stream of changes for consistency. + rpc Watch(WatchRequest) returns (stream WatchMessage) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; +} + +message WatchRequest { + message WatchEntry { + // Kind can contain a builtin type such as "node", "secret", etc. or + // the kind specified by a custom-defined object. + string kind = 1; + + // Action (create/update/delete) + // This is a bitmask, so multiple actions may be OR'd together + WatchActionKind action = 2; + + // Filters are combined using AND logic - an event must match + // all of them to pass the filter. + repeated SelectBy filters = 3; + } + + // Multiple entries are combined using OR logic - i.e. if an event + // matches all of the selectors specified in any single watch entry, + // the event will be sent to the client. + repeated WatchEntry entries = 1; + + // ResumeFrom provides a version to resume the watch from, if non-nil. + // The watch will return changes since this version, and continue to + // return new changes afterwards. Watch will return an error if the + // server has compacted its log and no longer has complete history to + // this point. + Version resume_from = 2; + + // IncludeOldObject causes WatchMessages to include a copy of the + // previous version of the object on updates. Note that only live + // changes will include the old object (not historical changes + // retrieved using ResumeFrom). + bool include_old_object = 3; +} + +// WatchMessage is the type of the stream that's returned to the client by +// Watch. Note that the first item of this stream will always be a WatchMessage +// with a nil Object, to signal that the stream has started.
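+//
+// A hedged Go sketch of a watch client, assuming the generated
+// api.WatchClient for this service and the customname options declared
+// above:
+//
+//	client := api.NewWatchClient(conn)
+//	stream, err := client.Watch(ctx, &api.WatchRequest{
+//	    Entries: []*api.WatchRequest_WatchEntry{
+//	        {Kind: "node", Action: api.WatchActionKindCreate | api.WatchActionKindUpdate},
+//	    },
+//	})
+//	if err == nil {
+//	    _, _ = stream.Recv() // first message is empty: the watch is now established
+//	}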
+message WatchMessage { + message Event { + // Action (create/update/delete) + // Note that WatchMessage does not expose "commit" events that + // mark transaction boundaries. + WatchActionKind action = 1; + + // Matched object + Object object = 2; + + // For updates, OldObject will optionally be included in the + // watch message, containing the previous version of the + // object, if IncludeOldObject was set in WatchRequest. + Object old_object = 3; + } + + repeated Event events = 1; + + // Index versions this change to the data store. It can be used to + // resume the watch from this point. + Version version = 2; +} + +// WatchActionKind distinguishes between creations, updates, and removals. It +// is structured as a bitmap so multiple kinds of events can be requested with +// a mask. +enum WatchActionKind { + option (gogoproto.goproto_enum_prefix) = false; + option (gogoproto.enum_customname) = "WatchActionKind"; + WATCH_ACTION_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "WatchActionKindUnknown"]; // default value, invalid + WATCH_ACTION_CREATE = 1 [(gogoproto.enumvalue_customname) = "WatchActionKindCreate"]; + WATCH_ACTION_UPDATE = 2 [(gogoproto.enumvalue_customname) = "WatchActionKindUpdate"]; + WATCH_ACTION_REMOVE = 4 [(gogoproto.enumvalue_customname) = "WatchActionKindRemove"]; +} diff --git a/extender/code/vendor/github.com/docker/swarmkit/log/context.go b/extender/code/vendor/github.com/docker/swarmkit/log/context.go new file mode 100644 index 000000000..ac4f84880 --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/log/context.go @@ -0,0 +1,96 @@ +package log + +import ( + "path" + + "github.com/sirupsen/logrus" + "golang.org/x/net/context" +) + +var ( + // G is an alias for GetLogger. + // + // We may want to define this locally to a package to get package tagged log + // messages. + G = GetLogger + + // L is an alias for the standard logger. + L = logrus.NewEntry(logrus.StandardLogger()) +) + +type ( + loggerKey struct{} + moduleKey struct{} +) + +// WithLogger returns a new context with the provided logger. Use in +// combination with logger.WithField(s) for great effect. +func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context { + return context.WithValue(ctx, loggerKey{}, logger) +} + +// WithFields returns a new context with the given fields added to its logger. +func WithFields(ctx context.Context, fields logrus.Fields) context.Context { + logger := ctx.Value(loggerKey{}) + + if logger == nil { + logger = L + } + return WithLogger(ctx, logger.(*logrus.Entry).WithFields(fields)) +} + +// WithField is a convenience wrapper around WithFields. +func WithField(ctx context.Context, key, value string) context.Context { + return WithFields(ctx, logrus.Fields{key: value}) +} + +// GetLogger retrieves the current logger from the context. If no logger is +// available, the default logger is returned. +func GetLogger(ctx context.Context) *logrus.Entry { + logger := ctx.Value(loggerKey{}) + + if logger == nil { + return L + } + + return logger.(*logrus.Entry) +} + +// WithModule adds the module to the context, appending it with a slash if a +// module already exists. A module is just a roughly correlated grouping defined by the +// call tree for a given context. +// +// As an example, we might have a "node" module already part of a context. If +// this function is called with "tls", the new value of module will be +// "node/tls". +// +// Modules represent the call path. If the new module and last module are the +// same, a new module entry will not be created.
If the new module and an +// older module are the same but separated by other modules, the cycle will be +// represented by the module path. +func WithModule(ctx context.Context, module string) context.Context { + parent := GetModulePath(ctx) + + if parent != "" { + // don't re-append module when module is the same. + if path.Base(parent) == module { + return ctx + } + + module = path.Join(parent, module) + } + + ctx = WithLogger(ctx, GetLogger(ctx).WithField("module", module)) + return context.WithValue(ctx, moduleKey{}, module) +} + +// GetModulePath returns the module path for the provided context. If no module +// is set, an empty string is returned. +func GetModulePath(ctx context.Context) string { + module := ctx.Value(moduleKey{}) + if module == nil { + return "" + } + + return module.(string) +} diff --git a/extender/code/vendor/github.com/docker/swarmkit/log/grpc.go b/extender/code/vendor/github.com/docker/swarmkit/log/grpc.go new file mode 100644 index 000000000..0417c944c --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/log/grpc.go @@ -0,0 +1,30 @@ +package log + +import ( + "github.com/sirupsen/logrus" + "golang.org/x/net/context" + "google.golang.org/grpc/grpclog" +) + +type logrusWrapper struct { + *logrus.Entry +} + +// V reports whether a particular log level is at +// least l; this is needed to meet the LoggerV2 interface. GRPC's logging levels +// are: https://github.com/grpc/grpc-go/blob/master/grpclog/loggerv2.go#L71 +// 0=info, 1=warning, 2=error, 3=fatal +// logrus's are: https://github.com/sirupsen/logrus/blob/master/logrus.go +// 0=panic, 1=fatal, 2=error, 3=warn, 4=info, 5=debug +func (lw logrusWrapper) V(l int) bool { + // translate to logrus level + logrusLevel := 4 - l + return int(lw.Logger.Level) <= logrusLevel +} + +func init() { + ctx := WithModule(context.Background(), "grpc") + + // completely replace the grpc logger with the logrus logger. + grpclog.SetLoggerV2(logrusWrapper{Entry: G(ctx)}) +} diff --git a/extender/code/vendor/github.com/docker/swarmkit/manager/raftselector/raftselector.go b/extender/code/vendor/github.com/docker/swarmkit/manager/raftselector/raftselector.go new file mode 100644 index 000000000..89e7918a3 --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/manager/raftselector/raftselector.go @@ -0,0 +1,20 @@ +package raftselector + +import ( + "errors" + + "golang.org/x/net/context" + + "google.golang.org/grpc" +) + +// ConnProvider is a basic interface for connecting the API package (the raft proxy in particular) +// to the manager/state/raft package without import cycles. It provides only one +// method, for obtaining a connection to the leader. +type ConnProvider interface { + LeaderConn(ctx context.Context) (*grpc.ClientConn, error) +} + +// ErrIsLeader is returned from the LeaderConn method when the current machine is the leader. +// It's just a shim between packages to avoid import cycles. +var ErrIsLeader = errors.New("current node is leader") diff --git a/extender/code/vendor/github.com/docker/swarmkit/protobuf/plugin/helpers.go b/extender/code/vendor/github.com/docker/swarmkit/protobuf/plugin/helpers.go new file mode 100644 index 000000000..daea795b3 --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/protobuf/plugin/helpers.go @@ -0,0 +1,11 @@ +package plugin + +import ( + "github.com/gogo/protobuf/proto" + google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +) + +// DeepcopyEnabled returns true if deepcopy is enabled for the descriptor.
+func DeepcopyEnabled(options *google_protobuf.MessageOptions) bool { + return proto.GetBoolExtension(options, E_Deepcopy, true) +} diff --git a/extender/code/vendor/github.com/docker/swarmkit/protobuf/plugin/plugin.pb.go b/extender/code/vendor/github.com/docker/swarmkit/protobuf/plugin/plugin.pb.go new file mode 100644 index 000000000..0d08eb6eb --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/protobuf/plugin/plugin.pb.go @@ -0,0 +1,1225 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/docker/swarmkit/protobuf/plugin/plugin.proto + +/* + Package plugin is a generated protocol buffer package. + + It is generated from these files: + github.com/docker/swarmkit/protobuf/plugin/plugin.proto + + It has these top-level messages: + WatchSelectors + StoreObject + TLSAuthorization +*/ +package plugin + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type WatchSelectors struct { + // supported by all object types + ID *bool `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` + IDPrefix *bool `protobuf:"varint,2,opt,name=id_prefix,json=idPrefix" json:"id_prefix,omitempty"` + Name *bool `protobuf:"varint,3,opt,name=name" json:"name,omitempty"` + NamePrefix *bool `protobuf:"varint,4,opt,name=name_prefix,json=namePrefix" json:"name_prefix,omitempty"` + Custom *bool `protobuf:"varint,5,opt,name=custom" json:"custom,omitempty"` + CustomPrefix *bool `protobuf:"varint,6,opt,name=custom_prefix,json=customPrefix" json:"custom_prefix,omitempty"` + // supported by tasks only + ServiceID *bool `protobuf:"varint,7,opt,name=service_id,json=serviceId" json:"service_id,omitempty"` + NodeID *bool `protobuf:"varint,8,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` + Slot *bool `protobuf:"varint,9,opt,name=slot" json:"slot,omitempty"` + DesiredState *bool `protobuf:"varint,10,opt,name=desired_state,json=desiredState" json:"desired_state,omitempty"` + // supported by nodes only + Role *bool `protobuf:"varint,11,opt,name=role" json:"role,omitempty"` + Membership *bool `protobuf:"varint,12,opt,name=membership" json:"membership,omitempty"` + // supported by: resource + Kind *bool `protobuf:"varint,13,opt,name=kind" json:"kind,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *WatchSelectors) Reset() { *m = WatchSelectors{} } +func (*WatchSelectors) ProtoMessage() {} +func (*WatchSelectors) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } + +type StoreObject struct { + WatchSelectors *WatchSelectors `protobuf:"bytes,1,req,name=watch_selectors,json=watchSelectors" json:"watch_selectors,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StoreObject) Reset() { *m = StoreObject{} } +func (*StoreObject) ProtoMessage() {} +func (*StoreObject) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } + +type 
TLSAuthorization struct { + // Roles contains the acceptable TLS OU roles for the handler. + Roles []string `protobuf:"bytes,1,rep,name=roles" json:"roles,omitempty"` + // Insecure is set to true if this method does not require + // authorization. NOTE: Specifying both "insecure" and a nonempty + // list of roles is invalid. This would fail at codegen time. + Insecure *bool `protobuf:"varint,2,opt,name=insecure" json:"insecure,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TLSAuthorization) Reset() { *m = TLSAuthorization{} } +func (*TLSAuthorization) ProtoMessage() {} +func (*TLSAuthorization) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{2} } + +var E_Deepcopy = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 70000, + Name: "docker.protobuf.plugin.deepcopy", + Tag: "varint,70000,opt,name=deepcopy,def=1", + Filename: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto", +} + +var E_StoreObject = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*StoreObject)(nil), + Field: 70001, + Name: "docker.protobuf.plugin.store_object", + Tag: "bytes,70001,opt,name=store_object,json=storeObject", + Filename: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto", +} + +var E_TlsAuthorization = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MethodOptions)(nil), + ExtensionType: (*TLSAuthorization)(nil), + Field: 73626345, + Name: "docker.protobuf.plugin.tls_authorization", + Tag: "bytes,73626345,opt,name=tls_authorization,json=tlsAuthorization", + Filename: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto", +} + +func init() { + proto.RegisterType((*WatchSelectors)(nil), "docker.protobuf.plugin.WatchSelectors") + proto.RegisterType((*StoreObject)(nil), "docker.protobuf.plugin.StoreObject") + proto.RegisterType((*TLSAuthorization)(nil), "docker.protobuf.plugin.TLSAuthorization") + proto.RegisterExtension(E_Deepcopy) + proto.RegisterExtension(E_StoreObject) + proto.RegisterExtension(E_TlsAuthorization) +} + +func (m *WatchSelectors) Copy() *WatchSelectors { + if m == nil { + return nil + } + o := &WatchSelectors{} + o.CopyFrom(m) + return o +} + +func (m *WatchSelectors) CopyFrom(src interface{}) { + + o := src.(*WatchSelectors) + *m = *o +} + +func (m *StoreObject) Copy() *StoreObject { + if m == nil { + return nil + } + o := &StoreObject{} + o.CopyFrom(m) + return o +} + +func (m *StoreObject) CopyFrom(src interface{}) { + + o := src.(*StoreObject) + *m = *o + if o.WatchSelectors != nil { + m.WatchSelectors = &WatchSelectors{} + deepcopy.Copy(m.WatchSelectors, o.WatchSelectors) + } +} + +func (m *TLSAuthorization) Copy() *TLSAuthorization { + if m == nil { + return nil + } + o := &TLSAuthorization{} + o.CopyFrom(m) + return o +} + +func (m *TLSAuthorization) CopyFrom(src interface{}) { + + o := src.(*TLSAuthorization) + *m = *o + if o.Roles != nil { + m.Roles = make([]string, len(o.Roles)) + copy(m.Roles, o.Roles) + } + +} + +func (m *WatchSelectors) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchSelectors) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != nil { + dAtA[i] = 0x8 + i++ + if *m.ID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.IDPrefix != nil { + dAtA[i] = 0x10 + i++ + if *m.IDPrefix { + dAtA[i] = 1 + } else { 
+ dAtA[i] = 0 + } + i++ + } + if m.Name != nil { + dAtA[i] = 0x18 + i++ + if *m.Name { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.NamePrefix != nil { + dAtA[i] = 0x20 + i++ + if *m.NamePrefix { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Custom != nil { + dAtA[i] = 0x28 + i++ + if *m.Custom { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.CustomPrefix != nil { + dAtA[i] = 0x30 + i++ + if *m.CustomPrefix { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.ServiceID != nil { + dAtA[i] = 0x38 + i++ + if *m.ServiceID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.NodeID != nil { + dAtA[i] = 0x40 + i++ + if *m.NodeID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Slot != nil { + dAtA[i] = 0x48 + i++ + if *m.Slot { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.DesiredState != nil { + dAtA[i] = 0x50 + i++ + if *m.DesiredState { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Role != nil { + dAtA[i] = 0x58 + i++ + if *m.Role { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Membership != nil { + dAtA[i] = 0x60 + i++ + if *m.Membership { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Kind != nil { + dAtA[i] = 0x68 + i++ + if *m.Kind { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *StoreObject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StoreObject) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.WatchSelectors == nil { + return 0, proto.NewRequiredNotSetError("watch_selectors") + } else { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(m.WatchSelectors.Size())) + n1, err := m.WatchSelectors.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *TLSAuthorization) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TLSAuthorization) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Roles) > 0 { + for _, s := range m.Roles { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.Insecure != nil { + dAtA[i] = 0x10 + i++ + if *m.Insecure { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +func (m *WatchSelectors) Size() (n int) { + var l int + _ = l + if m.ID != nil { + n += 2 + } + if m.IDPrefix != nil { + n += 2 + } + if m.Name != nil { + n += 2 + } + if m.NamePrefix != nil { + n += 2 + } + if m.Custom != nil { + n += 2 + } + if m.CustomPrefix != nil { + n += 2 + } + if m.ServiceID != nil { + n += 2 + } + if m.NodeID != nil { + n += 2 + } + if m.Slot != nil { + n += 2 + } + if m.DesiredState != nil { + n += 2 + } + if m.Role != nil { + 
n += 2 + } + if m.Membership != nil { + n += 2 + } + if m.Kind != nil { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StoreObject) Size() (n int) { + var l int + _ = l + if m.WatchSelectors != nil { + l = m.WatchSelectors.Size() + n += 1 + l + sovPlugin(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TLSAuthorization) Size() (n int) { + var l int + _ = l + if len(m.Roles) > 0 { + for _, s := range m.Roles { + l = len(s) + n += 1 + l + sovPlugin(uint64(l)) + } + } + if m.Insecure != nil { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovPlugin(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozPlugin(x uint64) (n int) { + return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *WatchSelectors) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WatchSelectors{`, + `ID:` + valueToStringPlugin(this.ID) + `,`, + `IDPrefix:` + valueToStringPlugin(this.IDPrefix) + `,`, + `Name:` + valueToStringPlugin(this.Name) + `,`, + `NamePrefix:` + valueToStringPlugin(this.NamePrefix) + `,`, + `Custom:` + valueToStringPlugin(this.Custom) + `,`, + `CustomPrefix:` + valueToStringPlugin(this.CustomPrefix) + `,`, + `ServiceID:` + valueToStringPlugin(this.ServiceID) + `,`, + `NodeID:` + valueToStringPlugin(this.NodeID) + `,`, + `Slot:` + valueToStringPlugin(this.Slot) + `,`, + `DesiredState:` + valueToStringPlugin(this.DesiredState) + `,`, + `Role:` + valueToStringPlugin(this.Role) + `,`, + `Membership:` + valueToStringPlugin(this.Membership) + `,`, + `Kind:` + valueToStringPlugin(this.Kind) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *StoreObject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreObject{`, + `WatchSelectors:` + strings.Replace(fmt.Sprintf("%v", this.WatchSelectors), "WatchSelectors", "WatchSelectors", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *TLSAuthorization) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TLSAuthorization{`, + `Roles:` + fmt.Sprintf("%v", this.Roles) + `,`, + `Insecure:` + valueToStringPlugin(this.Insecure) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringPlugin(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *WatchSelectors) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchSelectors: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchSelectors: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
ID", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ID = &b + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefix", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.IDPrefix = &b + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Name = &b + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefix", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.NamePrefix = &b + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Custom", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Custom = &b + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CustomPrefix", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.CustomPrefix = &b + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ServiceID = &b + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.NodeID = &b + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Slot", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Slot = &b + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong 
wireType = %d for field DesiredState", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.DesiredState = &b + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Role = &b + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Membership", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Membership = &b + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Kind = &b + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StoreObject) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StoreObject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StoreObject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WatchSelectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WatchSelectors == nil { + m.WatchSelectors = &WatchSelectors{} + } + if err := m.WatchSelectors.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return proto.NewRequiredNotSetError("watch_selectors") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TLSAuthorization) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TLSAuthorization: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TLSAuthorization: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Insecure", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Insecure = &b + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPlugin(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthPlugin + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipPlugin(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/docker/swarmkit/protobuf/plugin/plugin.proto", fileDescriptorPlugin) +} + +var fileDescriptorPlugin = []byte{ + // 575 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x52, 0xc1, 0x6e, 0xd3, 0x4c, + 0x10, 0xae, 0xd3, 0x36, 0x4d, 0x26, 0x69, 0xff, 0xfe, 0x2b, 0x54, 0xad, 0x7a, 0x70, 0xaa, 0x46, + 0x42, 0x41, 0x42, 0x8e, 0xd4, 0x0b, 0x52, 0x6e, 0x94, 0x5c, 0x22, 0x01, 0x45, 0x0e, 0x12, 0x37, + 0x22, 0xc7, 0x3b, 0x4d, 0x96, 0x3a, 0x5e, 0x6b, 0x77, 0x4d, 0x0a, 0x27, 0x5e, 0x80, 0x07, 0xe0, + 0xca, 0xd3, 0xf4, 0xc8, 0x91, 0x53, 0x44, 0x2d, 0x71, 0xe0, 0x06, 0x6f, 0x80, 0x76, 0xd7, 0x69, + 0x08, 0x6a, 0xc5, 0xc9, 0x33, 0xdf, 0x7c, 0xdf, 0xcc, 0x7c, 0x3b, 0x86, 0x47, 0x13, 0xae, 0xa7, + 0xf9, 0x38, 0x88, 0xc5, 0xac, 0xcb, 0x44, 0x7c, 0x81, 0xb2, 0xab, 0xe6, 0x91, 0x9c, 0x5d, 0x70, + 0xdd, 0xcd, 0xa4, 0xd0, 0x62, 0x9c, 0x9f, 0x77, 0xb3, 0x24, 0x9f, 0xf0, 0xb4, 0xfc, 0x04, 0x16, + 0x26, 0x07, 0x8e, 0x1d, 0x2c, 0x49, 0x81, 0xab, 0x1e, 0x1e, 0x4d, 0x84, 0x98, 0x24, 0xb8, 0x12, + 0x33, 0x54, 0xb1, 0xe4, 0x99, 0x16, 0x25, 0xf7, 0xf8, 0xd3, 0x26, 0xec, 0xbd, 0x8a, 0x74, 0x3c, + 0x1d, 0x62, 0x82, 0xb1, 0x16, 0x52, 0x91, 0x03, 0xa8, 0x70, 0x46, 0xbd, 0x23, 0xaf, 0x53, 0x3b, + 0xad, 0x16, 0x8b, 0x56, 0x65, 0xd0, 0x0f, 0x2b, 0x9c, 0x91, 0x07, 0x50, 0xe7, 0x6c, 0x94, 0x49, + 0x3c, 0xe7, 0x97, 0xb4, 0x62, 0xcb, 0xcd, 0x62, 0xd1, 0xaa, 0x0d, 0xfa, 0x2f, 0x2c, 0x16, 0xd6, + 0x38, 0x73, 0x11, 0x21, 0xb0, 0x95, 0x46, 0x33, 
0xa4, 0x9b, 0x86, 0x15, 0xda, 0x98, 0xb4, 0xa0, + 0x61, 0xbe, 0xcb, 0x06, 0x5b, 0xb6, 0x04, 0x06, 0x2a, 0x45, 0x07, 0x50, 0x8d, 0x73, 0xa5, 0xc5, + 0x8c, 0x6e, 0xdb, 0x5a, 0x99, 0x91, 0x36, 0xec, 0xba, 0x68, 0x29, 0xad, 0xda, 0x72, 0xd3, 0x81, + 0xa5, 0xf8, 0x21, 0x80, 0x42, 0xf9, 0x96, 0xc7, 0x38, 0xe2, 0x8c, 0xee, 0xd8, 0xed, 0x76, 0x8b, + 0x45, 0xab, 0x3e, 0x74, 0xe8, 0xa0, 0x1f, 0xd6, 0x4b, 0xc2, 0x80, 0x91, 0x36, 0xec, 0xa4, 0x82, + 0x59, 0x6a, 0xcd, 0x52, 0xa1, 0x58, 0xb4, 0xaa, 0xcf, 0x05, 0x33, 0xbc, 0xaa, 0x29, 0x0d, 0x98, + 0x31, 0xa1, 0x12, 0xa1, 0x69, 0xdd, 0x99, 0x30, 0xb1, 0xd9, 0x85, 0xa1, 0xe2, 0x12, 0xd9, 0x48, + 0xe9, 0x48, 0x23, 0x05, 0xb7, 0x4b, 0x09, 0x0e, 0x0d, 0x66, 0x84, 0x52, 0x24, 0x48, 0x1b, 0x4e, + 0x68, 0x62, 0xe2, 0x03, 0xcc, 0x70, 0x36, 0x46, 0xa9, 0xa6, 0x3c, 0xa3, 0x4d, 0x67, 0x7e, 0x85, + 0x18, 0xcd, 0x05, 0x4f, 0x19, 0xdd, 0x75, 0x1a, 0x13, 0x1f, 0xbf, 0x86, 0xc6, 0x50, 0x0b, 0x89, + 0x67, 0xe3, 0x37, 0x18, 0x6b, 0x72, 0x06, 0xff, 0xcd, 0xcd, 0xa5, 0x46, 0x6a, 0x79, 0x2a, 0xea, + 0x1d, 0x55, 0x3a, 0x8d, 0x93, 0xfb, 0xc1, 0xed, 0xe7, 0x0f, 0xd6, 0x0f, 0x1b, 0xee, 0xcd, 0xd7, + 0xf2, 0xe3, 0x3e, 0xec, 0xbf, 0x7c, 0x3a, 0x7c, 0x9c, 0xeb, 0xa9, 0x90, 0xfc, 0x7d, 0xa4, 0xb9, + 0x48, 0xc9, 0x3d, 0xd8, 0x36, 0xfb, 0x9a, 0xd6, 0x9b, 0x9d, 0x7a, 0xe8, 0x12, 0x72, 0x08, 0x35, + 0x9e, 0x2a, 0x8c, 0x73, 0x89, 0xee, 0xf2, 0xe1, 0x4d, 0xde, 0x7b, 0x02, 0x35, 0x86, 0x98, 0xc5, + 0x22, 0x7b, 0x47, 0x5a, 0x81, 0xfb, 0xe1, 0x56, 0x9b, 0x3c, 0x43, 0xa5, 0xa2, 0x09, 0x9e, 0x65, + 0xa6, 0xbb, 0xa2, 0x3f, 0x3f, 0xdb, 0xbb, 0xf7, 0xb6, 0xb4, 0xcc, 0x31, 0xbc, 0x11, 0xf6, 0x38, + 0x34, 0x95, 0xb1, 0x3a, 0x12, 0xce, 0xeb, 0x3f, 0x1b, 0xfd, 0xb2, 0x8d, 0x1a, 0x27, 0xed, 0xbb, + 0xbc, 0xff, 0xf1, 0x72, 0x61, 0x43, 0xad, 0x92, 0xde, 0x25, 0xfc, 0xaf, 0x13, 0x35, 0x8a, 0xd6, + 0x6c, 0xfb, 0xb7, 0xcc, 0xd3, 0x53, 0xc1, 0x96, 0xe3, 0x7e, 0x7c, 0xff, 0xd8, 0xb6, 0xf3, 0x3a, + 0x77, 0xcd, 0xfb, 0xfb, 0x25, 0xc3, 0x7d, 0x9d, 0xa8, 0x35, 0xe4, 0x94, 0x5e, 0x5d, 0xfb, 0x1b, + 0x5f, 0xaf, 0xfd, 0x8d, 0x0f, 0x85, 0xef, 0x5d, 0x15, 0xbe, 0xf7, 0xa5, 0xf0, 0xbd, 0x6f, 0x85, + 0xef, 0xfd, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xb3, 0x99, 0x7d, 0xfb, 0xf9, 0x03, 0x00, 0x00, +} diff --git a/extender/code/vendor/github.com/docker/swarmkit/protobuf/plugin/plugin.proto b/extender/code/vendor/github.com/docker/swarmkit/protobuf/plugin/plugin.proto new file mode 100644 index 000000000..312517d71 --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/protobuf/plugin/plugin.proto @@ -0,0 +1,53 @@ +syntax = "proto2"; + +package docker.protobuf.plugin; + +import "google/protobuf/descriptor.proto"; + +message WatchSelectors { + // supported by all object types + optional bool id = 1; + optional bool id_prefix = 2; + optional bool name = 3; + optional bool name_prefix = 4; + optional bool custom = 5; + optional bool custom_prefix = 6; + + // supported by tasks only + optional bool service_id = 7; + optional bool node_id = 8; + optional bool slot = 9; + optional bool desired_state = 10; + + // supported by nodes only + optional bool role = 11; + optional bool membership = 12; + + // supported by: resource + optional bool kind = 13; +} + +message StoreObject { + required WatchSelectors watch_selectors = 1; +} + +extend google.protobuf.MessageOptions { + optional bool deepcopy = 70000 [default=true]; + optional StoreObject store_object = 70001; +} + +message TLSAuthorization { + // Roles contains the acceptable TLS OU roles for the handler. 
+ repeated string roles = 1; + + // Insecure is set to true if this method does not require + // authorization. NOTE: Specifying both "insecure" and a nonempty + // list of roles is invalid. This would fail at codegen time. + optional bool insecure = 2; +} + +extend google.protobuf.MethodOptions { + // TLSAuthorization contains the authorization parameters for this + // method. + optional TLSAuthorization tls_authorization = 73626345; +} diff --git a/extender/code/vendor/github.com/docker/swarmkit/protobuf/ptypes/doc.go b/extender/code/vendor/github.com/docker/swarmkit/protobuf/ptypes/doc.go new file mode 100644 index 000000000..b997ca171 --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/protobuf/ptypes/doc.go @@ -0,0 +1,3 @@ +// Package ptypes provides utility functions for use with +// gogo/protobuf/ptypes. +package ptypes diff --git a/extender/code/vendor/github.com/docker/swarmkit/protobuf/ptypes/timestamp.go b/extender/code/vendor/github.com/docker/swarmkit/protobuf/ptypes/timestamp.go new file mode 100644 index 000000000..3890384cf --- /dev/null +++ b/extender/code/vendor/github.com/docker/swarmkit/protobuf/ptypes/timestamp.go @@ -0,0 +1,17 @@ +package ptypes + +import ( + "time" + + gogotypes "github.com/gogo/protobuf/types" +) + +// MustTimestampProto converts time.Time to a google.protobuf.Timestamp proto. +// It panics if input timestamp is invalid. +func MustTimestampProto(t time.Time) *gogotypes.Timestamp { + ts, err := gogotypes.TimestampProto(t) + if err != nil { + panic(err.Error()) + } + return ts +} diff --git a/extender/code/vendor/github.com/form3tech-oss/jwt-go/.gitignore b/extender/code/vendor/github.com/form3tech-oss/jwt-go/.gitignore new file mode 100644 index 000000000..c0e81a8d9 --- /dev/null +++ b/extender/code/vendor/github.com/form3tech-oss/jwt-go/.gitignore @@ -0,0 +1,5 @@ +.DS_Store +bin +.idea/ + + diff --git a/extender/code/vendor/github.com/form3tech-oss/jwt-go/.travis.yml b/extender/code/vendor/github.com/form3tech-oss/jwt-go/.travis.yml new file mode 100644 index 000000000..3c7fb7e1a --- /dev/null +++ b/extender/code/vendor/github.com/form3tech-oss/jwt-go/.travis.yml @@ -0,0 +1,12 @@ +language: go + +script: + - go vet ./... + - go test -v ./... + +go: + - 1.12 + - 1.13 + - 1.14 + - 1.15 + - tip diff --git a/extender/code/vendor/github.com/form3tech-oss/jwt-go/LICENSE b/extender/code/vendor/github.com/form3tech-oss/jwt-go/LICENSE new file mode 100644 index 000000000..df83a9c2f --- /dev/null +++ b/extender/code/vendor/github.com/form3tech-oss/jwt-go/LICENSE @@ -0,0 +1,8 @@ +Copyright (c) 2012 Dave Grijalva + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/extender/code/vendor/github.com/form3tech-oss/jwt-go/MIGRATION_GUIDE.md b/extender/code/vendor/github.com/form3tech-oss/jwt-go/MIGRATION_GUIDE.md new file mode 100644 index 000000000..7fc1f793c --- /dev/null +++ b/extender/code/vendor/github.com/form3tech-oss/jwt-go/MIGRATION_GUIDE.md @@ -0,0 +1,97 @@ +## Migration Guide from v2 -> v3 + +Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code. + +### `Token.Claims` is now an interface type + +The most requested feature from the 2.0 version of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`. + +`MapClaims` is an alias for `map[string]interface{}` with built-in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type cast the claims property. + +The old example for parsing a token looked like this... + +```go + if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { + fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) + } +``` + +is now directly mapped to... + +```go + if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { + claims := token.Claims.(jwt.MapClaims) + fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) + } +``` + +`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type. + +```go + type MyCustomClaims struct { + User string + *StandardClaims + } + + if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil { + claims := token.Claims.(*MyCustomClaims) + fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt) + } +``` + +### `ParseFromRequest` has been moved + +To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatures have also been augmented to receive a new argument: `Extractor`. + +`Extractors` do the work of picking the token string out of a request. The interface is simple and composable. + +This simple parsing example: + +```go + if token, err := jwt.ParseFromRequest(req, keyLookupFunc); err == nil { + fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) + } +``` + +is directly mapped to: + +```go + if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil { + claims := token.Claims.(jwt.MapClaims) + fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) + } +``` + +There are several concrete `Extractor` types provided for your convenience: + +* `HeaderExtractor` will search a list of headers until one contains content.
+* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content. +* `MultiExtractor` will try a list of `Extractors` in order until one returns content. +* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token. +* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): `Authorization` header and `access_token` argument +* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header + + +### RSA signing methods no longer accept `[]byte` keys + +Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse. + +To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types. + +```go + func keyLookupFunc(token *jwt.Token) (interface{}, error) { + // Don't forget to validate the alg is what you expect: + if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok { + return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) + } + + // Look up key + key, err := lookupPublicKey(token.Header["kid"]) + if err != nil { + return nil, err + } + + // Unpack key from PEM encoded PKCS8 + return jwt.ParseRSAPublicKeyFromPEM(key) + } +``` diff --git a/extender/code/vendor/github.com/form3tech-oss/jwt-go/README.md b/extender/code/vendor/github.com/form3tech-oss/jwt-go/README.md new file mode 100644 index 000000000..d7749077f --- /dev/null +++ b/extender/code/vendor/github.com/form3tech-oss/jwt-go/README.md @@ -0,0 +1,104 @@ +# jwt-go + +[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go) +[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go) + +A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) + +**NEW VERSION COMING:** There have been a lot of improvements suggested since the version 3.0.0 released in 2016. I'm working now on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0 will follow shortly which will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. If you depend on this library and don't want to be interrupted, I recommend you use your dependency management tool to pin to version 3. + +**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic package. The recommendation is to upgrade to at least 1.8.3. See issue #216 for more detail. + +**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided.
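+
+A minimal sketch of such a check inside a `Keyfunc` (illustrative only; the HMAC secret is a placeholder and `fmt` is assumed to be imported):
+
+```go
+keyFunc := func(token *jwt.Token) (interface{}, error) {
+	// Refuse tokens whose "alg" header does not match the key family we expect.
+	if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
+		return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
+	}
+	return []byte("placeholder-secret"), nil // HMAC methods expect a []byte key
+}
+```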
+ +## What the heck is a JWT? + +JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens. + +In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way. + +The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which signing method was used and what key was used. + +The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) for information about reserved keys and the proper way to add your own. + +## What's in the box? + +This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own. + +## Examples + +See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage: + +* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac) +* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac) +* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples) + +## Extensions + +This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`. A minimal sketch of a custom method appears after the Project Status section below. + +Here's an example of an extension that integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS): https://github.com/someone1/gcp-jwt-go + +## Compliance + +This library was last reviewed to comply with [RFC 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences: + +* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. + +## Project Status & Versioning + +This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). + +This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases). + +While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package instead: `gopkg.in/dgrijalva/jwt-go.v3`. It will do the right thing WRT semantic versioning. + +**BREAKING CHANGES:** +* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
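+
+As referenced in the Extensions section above, here is a minimal sketch of registering a custom signing method (illustrative only; `noopSigner` and its "NOOP" algorithm name are invented for this example and are not part of the library — never use a no-op signer outside of tests):
+
+```go
+// noopSigner is a toy SigningMethod that performs no real cryptography.
+type noopSigner struct{}
+
+func (m *noopSigner) Alg() string { return "NOOP" }
+
+func (m *noopSigner) Sign(signingString string, key interface{}) (string, error) {
+	return jwt.EncodeSegment([]byte("sig")), nil
+}
+
+func (m *noopSigner) Verify(signingString, signature string, key interface{}) error {
+	if signature != jwt.EncodeSegment([]byte("sig")) {
+		return jwt.ErrSignatureInvalid
+	}
+	return nil
+}
+
+func init() {
+	// Makes the method resolvable from a token's "alg" header via GetSigningMethod.
+	jwt.RegisterSigningMethod("NOOP", func() jwt.SigningMethod { return &noopSigner{} })
+}
+```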
+ +## Usage Tips + +### Signing vs Encryption + +A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data: + +* The author of the token was in possession of the signing secret +* The data has not been modified since it was signed + +It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library. + +### Choosing a Signing Method + +There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric. + +Symmetric signing methods, such as HMAC, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this is rarely enough to matter. Symmetric signing methods work best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation. + +Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification. + +### Signing Methods and Key Types + +Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones: + +* The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expects `[]byte` values for signing and validation +* The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expects `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation +* The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expects `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation + +A short sketch of the HMAC case follows.
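+
+A minimal sketch of the HMAC case (illustrative only; the secret is a placeholder and `fmt` is assumed to be imported):
+
+```go
+mySigningKey := []byte("placeholder-secret") // HMAC methods take a raw []byte secret
+
+// Sign.
+token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"user": "bender"})
+signed, err := token.SignedString(mySigningKey)
+
+// Verify: the Keyfunc returns the same []byte secret. Production code should
+// also check t.Method, as shown in the security notice above.
+parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
+	return mySigningKey, nil
+})
+fmt.Println(parsed.Valid, err) // true <nil> when the secret matches
+```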
+ +### JWT and OAuth + +It's worth mentioning that OAuth and JWT are not the same thing. A JWT is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication. + +Without going too far down the rabbit hole, here's a description of the interaction of these technologies: + +* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth. +* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. +* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL. + +### Troubleshooting + +This library uses descriptive error messages whenever possible. If you are not getting the expected result, have a look at the errors. The most common place people get stuck is providing the correct type of key to the parser. See the above section on signing methods and key types. + +## More + +Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go). + +The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation. diff --git a/extender/code/vendor/github.com/form3tech-oss/jwt-go/VERSION_HISTORY.md b/extender/code/vendor/github.com/form3tech-oss/jwt-go/VERSION_HISTORY.md new file mode 100644 index 000000000..637029831 --- /dev/null +++ b/extender/code/vendor/github.com/form3tech-oss/jwt-go/VERSION_HISTORY.md @@ -0,0 +1,118 @@ +## `jwt-go` Version History + +#### 3.2.0 + +* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation +* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate +* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. The initial set includes `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before. +* Deprecated `ParseFromRequestWithClaims` to simplify API in the future. + +#### 3.1.0 + +* Improvements to `jwt` command line tool +* Added `SkipClaimsValidation` option to `Parser` +* Documentation updates + +#### 3.0.0 + +* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code + * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods. + * `ParseFromRequest` has been moved to `request` subpackage and usage has changed + * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims. +* Other Additions and Changes + * Added `Claims` interface type to allow users to decode the claims into a custom type + * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into. + * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage + * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims` + * Added new interface type `Extractor`, which is used for extracting JWT strings from HTTP requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`.
+ * Added several new, more specific, validation errors to error type bitmask + * Moved examples from README to executable example files + * Signing method registry is now thread safe + * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser) + +#### 2.7.0 + +This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes. + +* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying +* Error text for expired tokens includes how long it's been expired +* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM` +* Documentation updates + +#### 2.6.0 + +* Exposed inner error within ValidationError +* Fixed validation errors when using UseJSONNumber flag +* Added several unit tests + +#### 2.5.0 + +* Added support for signing method none. You shouldn't use this. The API tries to make this clear. +* Updated/fixed some documentation +* Added more helpful error message when trying to parse tokens that begin with `BEARER ` + +#### 2.4.0 + +* Added new type, Parser, to allow for configuration of various parsing parameters + * You can now specify a list of valid signing methods. Anything outside this set will be rejected. + * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON +* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) +* Fixed some bugs with ECDSA parsing + +#### 2.3.0 + +* Added support for ECDSA signing methods +* Added support for RSA PSS signing methods (requires go v1.4) + +#### 2.2.0 + +* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. + +#### 2.1.0 + +Backwards compatible API change that was missed in 2.0.0. + +* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` + +#### 2.0.0 + +There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. + +The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. + +It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`. + +* **Compatibility Breaking Changes** + * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` + * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` + * `KeyFunc` now returns `interface{}` instead of `[]byte` + * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key + * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key +* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`.
Specific sizes are now just instances of this type. + * Added public package global `SigningMethodHS256` + * Added public package global `SigningMethodHS384` + * Added public package global `SigningMethodHS512` +* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. + * Added public package global `SigningMethodRS256` + * Added public package global `SigningMethodRS384` + * Added public package global `SigningMethodRS512` +* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. +* Refactored the RSA implementation to be easier to read +* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` + +#### 1.0.2 + +* Fixed bug in parsing public keys from certificates +* Added more tests around the parsing of keys for RS256 +* Code refactoring in RS256 implementation. No functional changes + +#### 1.0.1 + +* Fixed panic if RS256 signing method was passed an invalid key + +#### 1.0.0 + +* First versioned release +* API stabilized +* Supports creating, signing, parsing, and validating JWT tokens +* Supports RS256 and HS256 signing methods \ No newline at end of file diff --git a/extender/code/vendor/github.com/form3tech-oss/jwt-go/claims.go b/extender/code/vendor/github.com/form3tech-oss/jwt-go/claims.go new file mode 100644 index 000000000..624890666 --- /dev/null +++ b/extender/code/vendor/github.com/form3tech-oss/jwt-go/claims.go @@ -0,0 +1,136 @@ +package jwt + +import ( + "crypto/subtle" + "fmt" + "time" +) + +// For a type to be a Claims object, it must just have a Valid method that determines +// if the token is invalid for any supported reason +type Claims interface { + Valid() error +} + +// Structured version of Claims Section, as referenced at +// https://tools.ietf.org/html/rfc7519#section-4.1 +// See examples for how to use this with your own claim types +type StandardClaims struct { + Audience []string `json:"aud,omitempty"` + ExpiresAt int64 `json:"exp,omitempty"` + Id string `json:"jti,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` + Issuer string `json:"iss,omitempty"` + NotBefore int64 `json:"nbf,omitempty"` + Subject string `json:"sub,omitempty"` +} + +// Validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (c StandardClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them. + if c.VerifyExpiresAt(now, false) == false { + delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) + vErr.Inner = fmt.Errorf("token is expired by %v", delta) + vErr.Errors |= ValidationErrorExpired + } + + if c.VerifyIssuedAt(now, false) == false { + vErr.Inner = fmt.Errorf("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if c.VerifyNotBefore(now, false) == false { + vErr.Inner = fmt.Errorf("token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud(c.Audience, cmp, req) +} + +// Compares the exp claim against cmp. 
+// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { + return verifyExp(c.ExpiresAt, cmp, req) +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { + return verifyIat(c.IssuedAt, cmp, req) +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + +// Compares the nbf claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { + return verifyNbf(c.NotBefore, cmp, req) +} + +// ----- helpers + +func verifyAud(aud []string, cmp string, required bool) bool { + if len(aud) == 0 { + return !required + } + + for _, a := range aud { + if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 { + return true + } + } + return false +} + +func verifyExp(exp int64, now int64, required bool) bool { + if exp == 0 { + return !required + } + return now <= exp +} + +func verifyIat(iat int64, now int64, required bool) bool { + if iat == 0 { + return !required + } + return now >= iat +} + +func verifyIss(iss string, cmp string, required bool) bool { + if iss == "" { + return !required + } + if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { + return true + } else { + return false + } +} + +func verifyNbf(nbf int64, now int64, required bool) bool { + if nbf == 0 { + return !required + } + return now >= nbf +} diff --git a/extender/code/vendor/github.com/form3tech-oss/jwt-go/doc.go b/extender/code/vendor/github.com/form3tech-oss/jwt-go/doc.go new file mode 100644 index 000000000..a86dc1a3b --- /dev/null +++ b/extender/code/vendor/github.com/form3tech-oss/jwt-go/doc.go @@ -0,0 +1,4 @@ +// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html +// +// See README.md for more info. 
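+//
+// A minimal end-to-end sketch (illustrative only, not part of the upstream
+// file; the secret is a placeholder):
+//
+//	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"sub": "1234"})
+//	signed, err := token.SignedString([]byte("placeholder-secret"))
+//	// signed now holds "header.claims.signature"; HMAC methods require a []byte key.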
+package jwt diff --git a/extender/code/vendor/github.com/form3tech-oss/jwt-go/ecdsa.go b/extender/code/vendor/github.com/form3tech-oss/jwt-go/ecdsa.go new file mode 100644 index 000000000..f97738124 --- /dev/null +++ b/extender/code/vendor/github.com/form3tech-oss/jwt-go/ecdsa.go @@ -0,0 +1,148 @@ +package jwt + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "errors" + "math/big" +) + +var ( + // Sadly this is missing from crypto/ecdsa compared to crypto/rsa + ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") +) + +// Implements the ECDSA family of signing methods +// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification +type SigningMethodECDSA struct { + Name string + Hash crypto.Hash + KeySize int + CurveBits int +} + +// Specific instances for ES256 and company +var ( + SigningMethodES256 *SigningMethodECDSA + SigningMethodES384 *SigningMethodECDSA + SigningMethodES512 *SigningMethodECDSA +) + +func init() { + // ES256 + SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} + RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { + return SigningMethodES256 + }) + + // ES384 + SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} + RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { + return SigningMethodES384 + }) + + // ES512 + SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} + RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { + return SigningMethodES512 + }) +} + +func (m *SigningMethodECDSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an ecdsa.PublicKey struct +func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + // Get the key + var ecdsaKey *ecdsa.PublicKey + switch k := key.(type) { + case *ecdsa.PublicKey: + ecdsaKey = k + default: + return ErrInvalidKeyType + } + + if len(sig) != 2*m.KeySize { + return ErrECDSAVerification + } + + r := big.NewInt(0).SetBytes(sig[:m.KeySize]) + s := big.NewInt(0).SetBytes(sig[m.KeySize:]) + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true { + return nil + } else { + return ErrECDSAVerification + } +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an ecdsa.PrivateKey struct +func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { + // Get the key + var ecdsaKey *ecdsa.PrivateKey + switch k := key.(type) { + case *ecdsa.PrivateKey: + ecdsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return r, s + if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { + curveBits := ecdsaKey.Curve.Params().BitSize + + if m.CurveBits != curveBits { + return "", ErrInvalidKey + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes += 1 + } + + // We serialize the outputs (r and s) into big-endian byte arrays and pad +
// them with zeros on the left to make sure the sizes work out. Both arrays + // must be keyBytes long, and the output must be 2*keyBytes long. + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...) + + return EncodeSegment(out), nil + } else { + return "", err + } +} diff --git a/extender/code/vendor/github.com/form3tech-oss/jwt-go/ecdsa_utils.go b/extender/code/vendor/github.com/form3tech-oss/jwt-go/ecdsa_utils.go new file mode 100644 index 000000000..db9f4be7d --- /dev/null +++ b/extender/code/vendor/github.com/form3tech-oss/jwt-go/ecdsa_utils.go @@ -0,0 +1,69 @@ +package jwt + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key") + ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key") +) + +// Parse PEM encoded Elliptic Curve Private Key Structure +func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *ecdsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, ErrNotECPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded Elliptic Curve public key (PKIX key or certificate) +func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *ecdsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, ErrNotECPublicKey + } + + return pkey, nil +} diff --git a/extender/code/vendor/github.com/form3tech-oss/jwt-go/errors.go b/extender/code/vendor/github.com/form3tech-oss/jwt-go/errors.go new file mode 100644 index 000000000..1c93024aa --- /dev/null +++ b/extender/code/vendor/github.com/form3tech-oss/jwt-go/errors.go @@ -0,0 +1,59 @@ +package jwt + +import ( + "errors" +) + +// Error constants +var ( + ErrInvalidKey = errors.New("key is invalid") + ErrInvalidKeyType = errors.New("key is of invalid type") + ErrHashUnavailable = errors.New("the requested hash function is unavailable") +) + +// The errors that might occur when parsing and validating a token +const ( + ValidationErrorMalformed uint32 = 1 << iota // Token is malformed + ValidationErrorUnverifiable // Token could not be verified because of signing problems + ValidationErrorSignatureInvalid // Signature validation failed + + // Standard Claim validation errors + ValidationErrorAudience // AUD validation failed + ValidationErrorExpired // EXP validation failed + ValidationErrorIssuedAt // IAT validation failed + ValidationErrorIssuer // ISS validation failed + ValidationErrorNotValidYet // NBF validation failed + ValidationErrorId 
// JTI validation failed + ValidationErrorClaimsInvalid // Generic claims validation error +) + +// Helper for constructing a ValidationError with a string error message +func NewValidationError(errorText string, errorFlags uint32) *ValidationError { + return &ValidationError{ + text: errorText, + Errors: errorFlags, + } +} + +// The error from Parse if token is not valid +type ValidationError struct { + Inner error // stores the error returned by external dependencies, i.e.: KeyFunc + Errors uint32 // bitfield. see ValidationError... constants + text string // errors that do not have a valid error just have text +} + +// ValidationError implements the error interface +func (e ValidationError) Error() string { + if e.Inner != nil { + return e.Inner.Error() + } else if e.text != "" { + return e.text + } else { + return "token is invalid" + } +} + +// No errors +func (e *ValidationError) valid() bool { + return e.Errors == 0 +} diff --git a/extender/code/vendor/github.com/form3tech-oss/jwt-go/hmac.go b/extender/code/vendor/github.com/form3tech-oss/jwt-go/hmac.go new file mode 100644 index 000000000..addbe5d40 --- /dev/null +++ b/extender/code/vendor/github.com/form3tech-oss/jwt-go/hmac.go @@ -0,0 +1,95 @@ +package jwt + +import ( + "crypto" + "crypto/hmac" + "errors" +) + +// Implements the HMAC-SHA family of signing methods +// Expects key type of []byte for both signing and validation +type SigningMethodHMAC struct { + Name string + Hash crypto.Hash +} + +// Specific instances for HS256 and company +var ( + SigningMethodHS256 *SigningMethodHMAC + SigningMethodHS384 *SigningMethodHMAC + SigningMethodHS512 *SigningMethodHMAC + ErrSignatureInvalid = errors.New("signature is invalid") +) + +func init() { + // HS256 + SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { + return SigningMethodHS256 + }) + + // HS384 + SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { + return SigningMethodHS384 + }) + + // HS512 + SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { + return SigningMethodHS512 + }) +} + +func (m *SigningMethodHMAC) Alg() string { + return m.Name +} + +// Verify the signature of HSXXX tokens. Returns nil if the signature is valid. +func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { + // Verify the key is the right type + keyBytes, ok := key.([]byte) + if !ok { + return ErrInvalidKeyType + } + + // Decode signature, for comparison + sig, err := DecodeSegment(signature) + if err != nil { + return err + } + + // Can we use the specified hashing method? + if !m.Hash.Available() { + return ErrHashUnavailable + } + + // This signing method is symmetric, so we validate the signature + // by reproducing the signature from the signing string and key, then + // comparing that against the provided signature. + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + if !hmac.Equal(sig, hasher.Sum(nil)) { + return ErrSignatureInvalid + } + + // No validation errors. Signature is good. + return nil +} + +// Implements the Sign method from SigningMethod for this signing method. 
+// Key must be []byte +func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { + if keyBytes, ok := key.([]byte); ok { + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + + return EncodeSegment(hasher.Sum(nil)), nil + } + + return "", ErrInvalidKeyType +} diff --git a/extender/code/vendor/github.com/form3tech-oss/jwt-go/map_claims.go b/extender/code/vendor/github.com/form3tech-oss/jwt-go/map_claims.go new file mode 100644 index 000000000..90ab6bea3 --- /dev/null +++ b/extender/code/vendor/github.com/form3tech-oss/jwt-go/map_claims.go @@ -0,0 +1,102 @@ +package jwt + +import ( + "encoding/json" + "errors" + // "fmt" +) + +// Claims type that uses the map[string]interface{} for JSON decoding +// This is the default claims type if you don't supply one +type MapClaims map[string]interface{} + +// Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyAudience(cmp string, req bool) bool { + aud, ok := m["aud"].([]string) + if !ok { + strAud, ok := m["aud"].(string) + if !ok { + return false + } + aud = append(aud, strAud) + } + + return verifyAud(aud, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { + switch exp := m["exp"].(type) { + case float64: + return verifyExp(int64(exp), cmp, req) + case json.Number: + v, _ := exp.Int64() + return verifyExp(v, cmp, req) + } + return req == false +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { + switch iat := m["iat"].(type) { + case float64: + return verifyIat(int64(iat), cmp, req) + case json.Number: + v, _ := iat.Int64() + return verifyIat(v, cmp, req) + } + return req == false +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { + iss, _ := m["iss"].(string) + return verifyIss(iss, cmp, req) +} + +// Compares the nbf claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { + switch nbf := m["nbf"].(type) { + case float64: + return verifyNbf(int64(nbf), cmp, req) + case json.Number: + v, _ := nbf.Int64() + return verifyNbf(v, cmp, req) + } + return req == false +} + +// Validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. 
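+//
+// Illustrative example (not part of the upstream file; assumes "time" is
+// available to the caller):
+//
+//	claims := MapClaims{"exp": float64(time.Now().Add(time.Hour).Unix())}
+//	err := claims.Valid() // nil: "exp" is in the future; absent "iat"/"nbf" pass.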
+func (m MapClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + if m.VerifyExpiresAt(now, false) == false { + vErr.Inner = errors.New("Token is expired") + vErr.Errors |= ValidationErrorExpired + } + + if m.VerifyIssuedAt(now, false) == false { + vErr.Inner = errors.New("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if m.VerifyNotBefore(now, false) == false { + vErr.Inner = errors.New("Token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} diff --git a/extender/code/vendor/github.com/form3tech-oss/jwt-go/none.go b/extender/code/vendor/github.com/form3tech-oss/jwt-go/none.go new file mode 100644 index 000000000..f04d189d0 --- /dev/null +++ b/extender/code/vendor/github.com/form3tech-oss/jwt-go/none.go @@ -0,0 +1,52 @@ +package jwt + +// Implements the none signing method. This is required by the spec +// but you probably should never use it. +var SigningMethodNone *signingMethodNone + +const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" + +var NoneSignatureTypeDisallowedError error + +type signingMethodNone struct{} +type unsafeNoneMagicConstant string + +func init() { + SigningMethodNone = &signingMethodNone{} + NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) + + RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { + return SigningMethodNone + }) +} + +func (m *signingMethodNone) Alg() string { + return "none" +} + +// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { + // Key must be UnsafeAllowNoneSignatureType to prevent accidentally + // accepting 'none' signing method + if _, ok := key.(unsafeNoneMagicConstant); !ok { + return NoneSignatureTypeDisallowedError + } + // If signing method is none, signature must be an empty string + if signature != "" { + return NewValidationError( + "'none' signing method with non-empty signature", + ValidationErrorSignatureInvalid, + ) + } + + // Accept 'none' signing method. + return nil +} + +// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { + if _, ok := key.(unsafeNoneMagicConstant); ok { + return "", nil + } + return "", NoneSignatureTypeDisallowedError +} diff --git a/extender/code/vendor/github.com/form3tech-oss/jwt-go/parser.go b/extender/code/vendor/github.com/form3tech-oss/jwt-go/parser.go new file mode 100644 index 000000000..d6901d9ad --- /dev/null +++ b/extender/code/vendor/github.com/form3tech-oss/jwt-go/parser.go @@ -0,0 +1,148 @@ +package jwt + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +type Parser struct { + ValidMethods []string // If populated, only these methods will be considered valid + UseJSONNumber bool // Use JSON Number format in JSON decoder + SkipClaimsValidation bool // Skip claims validation during token parsing +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. 
+// If everything is kosher, err will be nil +func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) +} + +func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + token, parts, err := p.ParseUnverified(tokenString, claims) + if err != nil { + return token, err + } + + // Verify signing method is in the required set + if p.ValidMethods != nil { + var signingMethodValid = false + var alg = token.Method.Alg() + for _, m := range p.ValidMethods { + if m == alg { + signingMethodValid = true + break + } + } + if !signingMethodValid { + // signing method is not in the listed set + return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) + } + } + + // Lookup key + var key interface{} + if keyFunc == nil { + // keyFunc was not provided. short circuiting validation + return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable) + } + if key, err = keyFunc(token); err != nil { + // keyFunc returned an error + if ve, ok := err.(*ValidationError); ok { + return token, ve + } + return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} + } + + vErr := &ValidationError{} + + // Validate Claims + if !p.SkipClaimsValidation { + if err := token.Claims.Valid(); err != nil { + + // If the Claims Valid returned an error, check if it is a validation error, + // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set + if e, ok := err.(*ValidationError); !ok { + vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} + } else { + vErr = e + } + } + } + + // Perform validation + token.Signature = parts[2] + if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + vErr.Inner = err + vErr.Errors |= ValidationErrorSignatureInvalid + } + + if vErr.valid() { + token.Valid = true + return token, nil + } + + return token, vErr +} + +// WARNING: Don't use this method unless you know what you're doing +// +// This method parses the token but doesn't validate the signature. It's only +// ever useful in cases where you know the signature is valid (because it has +// been checked previously in the stack) and you want to extract values from +// it. 
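+//
+// A hypothetical caller sketch (illustrative only):
+//
+//	token, _, err := new(Parser).ParseUnverified(tokenString, MapClaims{})
+//	if err == nil {
+//		claims := token.Claims.(MapClaims)
+//		_ = claims["sub"] // inspection only: the signature has NOT been checked
+//	}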
+func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { + parts = strings.Split(tokenString, ".") + if len(parts) != 3 { + return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) + } + + token = &Token{Raw: tokenString} + + // parse Header + var headerBytes []byte + if headerBytes, err = DecodeSegment(parts[0]); err != nil { + if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { + return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) + } + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + if err = json.Unmarshal(headerBytes, &token.Header); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // parse Claims + var claimBytes []byte + token.Claims = claims + + if claimBytes, err = DecodeSegment(parts[1]); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + if p.UseJSONNumber { + dec.UseNumber() + } + // JSON Decode. Special case for map type to avoid weird pointer behavior + if c, ok := token.Claims.(MapClaims); ok { + err = dec.Decode(&c) + } else { + err = dec.Decode(&claims) + } + // Handle decode error + if err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // Lookup signature method + if method, ok := token.Header["alg"].(string); ok { + if token.Method = GetSigningMethod(method); token.Method == nil { + return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) + } + } else { + return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) + } + + return token, parts, nil +} diff --git a/extender/code/vendor/github.com/form3tech-oss/jwt-go/rsa.go b/extender/code/vendor/github.com/form3tech-oss/jwt-go/rsa.go new file mode 100644 index 000000000..e4caf1ca4 --- /dev/null +++ b/extender/code/vendor/github.com/form3tech-oss/jwt-go/rsa.go @@ -0,0 +1,101 @@ +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSA family of signing methods +// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation +type SigningMethodRSA struct { + Name string + Hash crypto.Hash +} + +// Specific instances for RS256 and company +var ( + SigningMethodRS256 *SigningMethodRSA + SigningMethodRS384 *SigningMethodRSA + SigningMethodRS512 *SigningMethodRSA +) + +func init() { + // RS256 + SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { + return SigningMethodRS256 + }) + + // RS384 + SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { + return SigningMethodRS384 + }) + + // RS512 + SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { + return SigningMethodRS512 + }) +} + +func (m *SigningMethodRSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an *rsa.PublicKey structure. 
+func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + var ok bool + + if rsaKey, ok = key.(*rsa.PublicKey); !ok { + return ErrInvalidKeyType + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an *rsa.PrivateKey structure. +func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + var ok bool + + // Validate type of key + if rsaKey, ok = key.(*rsa.PrivateKey); !ok { + return "", ErrInvalidKey + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/extender/code/vendor/github.com/form3tech-oss/jwt-go/rsa_pss.go b/extender/code/vendor/github.com/form3tech-oss/jwt-go/rsa_pss.go new file mode 100644 index 000000000..c01470864 --- /dev/null +++ b/extender/code/vendor/github.com/form3tech-oss/jwt-go/rsa_pss.go @@ -0,0 +1,142 @@ +// +build go1.4 + +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSAPSS family of signing methods +type SigningMethodRSAPSS struct { + *SigningMethodRSA + Options *rsa.PSSOptions + // VerifyOptions is optional. If set, it overrides Options for rsa.VerifyPSS. + // Used to accept tokens signed with rsa.PSSSaltLengthAuto, which doesn't follow + // https://tools.ietf.org/html/rfc7518#section-3.5 but was used previously. + // See https://github.com/dgrijalva/jwt-go/issues/285#issuecomment-437451244 for details. + VerifyOptions *rsa.PSSOptions +} + +// Specific instances for RS/PS and company. 
+var ( + SigningMethodPS256 *SigningMethodRSAPSS + SigningMethodPS384 *SigningMethodRSAPSS + SigningMethodPS512 *SigningMethodRSAPSS +) + +func init() { + // PS256 + SigningMethodPS256 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS256", + Hash: crypto.SHA256, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { + return SigningMethodPS256 + }) + + // PS384 + SigningMethodPS384 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS384", + Hash: crypto.SHA384, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { + return SigningMethodPS384 + }) + + // PS512 + SigningMethodPS512 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS512", + Hash: crypto.SHA512, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { + return SigningMethodPS512 + }) +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an rsa.PublicKey struct +func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + switch k := key.(type) { + case *rsa.PublicKey: + rsaKey = k + default: + return ErrInvalidKey + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + opts := m.Options + if m.VerifyOptions != nil { + opts = m.VerifyOptions + } + + return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts) +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an rsa.PrivateKey struct +func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + + switch k := key.(type) { + case *rsa.PrivateKey: + rsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/extender/code/vendor/github.com/form3tech-oss/jwt-go/rsa_utils.go b/extender/code/vendor/github.com/form3tech-oss/jwt-go/rsa_utils.go new file mode 100644 index 000000000..14c78c292 --- /dev/null +++ b/extender/code/vendor/github.com/form3tech-oss/jwt-go/rsa_utils.go @@ -0,0 +1,101 @@ +package jwt + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be a PEM encoded PKCS1 or PKCS8 key") + ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") + ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key") +) + +// Parse PEM encoded PKCS1 or PKCS8 
private key +func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 private key protected with password +func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + + var blockDecrypted []byte + if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil { + return nil, err + } + + if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *rsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, ErrNotRSAPublicKey + } + + return pkey, nil +} diff --git a/extender/code/vendor/github.com/form3tech-oss/jwt-go/signing_method.go b/extender/code/vendor/github.com/form3tech-oss/jwt-go/signing_method.go new file mode 100644 index 000000000..ed1f212b2 --- /dev/null +++ b/extender/code/vendor/github.com/form3tech-oss/jwt-go/signing_method.go @@ -0,0 +1,35 @@ +package jwt + +import ( + "sync" +) + +var signingMethods = map[string]func() SigningMethod{} +var signingMethodLock = new(sync.RWMutex) + +// Implement SigningMethod to add new methods for signing or verifying tokens. +type SigningMethod interface { + Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid + Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error + Alg() string // returns the alg identifier for this method (example: 'HS256') +} + +// Register the "alg" name and a factory function for signing method. 
+// This is typically done during init() in the method's implementation +func RegisterSigningMethod(alg string, f func() SigningMethod) { + signingMethodLock.Lock() + defer signingMethodLock.Unlock() + + signingMethods[alg] = f +} + +// Get a signing method from an "alg" string +func GetSigningMethod(alg string) (method SigningMethod) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + if methodF, ok := signingMethods[alg]; ok { + method = methodF() + } + return +} diff --git a/extender/code/vendor/github.com/form3tech-oss/jwt-go/token.go b/extender/code/vendor/github.com/form3tech-oss/jwt-go/token.go new file mode 100644 index 000000000..d637e0867 --- /dev/null +++ b/extender/code/vendor/github.com/form3tech-oss/jwt-go/token.go @@ -0,0 +1,108 @@ +package jwt + +import ( + "encoding/base64" + "encoding/json" + "strings" + "time" +) + +// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). +// You can override it to use another time value. This is useful for testing or if your +// server uses a different time zone than your tokens. +var TimeFunc = time.Now + +// Parse methods use this callback function to supply +// the key for verification. The function receives the parsed, +// but unverified Token. This allows you to use properties in the +// Header of the token (such as `kid`) to identify which key to use. +type Keyfunc func(*Token) (interface{}, error) + +// A JWT Token. Different fields will be used depending on whether you're +// creating or parsing/verifying a token. +type Token struct { + Raw string // The raw token. Populated when you Parse a token + Method SigningMethod // The signing method used or to be used + Header map[string]interface{} // The first segment of the token + Claims Claims // The second segment of the token + Signature string // The third segment of the token. Populated when you Parse a token + Valid bool // Is the token valid? Populated when you Parse/Verify a token +} + +// Create a new Token. Takes a signing method +func New(method SigningMethod) *Token { + return NewWithClaims(method, MapClaims{}) +} + +func NewWithClaims(method SigningMethod, claims Claims) *Token { + return &Token{ + Header: map[string]interface{}{ + "typ": "JWT", + "alg": method.Alg(), + }, + Claims: claims, + Method: method, + } +} + +// Get the complete, signed token +func (t *Token) SignedString(key interface{}) (string, error) { + var sig, sstr string + var err error + if sstr, err = t.SigningString(); err != nil { + return "", err + } + if sig, err = t.Method.Sign(sstr, key); err != nil { + return "", err + } + return strings.Join([]string{sstr, sig}, "."), nil +} + +// Generate the signing string. This is the +// most expensive part of the whole deal. Unless you +// need this for something special, just go straight for +// the SignedString. +func (t *Token) SigningString() (string, error) { + var err error + parts := make([]string, 2) + for i, _ := range parts { + var jsonValue []byte + if i == 0 { + if jsonValue, err = json.Marshal(t.Header); err != nil { + return "", err + } + } else { + if jsonValue, err = json.Marshal(t.Claims); err != nil { + return "", err + } + } + + parts[i] = EncodeSegment(jsonValue) + } + return strings.Join(parts, "."), nil +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. 
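+// (A keyfunc commonly switches on token.Header["kid"] or on the signing
+// method to pick among several candidate keys.)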
+// If everything is kosher, err will be nil +func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return new(Parser).Parse(tokenString, keyFunc) +} + +func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) +} + +// Encode JWT specific base64url encoding with padding stripped +func EncodeSegment(seg []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") +} + +// Decode JWT specific base64url encoding with padding stripped +func DecodeSegment(seg string) ([]byte, error) { + if l := len(seg) % 4; l > 0 { + seg += strings.Repeat("=", 4-l) + } + + return base64.URLEncoding.DecodeString(seg) +} diff --git a/extender/code/vendor/github.com/fsnotify/fsnotify/.editorconfig b/extender/code/vendor/github.com/fsnotify/fsnotify/.editorconfig new file mode 100644 index 000000000..fad895851 --- /dev/null +++ b/extender/code/vendor/github.com/fsnotify/fsnotify/.editorconfig @@ -0,0 +1,12 @@ +root = true + +[*.go] +indent_style = tab +indent_size = 4 +insert_final_newline = true + +[*.{yml,yaml}] +indent_style = space +indent_size = 2 +insert_final_newline = true +trim_trailing_whitespace = true diff --git a/extender/code/vendor/github.com/fsnotify/fsnotify/.gitattributes b/extender/code/vendor/github.com/fsnotify/fsnotify/.gitattributes new file mode 100644 index 000000000..32f1001be --- /dev/null +++ b/extender/code/vendor/github.com/fsnotify/fsnotify/.gitattributes @@ -0,0 +1 @@ +go.sum linguist-generated diff --git a/extender/code/vendor/github.com/fsnotify/fsnotify/.gitignore b/extender/code/vendor/github.com/fsnotify/fsnotify/.gitignore new file mode 100644 index 000000000..4cd0cbaf4 --- /dev/null +++ b/extender/code/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -0,0 +1,6 @@ +# Setup a Global .gitignore for OS and editor generated files: +# https://help.github.com/articles/ignoring-files +# git config --global core.excludesfile ~/.gitignore_global + +.vagrant +*.sublime-project diff --git a/extender/code/vendor/github.com/fsnotify/fsnotify/.travis.yml b/extender/code/vendor/github.com/fsnotify/fsnotify/.travis.yml new file mode 100644 index 000000000..a9c30165c --- /dev/null +++ b/extender/code/vendor/github.com/fsnotify/fsnotify/.travis.yml @@ -0,0 +1,36 @@ +sudo: false +language: go + +go: + - "stable" + - "1.11.x" + - "1.10.x" + - "1.9.x" + +matrix: + include: + - go: "stable" + env: GOLINT=true + allow_failures: + - go: tip + fast_finish: true + + +before_install: + - if [ ! -z "${GOLINT}" ]; then go get -u golang.org/x/lint/golint; fi + +script: + - go test --race ./... + +after_script: + - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" + - if [ ! -z "${GOLINT}" ]; then echo running golint; golint --set_exit_status ./...; else echo skipping golint; fi + - go vet ./... + +os: + - linux + - osx + - windows + +notifications: + email: false diff --git a/extender/code/vendor/github.com/fsnotify/fsnotify/AUTHORS b/extender/code/vendor/github.com/fsnotify/fsnotify/AUTHORS new file mode 100644 index 000000000..5ab5d41c5 --- /dev/null +++ b/extender/code/vendor/github.com/fsnotify/fsnotify/AUTHORS @@ -0,0 +1,52 @@ +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# You can update this list using the following command: +# +# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' + +# Please keep the list sorted. 
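Taken together, the vendored jwt-go pieces above (the PS256/384/512 registration, the PEM helpers, the method registry, and `Token`) compose as follows. This is a minimal, hypothetical sketch rather than code from the diff; the import path, the `main` scaffolding, and the sample claim are assumptions, and it relies only on the APIs shown above:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"

	jwt "github.com/form3tech-oss/jwt-go"
)

func main() {
	// Generate a throwaway key and round-trip it through PEM to
	// exercise ParseRSAPrivateKeyFromPEM.
	raw, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	pemBytes := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(raw),
	})
	priv, err := jwt.ParseRSAPrivateKeyFromPEM(pemBytes)
	if err != nil {
		log.Fatal(err)
	}

	// PS256 was registered by the package's init(), so the registry
	// lookup resolves it by name.
	token := jwt.NewWithClaims(jwt.GetSigningMethod("PS256"), jwt.MapClaims{"sub": "extender"})
	signed, err := token.SignedString(priv)
	if err != nil {
		log.Fatal(err)
	}

	// Verify: the keyfunc inspects the parsed-but-unverified token and
	// returns the matching public key.
	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		if _, ok := t.Method.(*jwt.SigningMethodRSAPSS); !ok {
			return nil, fmt.Errorf("unexpected alg %v", t.Header["alg"])
		}
		return &priv.PublicKey, nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token valid:", parsed.Valid)
}
```

Note that `GetSigningMethod` returns nil for an unregistered alg, which is why the keyfunc double-checks the method type before handing back the public key.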
+ +Aaron L +Adrien Bustany +Amit Krishnan +Anmol Sethi +Bjørn Erik Pedersen +Bruno Bigras +Caleb Spare +Case Nelson +Chris Howey +Christoffer Buchholz +Daniel Wagner-Hall +Dave Cheney +Evan Phoenix +Francisco Souza +Hari haran +John C Barstow +Kelvin Fo +Ken-ichirou MATSUZAWA +Matt Layher +Nathan Youngman +Nickolai Zeldovich +Patrick +Paul Hammond +Pawel Knap +Pieter Droogendijk +Pursuit92 +Riku Voipio +Rob Figueiredo +Rodrigo Chiossi +Slawek Ligus +Soge Zhang +Tiffany Jernigan +Tilak Sharma +Tom Payne +Travis Cline +Tudor Golubenco +Vahe Khachikyan +Yukang +bronze1man +debrando +henrikedwards +铁哥 diff --git a/extender/code/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/extender/code/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md new file mode 100644 index 000000000..be4d7ea2c --- /dev/null +++ b/extender/code/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -0,0 +1,317 @@ +# Changelog + +## v1.4.7 / 2018-01-09 + +* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) +* Tests: Fix missing verb on format string (thanks @rchiossi) +* Linux: Fix deadlock in Remove (thanks @aarondl) +* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne) +* Docs: Moved FAQ into the README (thanks @vahe) +* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) +* Docs: replace references to OS X with macOS + +## v1.4.2 / 2016-10-10 + +* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) + +## v1.4.1 / 2016-10-04 + +* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) + +## v1.4.0 / 2016-10-01 + +* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) + +## v1.3.1 / 2016-06-28 + +* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) + +## v1.3.0 / 2016-04-19 + +* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) + +## v1.2.10 / 2016-03-02 + +* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) + +## v1.2.9 / 2016-01-13 + +kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) + +## v1.2.8 / 2015-12-17 + +* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) +* inotify: fix race in test +* enable race detection for continuous integration (Linux, Mac, Windows) + +## v1.2.5 / 2015-10-17 + +* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) +* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) +* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) +* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) + +## v1.2.1 / 2015-10-14 + +* kqueue: don't watch named pipes 
[#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) + +## v1.2.0 / 2015-02-08 + +* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) +* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) +* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) + +## v1.1.1 / 2015-02-05 + +* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) + +## v1.1.0 / 2014-12-12 + +* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) + * add low-level functions + * only need to store flags on directories + * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13) + * done can be an unbuffered channel + * remove calls to os.NewSyscallError +* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher) +* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## v1.0.4 / 2014-09-07 + +* kqueue: add dragonfly to the build tags. +* Rename source code files, rearrange code so exported APIs are at the top. +* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) + +## v1.0.3 / 2014-08-19 + +* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) + +## v1.0.2 / 2014-08-17 + +* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) +* [Fix] Make ./path and path equivalent. (thanks @zhsso) + +## v1.0.0 / 2014-08-15 + +* [API] Remove AddWatch on Windows, use Add. +* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) +* Minor updates based on feedback from golint. + +## dev / 2014-07-09 + +* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify). +* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) + +## dev / 2014-07-04 + +* kqueue: fix incorrect mutex used in Close() +* Update example to demonstrate usage of Op. + +## dev / 2014-06-28 + +* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4) +* Fix for String() method on Event (thanks Alex Brainman) +* Don't build on Plan 9 or Solaris (thanks @4ad) + +## dev / 2014-06-21 + +* Events channel of type Event rather than *Event. +* [internal] use syscall constants directly for inotify and kqueue. +* [internal] kqueue: rename events to kevents and fileEvent to event. + +## dev / 2014-06-19 + +* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). +* [internal] remove cookie from Event struct (unused). +* [internal] Event struct has the same definition across every OS. +* [internal] remove internal watch and removeWatch methods. + +## dev / 2014-06-12 + +* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). +* [API] Pluralized channel names: Events and Errors. +* [API] Renamed FileEvent struct to Event. +* [API] Op constants replace methods like IsCreate(). 
+ +## dev / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## dev / 2014-05-23 + +* [API] Remove current implementation of WatchFlags. + * current implementation doesn't take advantage of OS for efficiency + * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes + * no tests for the current implementation + * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) + +## v0.9.3 / 2014-12-31 + +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## v0.9.2 / 2014-08-17 + +* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) + +## v0.9.1 / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## v0.9.0 / 2014-01-17 + +* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) +* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) +* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. + +## v0.8.12 / 2013-11-13 + +* [API] Remove FD_SET and friends from Linux adapter + +## v0.8.11 / 2013-11-02 + +* [Doc] Add Changelog [#72][] (thanks @nathany) +* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) + +## v0.8.10 / 2013-10-19 + +* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) +* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) +* [Doc] specify OS-specific limits in README (thanks @debrando) + +## v0.8.9 / 2013-09-08 + +* [Doc] Contributing (thanks @nathany) +* [Doc] update package path in example code [#63][] (thanks @paulhammond) +* [Doc] GoCI badge in README (Linux only) [#60][] +* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) + +## v0.8.8 / 2013-06-17 + +* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) + +## v0.8.7 / 2013-06-03 + +* [API] Make syscall flags internal +* [Fix] inotify: ignore event changes +* [Fix] race in symlink test [#45][] (reported by @srid) +* [Fix] tests on Windows +* lower case error messages + +## v0.8.6 / 2013-05-23 + +* kqueue: Use EVT_ONLY flag on Darwin +* [Doc] Update README with full example + +## v0.8.5 / 2013-05-09 + +* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) + +## v0.8.4 / 2013-04-07 + +* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) + +## v0.8.3 / 2013-03-13 + +* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) +* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) + +## v0.8.2 / 2013-02-07 + +* [Doc] add Authors +* [Fix] fix data races for map access [#29][] (thanks @fsouza) + +## v0.8.1 / 2013-01-09 + +* [Fix] Windows path separators +* [Doc] BSD License + +## v0.8.0 / 2012-11-09 + +* kqueue: directory watching improvements (thanks @vmirage) +* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) +* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) + +## v0.7.4 / 2012-10-09 + +* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) +* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) +* [Fix] kqueue: watch the directory even if it isn't a 
new watch (thanks @robfig) +* [Fix] kqueue: modify after recreation of file + +## v0.7.3 / 2012-09-27 + +* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) +* [Fix] kqueue: no longer get duplicate CREATE events + +## v0.7.2 / 2012-09-01 + +* kqueue: events for created directories + +## v0.7.1 / 2012-07-14 + +* [Fix] for renaming files + +## v0.7.0 / 2012-07-02 + +* [Feature] FSNotify flags +* [Fix] inotify: Added file name back to event path + +## v0.6.0 / 2012-06-06 + +* kqueue: watch files after directory created (thanks @tmc) + +## v0.5.1 / 2012-05-22 + +* [Fix] inotify: remove all watches before Close() + +## v0.5.0 / 2012-05-03 + +* [API] kqueue: return errors during watch instead of sending over channel +* kqueue: match symlink behavior on Linux +* inotify: add `DELETE_SELF` (requested by @taralx) +* [Fix] kqueue: handle EINTR (reported by @robfig) +* [Doc] Godoc example [#1][] (thanks @davecheney) + +## v0.4.0 / 2012-03-30 + +* Go 1 released: build with go tool +* [Feature] Windows support using winfsnotify +* Windows does not have attribute change notifications +* Roll attribute notifications into IsModify + +## v0.3.0 / 2012-02-19 + +* kqueue: add files when watch directory + +## v0.2.0 / 2011-12-30 + +* update to latest Go weekly code + +## v0.1.0 / 2011-10-19 + +* kqueue: add watch on file creation to match inotify +* kqueue: create file event +* inotify: ignore `IN_IGNORED` events +* event String() +* linux: common FileEvent functions +* initial commit + +[#79]: https://github.com/howeyc/fsnotify/pull/79 +[#77]: https://github.com/howeyc/fsnotify/pull/77 +[#72]: https://github.com/howeyc/fsnotify/issues/72 +[#71]: https://github.com/howeyc/fsnotify/issues/71 +[#70]: https://github.com/howeyc/fsnotify/issues/70 +[#63]: https://github.com/howeyc/fsnotify/issues/63 +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#60]: https://github.com/howeyc/fsnotify/issues/60 +[#59]: https://github.com/howeyc/fsnotify/issues/59 +[#49]: https://github.com/howeyc/fsnotify/issues/49 +[#45]: https://github.com/howeyc/fsnotify/issues/45 +[#40]: https://github.com/howeyc/fsnotify/issues/40 +[#36]: https://github.com/howeyc/fsnotify/issues/36 +[#33]: https://github.com/howeyc/fsnotify/issues/33 +[#29]: https://github.com/howeyc/fsnotify/issues/29 +[#25]: https://github.com/howeyc/fsnotify/issues/25 +[#24]: https://github.com/howeyc/fsnotify/issues/24 +[#21]: https://github.com/howeyc/fsnotify/issues/21 diff --git a/extender/code/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/extender/code/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md new file mode 100644 index 000000000..828a60b24 --- /dev/null +++ b/extender/code/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -0,0 +1,77 @@ +# Contributing + +## Issues + +* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues). +* Please indicate the platform you are using fsnotify on. +* A code example to reproduce the problem is appreciated. + +## Pull Requests + +### Contributor License Agreement + +fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. 
Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). + +Please indicate that you have signed the CLA in your pull request. + +### How fsnotify is Developed + +* Development is done on feature branches. +* Tests are run on BSD, Linux, macOS and Windows. +* Pull requests are reviewed and [applied to master][am] using [hub][]. + * Maintainers may modify or squash commits rather than asking contributors to. +* To issue a new release, the maintainers will: + * Update the CHANGELOG + * Tag a version, which will become available through gopkg.in. + +### How to Fork + +For smooth sailing, always use the original import path. Installing with `go get` makes this easy. + +1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`) +2. Create your feature branch (`git checkout -b my-new-feature`) +3. Ensure everything works and the tests pass (see below) +4. Commit your changes (`git commit -am 'Add some feature'`) + +Contribute upstream: + +1. Fork fsnotify on GitHub +2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) +3. Push to the branch (`git push fork my-new-feature`) +4. Create a new Pull Request on GitHub + +This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/). + +### Testing + +fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. + +Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. + +To aid in cross-platform testing there is a Vagrantfile for Linux and BSD. + +* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/) +* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder. +* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password) +* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`. +* When you're done, you will want to halt or destroy the Vagrant boxes. + +Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory. + +Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads). + +### Maintainers + +Help maintaining fsnotify is welcome. To be a maintainer: + +* Submit a pull request and sign the CLA as above. +* You must be able to run the test suite on Mac, Windows, Linux and BSD. + +To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][]. + +All code changes should be internal pull requests. + +Releases are tagged using [Semantic Versioning](http://semver.org/). 
+ +[hub]: https://github.com/github/hub +[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs diff --git a/extender/code/vendor/github.com/fsnotify/fsnotify/LICENSE b/extender/code/vendor/github.com/fsnotify/fsnotify/LICENSE new file mode 100644 index 000000000..e180c8fb0 --- /dev/null +++ b/extender/code/vendor/github.com/fsnotify/fsnotify/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2012-2019 fsnotify Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/extender/code/vendor/github.com/fsnotify/fsnotify/README.md b/extender/code/vendor/github.com/fsnotify/fsnotify/README.md new file mode 100644 index 000000000..b2629e522 --- /dev/null +++ b/extender/code/vendor/github.com/fsnotify/fsnotify/README.md @@ -0,0 +1,130 @@ +# File system notifications for Go + +[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) + +fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running: + +```console +go get -u golang.org/x/sys/... +``` + +Cross platform: Windows, Linux, BSD and macOS. 
+ +| Adapter | OS | Status | +| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | +| inotify | Linux 2.6.27 or later, Android\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| kqueue | BSD, macOS, iOS\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| ReadDirectoryChangesW | Windows | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | +| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) | +| fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) | +| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | +| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | + +\* Android and iOS are untested. + +Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. + +## API stability + +fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). + +All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number. + +Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`. + +## Usage + +```go +package main + +import ( + "log" + + "github.com/fsnotify/fsnotify" +) + +func main() { + watcher, err := fsnotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + defer watcher.Close() + + done := make(chan bool) + go func() { + for { + select { + case event, ok := <-watcher.Events: + if !ok { + return + } + log.Println("event:", event) + if event.Op&fsnotify.Write == fsnotify.Write { + log.Println("modified file:", event.Name) + } + case err, ok := <-watcher.Errors: + if !ok { + return + } + log.Println("error:", err) + } + } + }() + + err = watcher.Add("/tmp/foo") + if err != nil { + log.Fatal(err) + } + <-done +} +``` + +## Contributing + +Please refer to [CONTRIBUTING][] before opening an issue or pull request. + +## Example + +See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go). + +## FAQ + +**When a file is moved to another directory is it still being watched?** + +No (it shouldn't be, unless you are watching where it was moved to). + +**When I watch a directory, are all subdirectories watched as well?** + +No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]). + +**Do I have to watch the Error and Event channels in a separate goroutine?** + +As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]) + +**Why am I receiving multiple events for the same file on OS X?** + +Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). 
A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]). + +**How many files can be watched at once?** + +There are OS-specific limits as to how many watches can be created: +* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. +* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. + +**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?** + +fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications. + +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#18]: https://github.com/fsnotify/fsnotify/issues/18 +[#11]: https://github.com/fsnotify/fsnotify/issues/11 +[#7]: https://github.com/howeyc/fsnotify/issues/7 + +[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md + +## Related Projects + +* [notify](https://github.com/rjeczalik/notify) +* [fsevents](https://github.com/fsnotify/fsevents) + diff --git a/extender/code/vendor/github.com/fsnotify/fsnotify/fen.go b/extender/code/vendor/github.com/fsnotify/fsnotify/fen.go new file mode 100644 index 000000000..ced39cb88 --- /dev/null +++ b/extender/code/vendor/github.com/fsnotify/fsnotify/fen.go @@ -0,0 +1,37 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package fsnotify + +import ( + "errors" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops watching the the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/extender/code/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/extender/code/vendor/github.com/fsnotify/fsnotify/fsnotify.go new file mode 100644 index 000000000..89cab046d --- /dev/null +++ b/extender/code/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -0,0 +1,68 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9 + +// Package fsnotify provides a platform-independent interface for file system notifications. +package fsnotify + +import ( + "bytes" + "errors" + "fmt" +) + +// Event represents a single file system notification. +type Event struct { + Name string // Relative path to the file or directory. + Op Op // File operation that triggered the event. +} + +// Op describes a set of file operations. +type Op uint32 + +// These are the generalized file operations that can trigger a notification. 
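+// Ops are bit flags and several may be set on a single event, so callers
+// test membership with a bitwise AND rather than equality, e.g.:
+//
+//	if event.Op&Write == Write {
+//		// contents changed
+//	}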
+const ( + Create Op = 1 << iota + Write + Remove + Rename + Chmod +) + +func (op Op) String() string { + // Use a buffer for efficient string concatenation + var buffer bytes.Buffer + + if op&Create == Create { + buffer.WriteString("|CREATE") + } + if op&Remove == Remove { + buffer.WriteString("|REMOVE") + } + if op&Write == Write { + buffer.WriteString("|WRITE") + } + if op&Rename == Rename { + buffer.WriteString("|RENAME") + } + if op&Chmod == Chmod { + buffer.WriteString("|CHMOD") + } + if buffer.Len() == 0 { + return "" + } + return buffer.String()[1:] // Strip leading pipe +} + +// String returns a string representation of the event in the form +// "file: REMOVE|WRITE|..." +func (e Event) String() string { + return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) +} + +// Common errors that can be reported by a watcher +var ( + ErrEventOverflow = errors.New("fsnotify queue overflow") +) diff --git a/extender/code/vendor/github.com/fsnotify/fsnotify/go.mod b/extender/code/vendor/github.com/fsnotify/fsnotify/go.mod new file mode 100644 index 000000000..ff11e13f2 --- /dev/null +++ b/extender/code/vendor/github.com/fsnotify/fsnotify/go.mod @@ -0,0 +1,5 @@ +module github.com/fsnotify/fsnotify + +go 1.13 + +require golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 diff --git a/extender/code/vendor/github.com/fsnotify/fsnotify/go.sum b/extender/code/vendor/github.com/fsnotify/fsnotify/go.sum new file mode 100644 index 000000000..f60af9855 --- /dev/null +++ b/extender/code/vendor/github.com/fsnotify/fsnotify/go.sum @@ -0,0 +1,2 @@ +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 h1:L2auWcuQIvxz9xSEqzESnV/QN/gNRXNApHi3fYwl2w0= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/extender/code/vendor/github.com/fsnotify/fsnotify/inotify.go b/extender/code/vendor/github.com/fsnotify/fsnotify/inotify.go new file mode 100644 index 000000000..d9fd1b88a --- /dev/null +++ b/extender/code/vendor/github.com/fsnotify/fsnotify/inotify.go @@ -0,0 +1,337 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + mu sync.Mutex // Map access + fd int + poller *fdPoller + watches map[string]*watch // Map of inotify watches (key: path) + paths map[int]string // Map of watched paths (key: watch descriptor) + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + doneResp chan struct{} // Channel to respond to Close +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. 
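+// A minimal sketch of typical use (error handling elided):
+//
+//	w, _ := NewWatcher()
+//	defer w.Close()
+//	_ = w.Add("/tmp/foo")
+//	event := <-w.Events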
+func NewWatcher() (*Watcher, error) { + // Create inotify fd + fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) + if fd == -1 { + return nil, errno + } + // Create epoll + poller, err := newFdPoller(fd) + if err != nil { + unix.Close(fd) + return nil, err + } + w := &Watcher{ + fd: fd, + poller: poller, + watches: make(map[string]*watch), + paths: make(map[int]string), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + doneResp: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +func (w *Watcher) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + if w.isClosed() { + return nil + } + + // Send 'close' signal to goroutine, and set the Watcher to closed. + close(w.done) + + // Wake up goroutine + w.poller.wake() + + // Wait for goroutine to close + <-w.doneResp + + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + name = filepath.Clean(name) + if w.isClosed() { + return errors.New("inotify instance already closed") + } + + const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | + unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | + unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + + var flags uint32 = agnosticEvents + + w.mu.Lock() + defer w.mu.Unlock() + watchEntry := w.watches[name] + if watchEntry != nil { + flags |= watchEntry.flags | unix.IN_MASK_ADD + } + wd, errno := unix.InotifyAddWatch(w.fd, name, flags) + if wd == -1 { + return errno + } + + if watchEntry == nil { + w.watches[name] = &watch{wd: uint32(wd), flags: flags} + w.paths[wd] = name + } else { + watchEntry.wd = uint32(wd) + watchEntry.flags = flags + } + + return nil +} + +// Remove stops watching the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + + // Fetch the watch. + w.mu.Lock() + defer w.mu.Unlock() + watch, ok := w.watches[name] + + // Remove it from inotify. + if !ok { + return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) + } + + // We successfully removed the watch if InotifyRmWatch doesn't return an + // error, we need to clean up our internal state to ensure it matches + // inotify's kernel state. + delete(w.paths, int(watch.wd)) + delete(w.watches, name) + + // inotify_rm_watch will return EINVAL if the file has been deleted; + // the inotify will already have been removed. + // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously + // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE + // so that EINVAL means that the wd is being rm_watch()ed or its file removed + // by another thread and we have not received IN_IGNORE event. + success, errno := unix.InotifyRmWatch(w.fd, watch.wd) + if success == -1 { + // TODO: Perhaps it's not helpful to return an error here in every case. + // the only two possible errors are: + // EBADF, which happens when w.fd is not a valid file descriptor of any kind. + // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. + // Watch descriptors are invalidated when they are removed explicitly or implicitly; + // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. 
+ return errno + } + + return nil +} + +type watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) +} + +// readEvents reads from the inotify file descriptor, converts the +// received events into Event objects and sends them via the Events channel +func (w *Watcher) readEvents() { + var ( + buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events + n int // Number of bytes read with read() + errno error // Syscall errno + ok bool // For poller.wait + ) + + defer close(w.doneResp) + defer close(w.Errors) + defer close(w.Events) + defer unix.Close(w.fd) + defer w.poller.close() + + for { + // See if we have been closed. + if w.isClosed() { + return + } + + ok, errno = w.poller.wait() + if errno != nil { + select { + case w.Errors <- errno: + case <-w.done: + return + } + continue + } + + if !ok { + continue + } + + n, errno = unix.Read(w.fd, buf[:]) + // If a signal interrupted execution, see if we've been asked to close, and try again. + // http://man7.org/linux/man-pages/man7/signal.7.html : + // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" + if errno == unix.EINTR { + continue + } + + // unix.Read might have been woken up by Close. If so, we're done. + if w.isClosed() { + return + } + + if n < unix.SizeofInotifyEvent { + var err error + if n == 0 { + // If EOF is received. This should really never happen. + err = io.EOF + } else if n < 0 { + // If an error occurred while reading. + err = errno + } else { + // Read was too short. + err = errors.New("notify: short read in readEvents()") + } + select { + case w.Errors <- err: + case <-w.done: + return + } + continue + } + + var offset uint32 + // We don't know how many events we just read into the buffer + // While the offset points to at least one whole event... + for offset <= uint32(n-unix.SizeofInotifyEvent) { + // Point "raw" to the event in the buffer + raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) + + mask := uint32(raw.Mask) + nameLen := uint32(raw.Len) + + if mask&unix.IN_Q_OVERFLOW != 0 { + select { + case w.Errors <- ErrEventOverflow: + case <-w.done: + return + } + } + + // If the event happened to the watched directory or the watched file, the kernel + // doesn't append the filename to the event, but we would like to always fill the + // the "Name" field with a valid filename. We retrieve the path of the watch from + // the "paths" map. + w.mu.Lock() + name, ok := w.paths[int(raw.Wd)] + // IN_DELETE_SELF occurs when the file/directory being watched is removed. + // This is a sign to clean up the maps, otherwise we are no longer in sync + // with the inotify kernel state which has already deleted the watch + // automatically. + if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + delete(w.paths, int(raw.Wd)) + delete(w.watches, name) + } + w.mu.Unlock() + + if nameLen > 0 { + // Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent])) + // The filename is padded with NULL bytes. TrimRight() gets rid of those. 
+ name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + event := newEvent(name, mask) + + // Send the events that are not ignored on the events channel + if !event.ignoreLinux(mask) { + select { + case w.Events <- event: + case <-w.done: + return + } + } + + // Move to the next event in the buffer + offset += unix.SizeofInotifyEvent + nameLen + } + } +} + +// Certain types of events can be "ignored" and not sent over the Events +// channel. Such as events marked ignore by the kernel, or MODIFY events +// against files that do not exist. +func (e *Event) ignoreLinux(mask uint32) bool { + // Ignore anything the inotify API says to ignore + if mask&unix.IN_IGNORED == unix.IN_IGNORED { + return true + } + + // If the event is not a DELETE or RENAME, the file must exist. + // Otherwise the event is ignored. + // *Note*: this was put in place because it was seen that a MODIFY + // event was sent after the DELETE. This ignores that MODIFY and + // assumes a DELETE will come or has come if the file doesn't exist. + if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { + _, statErr := os.Lstat(e.Name) + return os.IsNotExist(statErr) + } + return false +} + +// newEvent returns an platform-independent Event based on an inotify mask. +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + e.Op |= Create + } + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { + e.Op |= Remove + } + if mask&unix.IN_MODIFY == unix.IN_MODIFY { + e.Op |= Write + } + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + e.Op |= Rename + } + if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { + e.Op |= Chmod + } + return e +} diff --git a/extender/code/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/extender/code/vendor/github.com/fsnotify/fsnotify/inotify_poller.go new file mode 100644 index 000000000..b33f2b4d4 --- /dev/null +++ b/extender/code/vendor/github.com/fsnotify/fsnotify/inotify_poller.go @@ -0,0 +1,187 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "errors" + + "golang.org/x/sys/unix" +) + +type fdPoller struct { + fd int // File descriptor (as returned by the inotify_init() syscall) + epfd int // Epoll file descriptor + pipe [2]int // Pipe for waking up +} + +func emptyPoller(fd int) *fdPoller { + poller := new(fdPoller) + poller.fd = fd + poller.epfd = -1 + poller.pipe[0] = -1 + poller.pipe[1] = -1 + return poller +} + +// Create a new inotify poller. +// This creates an inotify handler, and an epoll handler. +func newFdPoller(fd int) (*fdPoller, error) { + var errno error + poller := emptyPoller(fd) + defer func() { + if errno != nil { + poller.close() + } + }() + poller.fd = fd + + // Create epoll fd + poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC) + if poller.epfd == -1 { + return nil, errno + } + // Create pipe; pipe[0] is the read end, pipe[1] the write end. 
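+	// This is the classic self-pipe trick: wake() writes a single byte to
+	// pipe[1], making pipe[0] readable and unblocking the epoll wait so the
+	// reader goroutine can notice a pending Close.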
+ errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC) + if errno != nil { + return nil, errno + } + + // Register inotify fd with epoll + event := unix.EpollEvent{ + Fd: int32(poller.fd), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) + if errno != nil { + return nil, errno + } + + // Register pipe fd with epoll + event = unix.EpollEvent{ + Fd: int32(poller.pipe[0]), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) + if errno != nil { + return nil, errno + } + + return poller, nil +} + +// Wait using epoll. +// Returns true if something is ready to be read, +// false if there is not. +func (poller *fdPoller) wait() (bool, error) { + // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. + // I don't know whether epoll_wait returns the number of events returned, + // or the total number of events ready. + // I decided to catch both by making the buffer one larger than the maximum. + events := make([]unix.EpollEvent, 7) + for { + n, errno := unix.EpollWait(poller.epfd, events, -1) + if n == -1 { + if errno == unix.EINTR { + continue + } + return false, errno + } + if n == 0 { + // If there are no events, try again. + continue + } + if n > 6 { + // This should never happen. More events were returned than should be possible. + return false, errors.New("epoll_wait returned more events than I know what to do with") + } + ready := events[:n] + epollhup := false + epollerr := false + epollin := false + for _, event := range ready { + if event.Fd == int32(poller.fd) { + if event.Events&unix.EPOLLHUP != 0 { + // This should not happen, but if it does, treat it as a wakeup. + epollhup = true + } + if event.Events&unix.EPOLLERR != 0 { + // If an error is waiting on the file descriptor, we should pretend + // something is ready to read, and let unix.Read pick up the error. + epollerr = true + } + if event.Events&unix.EPOLLIN != 0 { + // There is data to read. + epollin = true + } + } + if event.Fd == int32(poller.pipe[0]) { + if event.Events&unix.EPOLLHUP != 0 { + // Write pipe descriptor was closed, by us. This means we're closing down the + // watcher, and we should wake up. + } + if event.Events&unix.EPOLLERR != 0 { + // If an error is waiting on the pipe file descriptor. + // This is an absolute mystery, and should never ever happen. + return false, errors.New("Error on the pipe descriptor.") + } + if event.Events&unix.EPOLLIN != 0 { + // This is a regular wakeup, so we have to clear the buffer. + err := poller.clearWake() + if err != nil { + return false, err + } + } + } + } + + if epollhup || epollerr || epollin { + return true, nil + } + return false, nil + } +} + +// Close the write end of the poller. +func (poller *fdPoller) wake() error { + buf := make([]byte, 1) + n, errno := unix.Write(poller.pipe[1], buf) + if n == -1 { + if errno == unix.EAGAIN { + // Buffer is full, poller will wake. + return nil + } + return errno + } + return nil +} + +func (poller *fdPoller) clearWake() error { + // You have to be woken up a LOT in order to get to 100! + buf := make([]byte, 100) + n, errno := unix.Read(poller.pipe[0], buf) + if n == -1 { + if errno == unix.EAGAIN { + // Buffer is empty, someone else cleared our wake. + return nil + } + return errno + } + return nil +} + +// Close all poller file descriptors, but not the one passed to it. 
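+// (The inotify descriptor handed to newFdPoller is closed by the Watcher's
+// readEvents loop, not here.)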
+func (poller *fdPoller) close() { + if poller.pipe[1] != -1 { + unix.Close(poller.pipe[1]) + } + if poller.pipe[0] != -1 { + unix.Close(poller.pipe[0]) + } + if poller.epfd != -1 { + unix.Close(poller.epfd) + } +} diff --git a/extender/code/vendor/github.com/fsnotify/fsnotify/kqueue.go b/extender/code/vendor/github.com/fsnotify/fsnotify/kqueue.go new file mode 100644 index 000000000..86e76a3d6 --- /dev/null +++ b/extender/code/vendor/github.com/fsnotify/fsnotify/kqueue.go @@ -0,0 +1,521 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd dragonfly darwin + +package fsnotify + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + "time" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + + kq int // File descriptor (as returned by the kqueue() syscall). + + mu sync.Mutex // Protects access to watcher data + watches map[string]int // Map of watched file descriptors (key: path). + externalWatches map[string]bool // Map of watches added by user of the library. + dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. + paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. + fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). + isClosed bool // Set to true when Close() is first called +} + +type pathInfo struct { + name string + isDir bool +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + kq, err := kqueue() + if err != nil { + return nil, err + } + + w := &Watcher{ + kq: kq, + watches: make(map[string]int), + dirFlags: make(map[string]uint32), + paths: make(map[int]pathInfo), + fileExists: make(map[string]bool), + externalWatches: make(map[string]bool), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + + // copy paths to remove while locked + var pathsToRemove = make([]string, 0, len(w.watches)) + for name := range w.watches { + pathsToRemove = append(pathsToRemove, name) + } + w.mu.Unlock() + // unlock before calling Remove, which also locks + + for _, name := range pathsToRemove { + w.Remove(name) + } + + // send a "quit" message to the reader goroutine + close(w.done) + + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + w.mu.Lock() + w.externalWatches[name] = true + w.mu.Unlock() + _, err := w.addWatch(name, noteAllEvents) + return err +} + +// Remove stops watching the the named file or directory (non-recursively). 
+func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + w.mu.Lock() + watchfd, ok := w.watches[name] + w.mu.Unlock() + if !ok { + return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) + } + + const registerRemove = unix.EV_DELETE + if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { + return err + } + + unix.Close(watchfd) + + w.mu.Lock() + isDir := w.paths[watchfd].isDir + delete(w.watches, name) + delete(w.paths, watchfd) + delete(w.dirFlags, name) + w.mu.Unlock() + + // Find all watched paths that are in this directory that are not external. + if isDir { + var pathsToRemove []string + w.mu.Lock() + for _, path := range w.paths { + wdir, _ := filepath.Split(path.name) + if filepath.Clean(wdir) == name { + if !w.externalWatches[path.name] { + pathsToRemove = append(pathsToRemove, path.name) + } + } + } + w.mu.Unlock() + for _, name := range pathsToRemove { + // Since these are internal, not much sense in propagating error + // to the user, as that will just confuse them with an error about + // a path they did not explicitly watch themselves. + w.Remove(name) + } + } + + return nil +} + +// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) +const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME + +// keventWaitTime to block on each read from kevent +var keventWaitTime = durationToTimespec(100 * time.Millisecond) + +// addWatch adds name to the watched file set. +// The flags are interpreted as described in kevent(2). +// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. +func (w *Watcher) addWatch(name string, flags uint32) (string, error) { + var isDir bool + // Make ./name and name equivalent + name = filepath.Clean(name) + + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return "", errors.New("kevent instance already closed") + } + watchfd, alreadyWatching := w.watches[name] + // We already have a watch, but we can still override flags. + if alreadyWatching { + isDir = w.paths[watchfd].isDir + } + w.mu.Unlock() + + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return "", err + } + + // Don't watch sockets. + if fi.Mode()&os.ModeSocket == os.ModeSocket { + return "", nil + } + + // Don't watch named pipes. + if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { + return "", nil + } + + // Follow Symlinks + // Unfortunately, Linux can add bogus symlinks to watch list without + // issue, and Windows can't do symlinks period (AFAIK). To maintain + // consistency, we will act like everything is fine. There will simply + // be no file events for broken symlinks. + // Hence the returns of nil on errors. 
+ if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + name, err = filepath.EvalSymlinks(name) + if err != nil { + return "", nil + } + + w.mu.Lock() + _, alreadyWatching = w.watches[name] + w.mu.Unlock() + + if alreadyWatching { + return name, nil + } + + fi, err = os.Lstat(name) + if err != nil { + return "", nil + } + } + + watchfd, err = unix.Open(name, openMode, 0700) + if watchfd == -1 { + return "", err + } + + isDir = fi.IsDir() + } + + const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE + if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { + unix.Close(watchfd) + return "", err + } + + if !alreadyWatching { + w.mu.Lock() + w.watches[name] = watchfd + w.paths[watchfd] = pathInfo{name: name, isDir: isDir} + w.mu.Unlock() + } + + if isDir { + // Watch the directory if it has not been watched before, + // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + w.mu.Lock() + + watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && + (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) + // Store flags so this watch can be updated later + w.dirFlags[name] = flags + w.mu.Unlock() + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return "", err + } + } + } + return name, nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. +func (w *Watcher) readEvents() { + eventBuffer := make([]unix.Kevent_t, 10) + +loop: + for { + // See if there is a message on the "done" channel + select { + case <-w.done: + break loop + default: + } + + // Get new events + kevents, err := read(w.kq, eventBuffer, &keventWaitTime) + // EINTR is okay, the syscall was interrupted before timeout expired. + if err != nil && err != unix.EINTR { + select { + case w.Errors <- err: + case <-w.done: + break loop + } + continue + } + + // Flush the events we received to the Events channel + for len(kevents) > 0 { + kevent := &kevents[0] + watchfd := int(kevent.Ident) + mask := uint32(kevent.Fflags) + w.mu.Lock() + path := w.paths[watchfd] + w.mu.Unlock() + event := newEvent(path.name, mask) + + if path.isDir && !(event.Op&Remove == Remove) { + // Double check to make sure the directory exists. This can happen when + // we do a rm -fr on a recursively watched folders and we receive a + // modification event first but the folder has been deleted and later + // receive the delete event + if _, err := os.Lstat(event.Name); os.IsNotExist(err) { + // mark is as delete event + event.Op |= Remove + } + } + + if event.Op&Rename == Rename || event.Op&Remove == Remove { + w.Remove(event.Name) + w.mu.Lock() + delete(w.fileExists, event.Name) + w.mu.Unlock() + } + + if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { + w.sendDirectoryChangeEvents(event.Name) + } else { + // Send the event on the Events channel. + select { + case w.Events <- event: + case <-w.done: + break loop + } + } + + if event.Op&Remove == Remove { + // Look for a file that may have overwritten this. + // For example, mv f1 f2 will delete f2, then create f2. + if path.isDir { + fileDir := filepath.Clean(event.Name) + w.mu.Lock() + _, found := w.watches[fileDir] + w.mu.Unlock() + if found { + // make sure the directory exists before we watch for changes. 
+						// do a recursive watch and perform rm -fr, the parent directory might
+						// have gone missing, ignore the missing directory and let the
+						// upcoming delete event remove the watch from the parent directory.
+						if _, err := os.Lstat(fileDir); err == nil {
+							w.sendDirectoryChangeEvents(fileDir)
+						}
+					}
+				} else {
+					filePath := filepath.Clean(event.Name)
+					if fileInfo, err := os.Lstat(filePath); err == nil {
+						w.sendFileCreatedEventIfNew(filePath, fileInfo)
+					}
+				}
+			}
+
+			// Move to next event
+			kevents = kevents[1:]
+		}
+	}
+
+	// cleanup
+	err := unix.Close(w.kq)
+	if err != nil {
+		// only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors.
+		select {
+		case w.Errors <- err:
+		default:
+		}
+	}
+	close(w.Events)
+	close(w.Errors)
+}
+
+// newEvent returns a platform-independent Event based on kqueue Fflags.
+func newEvent(name string, mask uint32) Event {
+	e := Event{Name: name}
+	if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
+		e.Op |= Remove
+	}
+	if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
+		e.Op |= Write
+	}
+	if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
+		e.Op |= Rename
+	}
+	if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
+		e.Op |= Chmod
+	}
+	return e
+}
+
+func newCreateEvent(name string) Event {
+	return Event{Name: name, Op: Create}
+}
+
+// watchDirectoryFiles to mimic inotify when adding a watch on a directory
+func (w *Watcher) watchDirectoryFiles(dirPath string) error {
+	// Get all files
+	files, err := ioutil.ReadDir(dirPath)
+	if err != nil {
+		return err
+	}
+
+	for _, fileInfo := range files {
+		filePath := filepath.Join(dirPath, fileInfo.Name())
+		filePath, err = w.internalWatch(filePath, fileInfo)
+		if err != nil {
+			return err
+		}
+
+		w.mu.Lock()
+		w.fileExists[filePath] = true
+		w.mu.Unlock()
+	}
+
+	return nil
+}
+
+// sendDirectoryChangeEvents searches the directory for newly created files
+// and sends them over the event channel. This functionality is to have
+// the BSD version of fsnotify match Linux inotify which provides a
+// create event for files created in a watched directory.
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
+	// Get all files
+	files, err := ioutil.ReadDir(dirPath)
+	if err != nil {
+		select {
+		case w.Errors <- err:
+		case <-w.done:
+			return
+		}
+	}
+
+	// Search for new files
+	for _, fileInfo := range files {
+		filePath := filepath.Join(dirPath, fileInfo.Name())
+		err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
+
+		if err != nil {
+			return
+		}
+	}
+}
+
+// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { + w.mu.Lock() + _, doesExist := w.fileExists[filePath] + w.mu.Unlock() + if !doesExist { + // Send create event + select { + case w.Events <- newCreateEvent(filePath): + case <-w.done: + return + } + } + + // like watchDirectoryFiles (but without doing another ReadDir) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = true + w.mu.Unlock() + + return nil +} + +func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { + if fileInfo.IsDir() { + // mimic Linux providing delete events for subdirectories + // but preserve the flags used if currently watching subdirectory + w.mu.Lock() + flags := w.dirFlags[name] + w.mu.Unlock() + + flags |= unix.NOTE_DELETE | unix.NOTE_RENAME + return w.addWatch(name, flags) + } + + // watch file to mimic Linux inotify + return w.addWatch(name, noteAllEvents) +} + +// kqueue creates a new kernel event queue and returns a descriptor. +func kqueue() (kq int, err error) { + kq, err = unix.Kqueue() + if kq == -1 { + return kq, err + } + return kq, nil +} + +// register events with the queue +func register(kq int, fds []int, flags int, fflags uint32) error { + changes := make([]unix.Kevent_t, len(fds)) + + for i, fd := range fds { + // SetKevent converts int to the platform-specific types: + unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) + changes[i].Fflags = fflags + } + + // register the events + success, err := unix.Kevent(kq, changes, nil, nil) + if success == -1 { + return err + } + return nil +} + +// read retrieves pending events, or waits until an event occurs. +// A timeout of nil blocks indefinitely, while 0 polls the queue. +func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { + n, err := unix.Kevent(kq, nil, events, timeout) + if err != nil { + return nil, err + } + return events[0:n], nil +} + +// durationToTimespec prepares a timeout value +func durationToTimespec(d time.Duration) unix.Timespec { + return unix.NsecToTimespec(d.Nanoseconds()) +} diff --git a/extender/code/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/extender/code/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go new file mode 100644 index 000000000..2306c4620 --- /dev/null +++ b/extender/code/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go @@ -0,0 +1,11 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd dragonfly + +package fsnotify + +import "golang.org/x/sys/unix" + +const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC diff --git a/extender/code/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/extender/code/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go new file mode 100644 index 000000000..870c4d6d1 --- /dev/null +++ b/extender/code/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+// +build darwin
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+// note: this constant is not defined on BSD
+const openMode = unix.O_EVTONLY | unix.O_CLOEXEC
diff --git a/extender/code/vendor/github.com/fsnotify/fsnotify/windows.go b/extender/code/vendor/github.com/fsnotify/fsnotify/windows.go
new file mode 100644
index 000000000..09436f31d
--- /dev/null
+++ b/extender/code/vendor/github.com/fsnotify/fsnotify/windows.go
@@ -0,0 +1,561 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package fsnotify
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+	"sync"
+	"syscall"
+	"unsafe"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+	Events   chan Event
+	Errors   chan error
+	isClosed bool           // Set to true when Close() is first called
+	mu       sync.Mutex     // Map access
+	port     syscall.Handle // Handle to completion port
+	watches  watchMap       // Map of watches (key: i-number)
+	input    chan *input    // Inputs to the reader are sent on this channel
+	quit     chan chan<- error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
+	if e != nil {
+		return nil, os.NewSyscallError("CreateIoCompletionPort", e)
+	}
+	w := &Watcher{
+		port:    port,
+		watches: make(watchMap),
+		input:   make(chan *input, 1),
+		Events:  make(chan Event, 50),
+		Errors:  make(chan error),
+		quit:    make(chan chan<- error, 1),
+	}
+	go w.readEvents()
+	return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	if w.isClosed {
+		return nil
+	}
+	w.isClosed = true
+
+	// Send "quit" message to the reader goroutine
+	ch := make(chan error)
+	w.quit <- ch
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-ch
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	if w.isClosed {
+		return errors.New("watcher already closed")
+	}
+	in := &input{
+		op:    opAddWatch,
+		path:  filepath.Clean(name),
+		flags: sysFSALLEVENTS,
+		reply: make(chan error),
+	}
+	w.input <- in
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-in.reply
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error { + in := &input{ + op: opRemoveWatch, + path: filepath.Clean(name), + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +const ( + // Options for AddWatch + sysFSONESHOT = 0x80000000 + sysFSONLYDIR = 0x1000000 + + // Events + sysFSACCESS = 0x1 + sysFSALLEVENTS = 0xfff + sysFSATTRIB = 0x4 + sysFSCLOSE = 0x18 + sysFSCREATE = 0x100 + sysFSDELETE = 0x200 + sysFSDELETESELF = 0x400 + sysFSMODIFY = 0x2 + sysFSMOVE = 0xc0 + sysFSMOVEDFROM = 0x40 + sysFSMOVEDTO = 0x80 + sysFSMOVESELF = 0x800 + + // Special events + sysFSIGNORED = 0x8000 + sysFSQOVERFLOW = 0x4000 +) + +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { + e.Op |= Create + } + if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { + e.Op |= Remove + } + if mask&sysFSMODIFY == sysFSMODIFY { + e.Op |= Write + } + if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { + e.Op |= Rename + } + if mask&sysFSATTRIB == sysFSATTRIB { + e.Op |= Chmod + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + reply chan error +} + +type inode struct { + handle syscall.Handle + volume uint32 + index uint64 +} + +type watch struct { + ov syscall.Overlapped + ino *inode // i-number + path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf [4096]byte +} + +type indexMap map[uint64]*watch +type watchMap map[uint32]indexMap + +func (w *Watcher) wakeupReader() error { + e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if e != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", e) + } + return nil +} + +func getDir(pathname string) (dir string, err error) { + attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) + if e != nil { + return "", os.NewSyscallError("GetFileAttributes", e) + } + if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func getIno(path string) (ino *inode, err error) { + h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), + syscall.FILE_LIST_DIRECTORY, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + nil, syscall.OPEN_EXISTING, + syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) + if e != nil { + return nil, os.NewSyscallError("CreateFile", e) + } + var fi syscall.ByHandleFileInformation + if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { + syscall.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", e) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. 
+func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. +func (w *Watcher) addWatch(pathname string, flags uint64) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + if flags&sysFSONLYDIR != 0 && pathname != dir { + return nil + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { + syscall.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", e) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + syscall.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + if err = w.startRead(watchEntry); err != nil { + return err + } + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. +func (w *Watcher) remWatch(pathname string) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + if watch == nil { + return fmt.Errorf("can't remove non-existent watch for: %s", pathname) + } + if pathname == dir { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + return w.startRead(watch) +} + +// Must run within the I/O thread. +func (w *Watcher) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. 
+func (w *Watcher) startRead(watch *watch) error { + if e := syscall.CancelIo(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CancelIo", e) + w.deleteWatch(watch) + } + mask := toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= toWindowsFlags(m) + } + if mask == 0 { + if e := syscall.CloseHandle(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CloseHandle", e) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], + uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + if e != nil { + err := os.NewSyscallError("ReadDirectoryChanges", e) + if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { + if watch.mask&sysFSONESHOT != 0 { + watch.mask = 0 + } + } + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. +func (w *Watcher) readEvents() { + var ( + n, key uint32 + ov *syscall.Overlapped + ) + runtime.LockOSThread() + + for { + e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) + watch := (*watch)(unsafe.Pointer(ov)) + + if watch == nil { + select { + case ch := <-w.quit: + w.mu.Lock() + var indexes []indexMap + for _, index := range w.watches { + indexes = append(indexes, index) + } + w.mu.Unlock() + for _, index := range indexes { + for _, watch := range index { + w.deleteWatch(watch) + w.startRead(watch) + } + } + var err error + if e := syscall.CloseHandle(w.port); e != nil { + err = os.NewSyscallError("CloseHandle", e) + } + close(w.Events) + close(w.Errors) + ch <- err + return + case in := <-w.input: + switch in.op { + case opAddWatch: + in.reply <- w.addWatch(in.path, uint64(in.flags)) + case opRemoveWatch: + in.reply <- w.remWatch(in.path) + } + default: + } + continue + } + + switch e { + case syscall.ERROR_MORE_DATA: + if watch == nil { + w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") + } else { + // The i/o succeeded but the buffer is full. + // In theory we should be building up a full packet. + // In practice we can get away with just carrying on. 
+				n = uint32(unsafe.Sizeof(watch.buf))
+			}
+		case syscall.ERROR_ACCESS_DENIED:
+			// Watched directory was probably removed
+			w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
+			w.deleteWatch(watch)
+			w.startRead(watch)
+			continue
+		case syscall.ERROR_OPERATION_ABORTED:
+			// CancelIo was called on this handle
+			continue
+		default:
+			w.Errors <- os.NewSyscallError("GetQueuedCompletionStatus", e)
+			continue
+		case nil:
+		}
+
+		var offset uint32
+		for {
+			if n == 0 {
+				w.Events <- newEvent("", sysFSQOVERFLOW)
+				w.Errors <- errors.New("short read in readEvents()")
+				break
+			}
+
+			// Point "raw" to the event in the buffer
+			raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
+			buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
+			name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
+			fullname := filepath.Join(watch.path, name)
+
+			var mask uint64
+			switch raw.Action {
+			case syscall.FILE_ACTION_REMOVED:
+				mask = sysFSDELETESELF
+			case syscall.FILE_ACTION_MODIFIED:
+				mask = sysFSMODIFY
+			case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+				watch.rename = name
+			case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+				if watch.names[watch.rename] != 0 {
+					watch.names[name] |= watch.names[watch.rename]
+					delete(watch.names, watch.rename)
+					mask = sysFSMOVESELF
+				}
+			}
+
+			sendNameEvent := func() {
+				if w.sendEvent(fullname, watch.names[name]&mask) {
+					if watch.names[name]&sysFSONESHOT != 0 {
+						delete(watch.names, name)
+					}
+				}
+			}
+			if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
+				sendNameEvent()
+			}
+			if raw.Action == syscall.FILE_ACTION_REMOVED {
+				w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
+				delete(watch.names, name)
+			}
+			if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
+				if watch.mask&sysFSONESHOT != 0 {
+					watch.mask = 0
+				}
+			}
+			if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
+				fullname = filepath.Join(watch.path, watch.rename)
+				sendNameEvent()
+			}
+
+			// Move to the next event in the buffer
+			if raw.NextEntryOffset == 0 {
+				break
+			}
+			offset += raw.NextEntryOffset
+
+			// Error!
+ if offset >= n { + w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") + break + } + } + + if err := w.startRead(watch); err != nil { + w.Errors <- err + } + } +} + +func (w *Watcher) sendEvent(name string, mask uint64) bool { + if mask == 0 { + return false + } + event := newEvent(name, uint32(mask)) + select { + case ch := <-w.quit: + w.quit <- ch + case w.Events <- event: + } + return true +} + +func toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sysFSACCESS != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS + } + if mask&sysFSMODIFY != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&sysFSATTRIB != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES + } + if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func toFSnotifyFlags(action uint32) uint64 { + switch action { + case syscall.FILE_ACTION_ADDED: + return sysFSCREATE + case syscall.FILE_ACTION_REMOVED: + return sysFSDELETE + case syscall.FILE_ACTION_MODIFIED: + return sysFSMODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + return sysFSMOVEDFROM + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + return sysFSMOVEDTO + } + return 0 +} diff --git a/extender/code/vendor/github.com/genuinetools/bpfd/LICENSE b/extender/code/vendor/github.com/genuinetools/bpfd/LICENSE new file mode 100644 index 000000000..fd05738d8 --- /dev/null +++ b/extender/code/vendor/github.com/genuinetools/bpfd/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2018 The Genuinetools Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/extender/code/vendor/github.com/genuinetools/bpfd/proc/proc.go b/extender/code/vendor/github.com/genuinetools/bpfd/proc/proc.go new file mode 100644 index 000000000..f2357caf8 --- /dev/null +++ b/extender/code/vendor/github.com/genuinetools/bpfd/proc/proc.go @@ -0,0 +1,557 @@ +// Package proc provides tools for inspecting proc. +package proc + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "regexp" + "strconv" + "strings" + "syscall" + + "github.com/syndtr/gocapability/capability" + "golang.org/x/sys/unix" +) + +// ContainerRuntime is the type for the various container runtime strings. +type ContainerRuntime string + +// SeccompMode is the type for the various seccomp mode strings. +type SeccompMode string + +const ( + // RuntimeDocker is the string for the docker runtime. 
+ RuntimeDocker ContainerRuntime = "docker" + // RuntimeRkt is the string for the rkt runtime. + RuntimeRkt ContainerRuntime = "rkt" + // RuntimeNspawn is the string for the systemd-nspawn runtime. + RuntimeNspawn ContainerRuntime = "systemd-nspawn" + // RuntimeLXC is the string for the lxc runtime. + RuntimeLXC ContainerRuntime = "lxc" + // RuntimeLXCLibvirt is the string for the lxc-libvirt runtime. + RuntimeLXCLibvirt ContainerRuntime = "lxc-libvirt" + // RuntimeOpenVZ is the string for the openvz runtime. + RuntimeOpenVZ ContainerRuntime = "openvz" + // RuntimeKubernetes is the string for the kubernetes runtime. + RuntimeKubernetes ContainerRuntime = "kube" + // RuntimeGarden is the string for the garden runtime. + RuntimeGarden ContainerRuntime = "garden" + // RuntimePodman is the string for the podman runtime. + RuntimePodman ContainerRuntime = "podman" + // RuntimeGVisor is the string for the gVisor (runsc) runtime. + RuntimeGVisor ContainerRuntime = "gvisor" + // RuntimeFirejail is the string for the firejail runtime. + RuntimeFirejail ContainerRuntime = "firejail" + // RuntimeWSL is the string for the Windows Subsystem for Linux runtime. + RuntimeWSL ContainerRuntime = "wsl" + // RuntimeNotFound is the string for when no container runtime is found. + RuntimeNotFound ContainerRuntime = "not-found" + + // SeccompModeDisabled is equivalent to "0" in the /proc/{pid}/status file. + SeccompModeDisabled SeccompMode = "disabled" + // SeccompModeStrict is equivalent to "1" in the /proc/{pid}/status file. + SeccompModeStrict SeccompMode = "strict" + // SeccompModeFiltering is equivalent to "2" in the /proc/{pid}/status file. + SeccompModeFiltering SeccompMode = "filtering" + + apparmorUnconfined = "unconfined" + + uint32Max = 4294967295 + + cgroupContainerID = ":(/docker/|/kube.*/.*/|/kube.*/.*/.*/.*/|/system.slice/docker-|/machine.slice/machine-rkt-|/machine.slice/machine-|/lxc/|/lxc-libvirt/|/garden/|/podman/)([[:alnum:]\\-]{1,64})(.scope|$)" + statusFileValue = ":(.*)" +) + +var ( + // ContainerRuntimes contains all the container runtimes. + ContainerRuntimes = []ContainerRuntime{ + RuntimeDocker, + RuntimeRkt, + RuntimeNspawn, + RuntimeLXC, + RuntimeLXCLibvirt, + RuntimeOpenVZ, + RuntimeKubernetes, + RuntimeGarden, + RuntimePodman, + RuntimeGVisor, + RuntimeFirejail, + RuntimeWSL, + } + + seccompModes = map[string]SeccompMode{ + "0": SeccompModeDisabled, + "1": SeccompModeStrict, + "2": SeccompModeFiltering, + } + + cgroupContainerIDRegex = regexp.MustCompile(cgroupContainerID) + statusFileValueRegex = regexp.MustCompile(statusFileValue) +) + +// GetContainerRuntime returns the container runtime the process is running in. +// If pid is less than one, it returns the runtime for "self". +func GetContainerRuntime(tgid, pid int) ContainerRuntime { + file := "/proc/self/cgroup" + if pid > 0 { + if tgid > 0 { + file = fmt.Sprintf("/proc/%d/task/%d/cgroup", tgid, pid) + } else { + file = fmt.Sprintf("/proc/%d/cgroup", pid) + } + } + + // read the cgroups file + a := readFileString(file) + runtime := getContainerRuntime(a) + if runtime != RuntimeNotFound { + return runtime + } + + // /proc/vz exists in container and outside of the container, /proc/bc only outside of the container. + if fileExists("/proc/vz") && !fileExists("/proc/bc") { + return RuntimeOpenVZ + } + + // /__runsc_containers__ directory is present in gVisor containers. + if fileExists("/__runsc_containers__") { + return RuntimeGVisor + } + + // firejail runs with `firejail` as pid 1. 
+ // As firejail binary cannot be run with argv[0] != "firejail" + // it's okay to rely on cmdline. + a = readFileString("/proc/1/cmdline") + runtime = getContainerRuntime(a) + if runtime != RuntimeNotFound { + return runtime + } + + // WSL has /proc/version_signature starting with "Microsoft". + a = readFileString("/proc/version_signature") + if strings.HasPrefix(a, "Microsoft") { + return RuntimeWSL + } + + a = os.Getenv("container") + runtime = getContainerRuntime(a) + if runtime != RuntimeNotFound { + return runtime + } + + // PID 1 might have dropped this information into a file in /run. + // Read from /run/systemd/container since it is better than accessing /proc/1/environ, + // which needs CAP_SYS_PTRACE + a = readFileString("/run/systemd/container") + runtime = getContainerRuntime(a) + if runtime != RuntimeNotFound { + return runtime + } + + return RuntimeNotFound +} + +func getContainerRuntime(input string) ContainerRuntime { + if len(strings.TrimSpace(input)) < 1 { + return RuntimeNotFound + } + + for _, runtime := range ContainerRuntimes { + if strings.Contains(input, string(runtime)) { + return runtime + } + } + + return RuntimeNotFound +} + +// GetContainerID returns the container ID for a process if it's running in a container. +// If pid is less than one, it returns the container ID for "self". +func GetContainerID(tgid, pid int) string { + file := "/proc/self/cgroup" + if pid > 0 { + if tgid > 0 { + file = fmt.Sprintf("/proc/%d/task/%d/cgroup", tgid, pid) + } else { + file = fmt.Sprintf("/proc/%d/cgroup", pid) + } + } + + return getContainerID(readFileString(file)) +} + +func getContainerID(input string) string { + if len(strings.TrimSpace(input)) < 1 { + return "" + } + + // rkt encodes the dashes as ascii, replace them. + input = strings.Replace(input, `\x2d`, "-", -1) + + lines := strings.Split(input, "\n") + for _, line := range lines { + matches := cgroupContainerIDRegex.FindStringSubmatch(line) + if len(matches) > 2 { + return matches[2] + } + } + + return "" +} + +// GetAppArmorProfile determines the AppArmor profile for a process. +// If pid is less than one, it returns the AppArmor profile for "self". +func GetAppArmorProfile(pid int) string { + file := "/proc/self/attr/current" + if pid > 0 { + file = fmt.Sprintf("/proc/%d/attr/current", pid) + } + + f := readFileString(file) + if f == "" { + return apparmorUnconfined + } + return f +} + +// UserMapping holds the values for a {uid,gid}_map. +type UserMapping struct { + ContainerID int64 + HostID int64 + Range int64 +} + +// GetUserNamespaceInfo determines if the process is running in a UserNamespace +// and returns the mappings if true. +// If pid is less than one, it returns the user namespace info for "self". 
+func GetUserNamespaceInfo(pid int) (bool, []UserMapping) {
+	file := "/proc/self/uid_map"
+	if pid > 0 {
+		file = fmt.Sprintf("/proc/%d/uid_map", pid)
+	}
+
+	f := readFileString(file)
+	if len(f) == 0 {
+		// user namespace is uninitialized
+		return true, nil
+	}
+
+	userNs, mappings, err := readUserMappings(f)
+	if err != nil {
+		return false, nil
+	}
+
+	return userNs, mappings
+}
+
+func readUserMappings(f string) (userNS bool, mappings []UserMapping, err error) {
+	parts := strings.Split(f, " ")
+	parts = deleteEmpty(parts)
+	if len(parts) < 3 {
+		return false, nil, nil
+	}
+
+	for i := 0; i < len(parts); i += 3 {
+		nsu, hu, r := parts[i], parts[i+1], parts[i+2]
+		mapping := UserMapping{}
+
+		mapping.ContainerID, err = strconv.ParseInt(nsu, 10, 0)
+		if err != nil {
+			return false, nil, nil
+		}
+		mapping.HostID, err = strconv.ParseInt(hu, 10, 0)
+		if err != nil {
+			return false, nil, nil
+		}
+		mapping.Range, err = strconv.ParseInt(r, 10, 0)
+		if err != nil {
+			return false, nil, nil
+		}
+
+		if mapping.ContainerID == 0 && mapping.HostID == 0 && mapping.Range == uint32Max {
+			return false, nil, nil
+		}
+
+		mappings = append(mappings, mapping)
+	}
+
+	return true, mappings, nil
+}
+
+// GetCapabilities returns the allowed capabilities for the process.
+// If pid is less than one, it returns the capabilities for "self".
+func GetCapabilities(pid int) (map[string][]string, error) {
+	allCaps := capability.List()
+
+	caps, err := capability.NewPid(pid)
+	if err != nil {
+		return nil, err
+	}
+
+	allowedCaps := map[string][]string{}
+	allowedCaps["EFFECTIVE | PERMITTED | INHERITABLE"] = []string{}
+	allowedCaps["BOUNDING"] = []string{}
+	allowedCaps["AMBIENT"] = []string{}
+
+	for _, cap := range allCaps {
+		if caps.Get(capability.CAPS, cap) {
+			allowedCaps["EFFECTIVE | PERMITTED | INHERITABLE"] = append(allowedCaps["EFFECTIVE | PERMITTED | INHERITABLE"], cap.String())
+		}
+		if caps.Get(capability.BOUNDING, cap) {
+			allowedCaps["BOUNDING"] = append(allowedCaps["BOUNDING"], cap.String())
+		}
+		if caps.Get(capability.AMBIENT, cap) {
+			allowedCaps["AMBIENT"] = append(allowedCaps["AMBIENT"], cap.String())
+		}
+	}
+
+	return allowedCaps, nil
+}
+
+// GetUIDGID returns the uid and gid for a process.
+// If pid is less than one, it returns the uid and gid for "self".
+func GetUIDGID(tgid, pid int) (uint32, uint32, error) { + file := "/proc/self/status" + if pid > 0 { + if tgid > 0 { + file = fmt.Sprintf("/proc/%d/task/%d/status", tgid, pid) + } else { + file = fmt.Sprintf("/proc/%d/status", pid) + } + } + + return getUIDGID(readFileString(file)) +} + +func getUIDGID(input string) (uint32, uint32, error) { + // Split status file string by line + statusMappings := strings.Split(input, "\n") + statusMappings = deleteEmpty(statusMappings) + + var uid, gid string + for _, line := range statusMappings { + if strings.Contains(line, "Uid:") { + matches := statusFileValueRegex.FindStringSubmatch(line) + if len(matches) > 1 { + uid = matches[1] + continue + } + } + if strings.Contains(line, "Gid:") { + matches := statusFileValueRegex.FindStringSubmatch(line) + if len(matches) > 1 { + gid = matches[1] + continue + } + } + if len(uid) > 0 && len(gid) > 0 { + break + } + } + + if len(uid) < 1 && len(gid) < 1 { + return 0, 0, nil + } + + u, err := strconv.Atoi(strings.Split(strings.Split(strings.TrimSpace(uid), " ")[0], "\t")[0]) + if err != nil { + return 0, 0, err + } + g, err := strconv.Atoi(strings.Split(strings.Split(strings.TrimSpace(gid), " ")[0], "\t")[0]) + if err != nil { + return 0, 0, err + } + + return uint32(u), uint32(g), nil +} + +// GetSeccompEnforcingMode returns the seccomp enforcing level (disabled, filtering, strict) +// for a process. +// If pid is less than one, it returns the seccomp enforcing mode for "self". +func GetSeccompEnforcingMode(pid int) SeccompMode { + file := "/proc/self/status" + if pid > 0 { + file = fmt.Sprintf("/proc/%d/status", pid) + } + + return getSeccompEnforcingMode(readFileString(file)) +} + +func getSeccompEnforcingMode(input string) SeccompMode { + mode := getStatusEntry(input, "Seccomp:") + sm, ok := seccompModes[mode] + if ok { + return sm + } + + // Pre linux 3.8, check if Seccomp is supported, via CONFIG_SECCOMP. + if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL { + // Make sure the kernel has CONFIG_SECCOMP_FILTER. + if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL { + return SeccompModeStrict + } + } + + return SeccompModeDisabled +} + +// GetNoNewPrivileges returns if no_new_privileges is set +// for a process. +// If pid is less than one, it returns if set for "self". +func GetNoNewPrivileges(pid int) bool { + file := "/proc/self/status" + if pid > 0 { + file = fmt.Sprintf("/proc/%d/status", pid) + } + + return getNoNewPrivileges(readFileString(file)) +} + +func getNoNewPrivileges(input string) bool { + return getStatusEntry(input, "NoNewPrivs:") == "1" +} + +// GetCmdline returns the cmdline for a process. +// If pid is less than one, it returns the cmdline for "self". +func GetCmdline(pid int) []string { + file := "/proc/self/cmdline" + if pid > 0 { + file = fmt.Sprintf("/proc/%d/cmdline", pid) + } + + return parseProcFile(readFile(file)) +} + +// GetEnviron returns the environ for a process. +// If pid is less than one, it returns the environ for "self". +func GetEnviron(pid int) []string { + file := "/proc/self/environ" + if pid > 0 { + file = fmt.Sprintf("/proc/%d/environ", pid) + } + + return parseProcFile(readFile(file)) +} + +// GetCwd returns the current working directory for the process. +// If pid is less than one, it returns the current working directory for "self". 
+func GetCwd(pid int) string { + file := "/proc/self/cwd" + if pid > 0 { + file = fmt.Sprintf("/proc/%d/cwd", pid) + } + + cwd, err := os.Readlink(file) + if err != nil { + if os.IsPermission(err) { + // Ignore the permission errors or the logs are noisy. + return "" + } + // Ignore errors in general. + return "" + } + + return cwd +} + +// TODO: make this function more efficient and read the file line by line. +func getStatusEntry(input, find string) string { + // Split status file string by line + statusMappings := strings.Split(input, "\n") + statusMappings = deleteEmpty(statusMappings) + + for _, line := range statusMappings { + if strings.Contains(line, find) { + matches := statusFileValueRegex.FindStringSubmatch(line) + if len(matches) > 1 { + return strings.TrimSpace(matches[1]) + } + } + } + + return "" +} + +func fileExists(file string) bool { + if _, err := os.Stat(file); !os.IsNotExist(err) { + return true + } + return false +} + +func readFile(file string) []byte { + if !fileExists(file) { + return nil + } + + b, _ := ioutil.ReadFile(file) + return b +} + +func readFileString(file string) string { + b := readFile(file) + if b == nil { + return "" + } + return strings.TrimSpace(string(b)) +} + +func deleteEmpty(s []string) []string { + var r []string + for _, str := range s { + if strings.TrimSpace(str) != "" { + r = append(r, strings.TrimSpace(str)) + } + } + return r +} + +func parseProcFile(data []byte) []string { + if len(data) < 1 { + return nil + } + if data[len(data)-1] == 0 { + data = data[:len(data)-1] + } + parts := bytes.Split(data, []byte{0}) + var strParts []string + for _, p := range parts { + strParts = append(strParts, string(p)) + } + + return strParts +} + +// IsValidContainerRuntime checks if a string is a valid container runtime. +func IsValidContainerRuntime(s string) bool { + for _, b := range ContainerRuntimes { + if string(b) == s { + return true + } + } + return false +} + +// HasNamespace determines if a container is using a particular namespace or the +// host namespace. +// The device number of an unnamespaced /proc/1/ns/{ns} is 4 and anything else is +// higher. +// Only works from inside a container. +func HasNamespace(ns string) (bool, error) { + file := fmt.Sprintf("/proc/1/ns/%s", ns) + + // Use Lstat to not follow the symlink. + var info syscall.Stat_t + if err := syscall.Lstat(file, &info); err != nil { + return false, &os.PathError{Op: "lstat", Path: file, Err: err} + } + + // Get the device number. If it is higher than 4 it is in a namespace. + if info.Dev > 4 { + return true, nil + } + + return false, nil +} diff --git a/extender/code/vendor/github.com/go-logr/logr/LICENSE b/extender/code/vendor/github.com/go-logr/logr/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/extender/code/vendor/github.com/go-logr/logr/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
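A minimal sketch of how a consumer typically drives the vendored fsnotify package shown above (the kqueue and Windows backends implement the same NewWatcher/Add/Remove/Close API and the Events/Errors channels); the watched path and the write-only filter here are hypothetical, chosen only for illustration:

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher() // starts the readEvents goroutine
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close() // unregisters kevents / closes the completion port

	// Hypothetical path; watching is non-recursive for both backends.
	if err := watcher.Add("/tmp/watched-dir"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case event := <-watcher.Events:
			// Op is a bitmask over Create, Write, Remove, Rename, Chmod.
			if event.Op&fsnotify.Write == fsnotify.Write {
				log.Println("modified:", event.Name)
			}
		case err := <-watcher.Errors:
			log.Println("watch error:", err)
		}
	}
}
```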
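Likewise, the bpfd proc helpers above are plain functions over /proc. A short sketch of a caller probing its own environment, using only the signatures defined in proc.go (per the doc comments, a pid below one selects "self"):

```go
package main

import (
	"fmt"

	"github.com/genuinetools/bpfd/proc"
)

func main() {
	// tgid/pid <= 0 means "inspect /proc/self".
	rt := proc.GetContainerRuntime(-1, -1)
	fmt.Println("container runtime:", rt) // e.g. "docker" or "not-found"

	if id := proc.GetContainerID(-1, -1); id != "" {
		fmt.Println("container id:", id)
	}

	fmt.Println("seccomp:", proc.GetSeccompEnforcingMode(-1))
	fmt.Println("no_new_privs:", proc.GetNoNewPrivileges(-1))

	inUserNS, mappings := proc.GetUserNamespaceInfo(-1)
	fmt.Println("user namespace:", inUserNS, "mappings:", mappings)
}
```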
diff --git a/extender/code/vendor/github.com/go-logr/logr/README.md b/extender/code/vendor/github.com/go-logr/logr/README.md new file mode 100644 index 000000000..e9b5520a1 --- /dev/null +++ b/extender/code/vendor/github.com/go-logr/logr/README.md @@ -0,0 +1,183 @@ +# A more minimal logging API for Go + +Before you consider this package, please read [this blog post by the +inimitable Dave Cheney][warning-makes-no-sense]. I really appreciate what +he has to say, and it largely aligns with my own experiences. Too many +choices of levels means inconsistent logs. + +This package offers a purely abstract interface, based on these ideas but with +a few twists. Code can depend on just this interface and have the actual +logging implementation be injected from callers. Ideally only `main()` knows +what logging implementation is being used. + +# Differences from Dave's ideas + +The main differences are: + +1) Dave basically proposes doing away with the notion of a logging API in favor +of `fmt.Printf()`. I disagree, especially when you consider things like output +locations, timestamps, file and line decorations, and structured logging. I +restrict the API to just 2 types of logs: info and error. + +Info logs are things you want to tell the user which are not errors. Error +logs are, well, errors. If your code receives an `error` from a subordinate +function call and is logging that `error` *and not returning it*, use error +logs. + +2) Verbosity-levels on info logs. This gives developers a chance to indicate +arbitrary grades of importance for info logs, without assigning names with +semantic meaning such as "warning", "trace", and "debug". Superficially this +may feel very similar, but the primary difference is the lack of semantics. +Because verbosity is a numerical value, it's safe to assume that an app running +with higher verbosity means more (and less important) logs will be generated. + +This is a BETA grade API. + +There are implementations for the following logging libraries: + +- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr) +- **k8s.io/klog**: [klogr](https://git.k8s.io/klog/klogr) +- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr) +- **log** (the Go standard library logger): + [stdr](https://github.com/go-logr/stdr) +- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr) +- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend) +- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr) + +# FAQ + +## Conceptual + +## Why structured logging? + +- **Structured logs are more easily queriable**: Since you've got + key-value pairs, it's much easier to query your structured logs for + particular values by filtering on the contents of a particular key -- + think searching request logs for error codes, Kubernetes reconcilers for + the name and namespace of the reconciled object, etc + +- **Structured logging makes it easier to have cross-referencable logs**: + Similarly to searchability, if you maintain conventions around your + keys, it becomes easy to gather all log lines related to a particular + concept. 
+
+- **Structured logs allow better dimensions of filtering**: if you have
+  structure to your logs, you've got more precise control over how much
+  information is logged -- you might choose in a particular configuration
+  to log certain keys but not others, only log lines where a certain key
+  matches a certain value, etc, instead of just having v-levels and names
+  to key off of.
+
+- **Structured logs better represent structured data**: sometimes, the
+  data that you want to log is inherently structured (think tuple-like
+  objects). Structured logs allow you to preserve that structure when
+  outputting.
+
+## Why V-levels?
+
+**V-levels give operators an easy way to control the chattiness of log
+operations**. V-levels provide a way for a given package to distinguish
+the relative importance or verbosity of a given log message. Then, if
+a particular logger or package is logging too many messages, the user
+of the package can simply change the v-levels for that library.
+
+## Why not more named levels, like Warning?
+
+Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences
+from Dave's ideas](#differences-from-daves-ideas).
+
+## Why not allow format strings, too?
+
+**Format strings negate many of the benefits of structured logs**:
+
+- They're not easily searchable without resorting to fuzzy searching,
+  regular expressions, etc
+
+- They don't store structured data well, since contents are flattened into
+  a string
+
+- They're not cross-referencable
+
+- They don't compress easily, since the message is not constant
+
+(unless you turn positional parameters into key-value pairs with numerical
+keys, at which point you've gotten key-value logging with meaningless
+keys)
+
+## Practical
+
+## Why key-value pairs, and not a map?
+
+Key-value pairs are *much* easier to optimize, especially around
+allocations. Zap (a structured logger that inspired logr's interface) has
+[performance measurements](https://github.com/uber-go/zap#performance)
+that show this quite nicely.
+
+While the interface ends up being a little less obvious, you get
+potentially better performance, plus avoid making users type
+`map[string]string{}` every time they want to log.
+
+## What if my V-levels differ between libraries?
+
+That's fine. Control your V-levels on a per-logger basis, and use the
+`WithName` function to pass different loggers to different libraries.
+
+Generally, you should take care to ensure that you have relatively
+consistent V-levels within a given logger, however, as this makes deciding
+on what verbosity of logs to request easier.
+
+## But I *really* want to use a format string!
+
+That's not actually a question. Assuming your question is "how do
+I convert my mental model of logging with format strings to logging with
+constant messages":
+
+1. figure out what the error actually is, as you'd write in a TL;DR style,
+   and use that as a message
+
+2. For every place you'd write a format specifier, look to the word before
+   it, and add that as a key value pair
+
+For instance, consider the following examples (all taken from spots in the
+Kubernetes codebase):
+
+- `klog.V(4).Infof("Client is returning errors: code %v, error %v",
+  responseCode, err)` becomes `logger.Error(err, "client returned an
+  error", "code", responseCode)`
+
+- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v",
+  seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after
+  response when requesting url", "attempt", retries, "after
+  seconds", seconds, "url", url)`
+
+If you *really* must use a format string, place it as a key value, and
+call `fmt.Sprintf` yourself -- for instance, `log.Printf("unable to
+reflect over type %T", thing)` becomes `logger.Info("unable to reflect over
+type", "type", fmt.Sprintf("%T", thing))`. In general though, the cases where
+this is necessary should be few and far between.
+
+## How do I choose my V-levels?
+
+This is basically the only hard constraint: increase V-levels to denote
+more verbose or more debug-y logs.
+
+Otherwise, you can start out with `0` as "you always want to see this",
+`1` as "common logging that you might *possibly* want to turn off", and
+`10` as "I would like to performance-test your log collection stack".
+
+Then gradually choose levels in between as you need them, working your way
+down from 10 (for debug and trace style logs) and up from 1 (for chattier
+info-type logs).
+
+## How do I choose my keys?
+
+- make your keys human-readable
+- constant keys are generally a good idea
+- be consistent across your codebase
+- keys should naturally match parts of the message string
+
+While key names are mostly unrestricted (and spaces are acceptable),
+it's generally a good idea to stick to printable ascii characters, or at
+least match the general character set of your log lines.
+
+[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging
diff --git a/extender/code/vendor/github.com/go-logr/logr/discard.go b/extender/code/vendor/github.com/go-logr/logr/discard.go
new file mode 100644
index 000000000..2bafb13d1
--- /dev/null
+++ b/extender/code/vendor/github.com/go-logr/logr/discard.go
@@ -0,0 +1,51 @@
+/*
+Copyright 2020 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+// Discard returns a valid Logger that discards all messages logged to it.
+// It can be used whenever the caller is not interested in the logs.
+func Discard() Logger {
+	return DiscardLogger{}
+}
+
+// DiscardLogger is a Logger that discards all messages.
+type DiscardLogger struct{} + +func (l DiscardLogger) Enabled() bool { + return false +} + +func (l DiscardLogger) Info(msg string, keysAndValues ...interface{}) { +} + +func (l DiscardLogger) Error(err error, msg string, keysAndValues ...interface{}) { +} + +func (l DiscardLogger) V(level int) Logger { + return l +} + +func (l DiscardLogger) WithValues(keysAndValues ...interface{}) Logger { + return l +} + +func (l DiscardLogger) WithName(name string) Logger { + return l +} + +// Verify that it actually implements the interface +var _ Logger = DiscardLogger{} diff --git a/extender/code/vendor/github.com/go-logr/logr/go.mod b/extender/code/vendor/github.com/go-logr/logr/go.mod new file mode 100644 index 000000000..591884e91 --- /dev/null +++ b/extender/code/vendor/github.com/go-logr/logr/go.mod @@ -0,0 +1,3 @@ +module github.com/go-logr/logr + +go 1.14 diff --git a/extender/code/vendor/github.com/go-logr/logr/logr.go b/extender/code/vendor/github.com/go-logr/logr/logr.go new file mode 100644 index 000000000..842428bd3 --- /dev/null +++ b/extender/code/vendor/github.com/go-logr/logr/logr.go @@ -0,0 +1,266 @@ +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This design derives from Dave Cheney's blog: +// http://dave.cheney.net/2015/11/05/lets-talk-about-logging +// +// This is a BETA grade API. Until there is a significant 2nd implementation, +// I don't really know how it will change. + +// Package logr defines abstract interfaces for logging. Packages can depend on +// these interfaces and callers can implement logging in whatever way is +// appropriate. +// +// Usage +// +// Logging is done using a Logger. Loggers can have name prefixes and named +// values attached, so that all log messages logged with that Logger have some +// base context associated. +// +// The term "key" is used to refer to the name associated with a particular +// value, to disambiguate it from the general Logger name. +// +// For instance, suppose we're trying to reconcile the state of an object, and +// we want to log that we've made some decision. +// +// With the traditional log package, we might write: +// log.Printf("decided to set field foo to value %q for object %s/%s", +// targetValue, object.Namespace, object.Name) +// +// With logr's structured logging, we'd write: +// // elsewhere in the file, set up the logger to log with the prefix of +// // "reconcilers", and the named value target-type=Foo, for extra context. +// log := mainLogger.WithName("reconcilers").WithValues("target-type", "Foo") +// +// // later on... +// log.Info("setting foo on object", "value", targetValue, "object", object) +// +// Depending on our logging implementation, we could then make logging decisions +// based on field values (like only logging such events for objects in a certain +// namespace), or copy the structured information into a structured log store. +// +// For logging errors, Logger has a method called Error. Suppose we wanted to +// log an error while reconciling. 
With the traditional log package, we might +// write: +// log.Errorf("unable to reconcile object %s/%s: %v", object.Namespace, object.Name, err) +// +// With logr, we'd instead write: +// // assuming the above setup for log +// log.Error(err, "unable to reconcile object", "object", object) +// +// This functions similarly to: +// log.Info("unable to reconcile object", "error", err, "object", object) +// +// However, it ensures that a standard key for the error value ("error") is used +// across all error logging. Furthermore, certain implementations may choose to +// attach additional information (such as stack traces) on calls to Error, so +// it's preferred to use Error to log errors. +// +// Parts of a log line +// +// Each log message from a Logger has four types of context: +// logger name, log verbosity, log message, and the named values. +// +// The Logger name consists of a series of name "segments" added by successive +// calls to WithName. These name segments will be joined in some way by the +// underlying implementation. It is strongly recommended that name segments +// contain simple identifiers (letters, digits, and hyphen), and do not contain +// characters that could muddle the log output or confuse the joining operation +// (e.g. whitespace, commas, periods, slashes, brackets, quotes, etc). +// +// Log verbosity represents how little a log matters. Level zero, the default, +// matters most. Increasing levels matter less and less. Try to avoid lots of +// different verbosity levels, and instead provide useful keys, logger names, +// and log messages for users to filter on. It's illegal to pass a log level +// below zero. +// +// The log message consists of a constant message attached to the log line. +// This should generally be a simple description of what's occurring, and should +// never be a format string. +// +// Variable information can then be attached using named values (key/value +// pairs). Keys are arbitrary strings, while values may be any Go value. +// +// Key Naming Conventions +// +// Keys are not strictly required to conform to any specification or regex, but +// it is recommended that they: +// * be human-readable and meaningful (not auto-generated or simple ordinals) +// * be constant (not dependent on input data) +// * contain only printable characters +// * not contain whitespace or punctuation +// +// These guidelines help ensure that log data is processed properly regardless +// of the log implementation. For example, log implementations will try to +// output JSON data or will store data for later database (e.g. SQL) queries. +// +// While users are generally free to use key names of their choice, it's +// generally best to avoid using the following keys, as they're frequently used +// by implementations: +// +// * `"caller"`: the calling information (file/line) of a particular log line. +// * `"error"`: the underlying error value in the `Error` method. +// * `"level"`: the log level. +// * `"logger"`: the name of the associated logger. +// * `"msg"`: the log message. +// * `"stacktrace"`: the stack trace associated with a particular log line or +// error (often from the `Error` message). +// * `"ts"`: the timestamp for a log line. +// +// Implementations are encouraged to make use of these keys to represent the +// above concepts, when necessary (for example, in a pure-JSON output form, it +// would be necessary to represent at least message and timestamp as ordinary +// named values). 
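+//
+// To make the conventions above concrete, here is an editorial sketch (not
+// part of the upstream docs) of how a pure-JSON implementation might render
+//   logger.V(1).Info("setting foo", "object", "ns/name")
+// using the reserved keys listed above; the exact field set is
+// implementation-defined:
+//   {"level":1,"msg":"setting foo","object":"ns/name","ts":"..."}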
+// +// Implementations may choose to give callers access to the underlying +// logging implementation. The recommended pattern for this is: +// // Underlier exposes access to the underlying logging implementation. +// // Since callers only have a logr.Logger, they have to know which +// // implementation is in use, so this interface is less of an abstraction +// // and more of a way to test type conversion. +// type Underlier interface { +// GetUnderlying() +// } +package logr + +import ( + "context" +) + +// TODO: consider adding back in format strings if they're really needed +// TODO: consider other bits of zap/zapcore functionality like ObjectMarshaller (for arbitrary objects) +// TODO: consider other bits of glog functionality like Flush, OutputStats + +// Logger represents the ability to log messages, both errors and not. +type Logger interface { + // Enabled tests whether this Logger is enabled. For example, commandline + // flags might be used to set the logging verbosity and disable some info + // logs. + Enabled() bool + + // Info logs a non-error message with the given key/value pairs as context. + // + // The msg argument should be used to add some constant description to + // the log line. The key/value pairs can then be used to add additional + // variable information. The key/value pairs should alternate string + // keys and arbitrary values. + Info(msg string, keysAndValues ...interface{}) + + // Error logs an error, with the given message and key/value pairs as context. + // It functions similarly to calling Info with the "error" named value, but may + // have unique behavior, and should be preferred for logging errors (see the + // package documentation for more information). + // + // The msg field should be used to add context to any underlying error, + // while the err field should be used to attach the actual error that + // triggered this log line, if present. + Error(err error, msg string, keysAndValues ...interface{}) + + // V returns a Logger value for a specific verbosity level, relative to + // this Logger. In other words, V values are additive. A higher verbosity + // level means a log message is less important. It's illegal to pass a log + // level less than zero. + V(level int) Logger + + // WithValues adds some key-value pairs of context to a logger. + // See Info for documentation on how key/value pairs work. + WithValues(keysAndValues ...interface{}) Logger + + // WithName adds a new element to the logger's name. + // Successive calls to WithName continue to append + // suffixes to the logger's name. It's strongly recommended + // that name segments contain only letters, digits, and hyphens + // (see the package documentation for more information). + WithName(name string) Logger +} + +// InfoLogger provides compatibility with code that relies on the v0.1.0 +// interface. +// +// Deprecated: InfoLogger is an artifact of early versions of this API. New +// users should never use it and existing users should use Logger instead. This +// will be removed in a future release. +type InfoLogger = Logger + +type contextKey struct{} + +// FromContext returns a Logger constructed from ctx or nil if no +// logger details are found. +func FromContext(ctx context.Context) Logger { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v + } + + return nil +} + +// FromContextOrDiscard returns a Logger constructed from ctx or a Logger +// that discards all messages if no logger details are found.
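+//
+// As an editorial illustration (not part of the upstream docs), pairing
+// NewContext with FromContextOrDiscard lets helper code log without
+// threading a Logger through every signature; someLogger below stands in
+// for any concrete Logger implementation:
+//
+//   ctx := logr.NewContext(context.Background(), someLogger)
+//   // ... possibly several calls deeper ...
+//   log := logr.FromContextOrDiscard(ctx) // never nil, always safe to call
+//   log.Info("doing work")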
+func FromContextOrDiscard(ctx context.Context) Logger { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v + } + + return Discard() +} + +// NewContext returns a new context derived from ctx that embeds the Logger. +func NewContext(ctx context.Context, l Logger) context.Context { + return context.WithValue(ctx, contextKey{}, l) +} + +// CallDepthLogger represents a Logger that knows how to climb the call stack +// to identify the original call site and can offset the depth by a specified +// number of frames. This is useful for users who have helper functions +// between the "real" call site and the actual calls to Logger methods. +// Implementations that log information about the call site (such as file, +// function, or line) would otherwise log information about the intermediate +// helper functions. +// +// This is an optional interface and implementations are not required to +// support it. +type CallDepthLogger interface { + Logger + + // WithCallDepth returns a Logger that will offset the call stack by the + // specified number of frames when logging call site information. If depth + // is 0 the attribution should be to the direct caller of this method. If + // depth is 1 the attribution should skip 1 call frame, and so on. + // Successive calls to this are additive. + WithCallDepth(depth int) Logger +} + +// WithCallDepth returns a Logger that will offset the call stack by the +// specified number of frames when logging call site information, if possible. +// This is useful for users who have helper functions between the "real" call +// site and the actual calls to Logger methods. If depth is 0 the attribution +// should be to the direct caller of this function. If depth is 1 the +// attribution should skip 1 call frame, and so on. Successive calls to this +// are additive. +// +// If the underlying log implementation supports the CallDepthLogger interface, +// the WithCallDepth method will be called and the result returned. If the +// implementation does not support CallDepthLogger, the original Logger will be +// returned. +// +// Callers which care about whether this was supported or not should test for +// CallDepthLogger support themselves. +func WithCallDepth(logger Logger, depth int) Logger { + if decorator, ok := logger.(CallDepthLogger); ok { + return decorator.WithCallDepth(depth) + } + return logger +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/AUTHORS b/extender/code/vendor/github.com/gogo/protobuf/AUTHORS new file mode 100644 index 000000000..3d97fc7a2 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of GoGo authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS file, which +# lists people. For example, employees are listed in CONTRIBUTORS, +# but not in AUTHORS, because the employer holds the copyright. + +# Names should be added to this file as one of +# Organization's name +# Individual's name +# Individual's name + +# Please keep the list sorted. 
+ +Sendgrid, Inc +Vastech SA (PTY) LTD +Walter Schulze diff --git a/extender/code/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/extender/code/vendor/github.com/gogo/protobuf/CONTRIBUTORS new file mode 100644 index 000000000..1b4f6c208 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/CONTRIBUTORS @@ -0,0 +1,23 @@ +Anton Povarov +Brian Goff +Clayton Coleman +Denis Smirnov +DongYun Kang +Dwayne Schultz +Georg Apitz +Gustav Paul +Johan Brandhorst +John Shahid +John Tuley +Laurent +Patrick Lee +Peter Edge +Roger Johansson +Sam Nguyen +Sergio Arbeo +Stephen J Day +Tamir Duberstein +Todd Eisenberger +Tormod Erevik Lea +Vyacheslav Kim +Walter Schulze diff --git a/extender/code/vendor/github.com/gogo/protobuf/LICENSE b/extender/code/vendor/github.com/gogo/protobuf/LICENSE new file mode 100644 index 000000000..f57de90da --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/LICENSE @@ -0,0 +1,35 @@ +Copyright (c) 2013, The GoGo Authors. All rights reserved. + +Protocol Buffers for Go with Gadgets + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/extender/code/vendor/github.com/gogo/protobuf/gogoproto/Makefile b/extender/code/vendor/github.com/gogo/protobuf/gogoproto/Makefile new file mode 100644 index 000000000..0b4659b73 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/gogoproto/Makefile @@ -0,0 +1,37 @@ +# Protocol Buffers for Go with Gadgets +# +# Copyright (c) 2013, The GoGo Authors. All rights reserved. +# http://github.com/gogo/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:../../../../ --proto_path=../../../../:../protobuf/:. *.proto + +restore: + cp gogo.pb.golden gogo.pb.go + +preserve: + cp gogo.pb.go gogo.pb.golden diff --git a/extender/code/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/extender/code/vendor/github.com/gogo/protobuf/gogoproto/doc.go new file mode 100644 index 000000000..081c86fa8 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/gogoproto/doc.go @@ -0,0 +1,169 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package gogoproto provides extensions for protocol buffers to achieve: + + - fast marshalling and unmarshalling. + - peace of mind by optionally generating test and benchmark code. + - more canonical Go structures. + - less typing by optionally generating extra helper code. 
+ - goprotobuf compatibility + +More Canonical Go Structures + +A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs. +You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct. +Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions. + + - nullable, if false, a field is generated without a pointer (see warning below). + - embed, if true, the field is generated as an embedded field. + - customtype, it works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct that marshal to bytes. For example, custom.Uuid or custom.Fixed128 + - customname (beta), changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames. + - casttype (beta), changes the generated fieldtype. All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums. + - castkey (beta), changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + - castvalue (beta), changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + +Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset. + +Let us look at: + + github.com/gogo/protobuf/test/example/example.proto + +for a quicker overview. + +The following message: + + package test; + + import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + + message A { + optional string Description = 1 [(gogoproto.nullable) = false]; + optional int64 Number = 2 [(gogoproto.nullable) = false]; + optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false]; + } + +Will generate a Go struct which looks a lot like this: + + type A struct { + Description string + Number int64 + Id github_com_gogo_protobuf_test_custom.Uuid + } + +You will see there are no pointers, since all fields are non-nullable. +You will also see a custom type which marshals to a string. +Be warned it is your responsibility to test your custom types thoroughly. +You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods. + +Next we will embed the message A in message B. + + message B { + optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true]; + repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false]; + } + +See below that A is embedded in B. + + type B struct { + A + G []github_com_gogo_protobuf_test_custom.Uint128 + } + +Also see the repeated custom type. + + type Uint128 [2]uint64 + +Next we will create a custom name for one of our fields.
+ + message C { + optional int64 size = 1 [(gogoproto.customname) = "MySize"]; + } + +See below that the field's name is MySize and not Size. + + type C struct { + MySize *int64 + } + +This is useful when a protocol buffer message has a field name which conflicts with a generated method. +As an example, having a field name size and using the sizer plugin to generate a Size method will cause a Go compiler error. +Using customname you can fix this error without changing the field name. +This is typically useful when working with a protocol buffer that was designed before these methods and/or the Go language were available. + +Gogoprotobuf also has some more subtle changes; these could be changed back: + + - the generated package names for imports do not have the extra /filename.pb, + but are actually the imports specified in the .proto file. + +Gogoprotobuf also has lost some features which should be brought back with time: + + - Marshalling and unmarshalling with reflect and without the unsafe package, + this requires work in pointer_reflect.go + +Why does nullable break protocol buffer specifications: + +The protocol buffer specification states, somewhere, that you should be able to tell whether a +field is set or unset. With the option nullable=false this feature is lost, +since your non-nullable fields will always be set. It can be seen as a layer on top of +protocol buffers, where before and after marshalling all non-nullable fields are set +and they cannot be unset. + +Goprotobuf Compatibility: + +Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers. +Gogoprotobuf generates the same code as goprotobuf if no extensions are used. +The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf: + + - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto. + - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix + - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method, this is useful if you would rather use enum_stringer, or write your own string method. + - goproto_getters, if false, the message is generated without get methods, this is useful when you would rather use face + - goproto_stringer, if false, the message is generated without the default string method, this is useful if you would rather use stringer, or write your own string method. + - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension + - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields. + - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway). + +Less Typing and Peace of Mind are explained in their specific plugin folders' godoc: + + - github.com/gogo/protobuf/plugin/ + +If you do not use any of these extensions, the code that is generated +will be the same as if goprotobuf had generated it.
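+
+As an illustrative sketch (editorial; not taken from the upstream docs),
+several of these file-level options can be combined at the top of a .proto
+file like so:
+
+	syntax = "proto2";
+	package test;
+
+	import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+	option (gogoproto.goproto_getters_all) = false;
+	option (gogoproto.marshaler_all) = true;
+	option (gogoproto.unmarshaler_all) = true;
+	option (gogoproto.sizer_all) = true;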
+ +The most complete way to see examples is to look at + + github.com/gogo/protobuf/test/thetest.proto + +Gogoprototest is a separate project, +because we want to keep gogoprotobuf independent of goprotobuf, +but we still want to test it thoroughly. + +*/ +package gogoproto diff --git a/extender/code/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/extender/code/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go new file mode 100644 index 000000000..1e91766ae --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go @@ -0,0 +1,874 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: gogo.proto + +package gogoproto + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62001, + Name: "gogoproto.goproto_enum_prefix", + Tag: "varint,62001,opt,name=goproto_enum_prefix", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62021, + Name: "gogoproto.goproto_enum_stringer", + Tag: "varint,62021,opt,name=goproto_enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62022, + Name: "gogoproto.enum_stringer", + Tag: "varint,62022,opt,name=enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*string)(nil), + Field: 62023, + Name: "gogoproto.enum_customname", + Tag: "bytes,62023,opt,name=enum_customname", + Filename: "gogo.proto", +} + +var E_Enumdecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62024, + Name: "gogoproto.enumdecl", + Tag: "varint,62024,opt,name=enumdecl", + Filename: "gogo.proto", +} + +var E_EnumvalueCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumValueOptions)(nil), + ExtensionType: (*string)(nil), + Field: 66001, + Name: "gogoproto.enumvalue_customname", + Tag: "bytes,66001,opt,name=enumvalue_customname", + Filename: "gogo.proto", +} + +var E_GoprotoGettersAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63001, + Name: "gogoproto.goproto_getters_all", + Tag: "varint,63001,opt,name=goproto_getters_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63002, + Name: "gogoproto.goproto_enum_prefix_all", + Tag: "varint,63002,opt,name=goproto_enum_prefix_all", + Filename: "gogo.proto", +} + +var E_GoprotoStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63003, +
Name: "gogoproto.goproto_stringer_all", + Tag: "varint,63003,opt,name=goproto_stringer_all", + Filename: "gogo.proto", +} + +var E_VerboseEqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63004, + Name: "gogoproto.verbose_equal_all", + Tag: "varint,63004,opt,name=verbose_equal_all", + Filename: "gogo.proto", +} + +var E_FaceAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63005, + Name: "gogoproto.face_all", + Tag: "varint,63005,opt,name=face_all", + Filename: "gogo.proto", +} + +var E_GostringAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63006, + Name: "gogoproto.gostring_all", + Tag: "varint,63006,opt,name=gostring_all", + Filename: "gogo.proto", +} + +var E_PopulateAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63007, + Name: "gogoproto.populate_all", + Tag: "varint,63007,opt,name=populate_all", + Filename: "gogo.proto", +} + +var E_StringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63008, + Name: "gogoproto.stringer_all", + Tag: "varint,63008,opt,name=stringer_all", + Filename: "gogo.proto", +} + +var E_OnlyoneAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63009, + Name: "gogoproto.onlyone_all", + Tag: "varint,63009,opt,name=onlyone_all", + Filename: "gogo.proto", +} + +var E_EqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63013, + Name: "gogoproto.equal_all", + Tag: "varint,63013,opt,name=equal_all", + Filename: "gogo.proto", +} + +var E_DescriptionAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63014, + Name: "gogoproto.description_all", + Tag: "varint,63014,opt,name=description_all", + Filename: "gogo.proto", +} + +var E_TestgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63015, + Name: "gogoproto.testgen_all", + Tag: "varint,63015,opt,name=testgen_all", + Filename: "gogo.proto", +} + +var E_BenchgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63016, + Name: "gogoproto.benchgen_all", + Tag: "varint,63016,opt,name=benchgen_all", + Filename: "gogo.proto", +} + +var E_MarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63017, + Name: "gogoproto.marshaler_all", + Tag: "varint,63017,opt,name=marshaler_all", + Filename: "gogo.proto", +} + +var E_UnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63018, + Name: "gogoproto.unmarshaler_all", + Tag: "varint,63018,opt,name=unmarshaler_all", + Filename: "gogo.proto", +} + +var E_StableMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63019, + Name: "gogoproto.stable_marshaler_all", + Tag: "varint,63019,opt,name=stable_marshaler_all", + Filename: "gogo.proto", +} + +var E_SizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63020, + Name: "gogoproto.sizer_all", + Tag: 
"varint,63020,opt,name=sizer_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63021, + Name: "gogoproto.goproto_enum_stringer_all", + Tag: "varint,63021,opt,name=goproto_enum_stringer_all", + Filename: "gogo.proto", +} + +var E_EnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63022, + Name: "gogoproto.enum_stringer_all", + Tag: "varint,63022,opt,name=enum_stringer_all", + Filename: "gogo.proto", +} + +var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63023, + Name: "gogoproto.unsafe_marshaler_all", + Tag: "varint,63023,opt,name=unsafe_marshaler_all", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63024, + Name: "gogoproto.unsafe_unmarshaler_all", + Tag: "varint,63024,opt,name=unsafe_unmarshaler_all", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63025, + Name: "gogoproto.goproto_extensions_map_all", + Tag: "varint,63025,opt,name=goproto_extensions_map_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63026, + Name: "gogoproto.goproto_unrecognized_all", + Tag: "varint,63026,opt,name=goproto_unrecognized_all", + Filename: "gogo.proto", +} + +var E_GogoprotoImport = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63027, + Name: "gogoproto.gogoproto_import", + Tag: "varint,63027,opt,name=gogoproto_import", + Filename: "gogo.proto", +} + +var E_ProtosizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63028, + Name: "gogoproto.protosizer_all", + Tag: "varint,63028,opt,name=protosizer_all", + Filename: "gogo.proto", +} + +var E_CompareAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63029, + Name: "gogoproto.compare_all", + Tag: "varint,63029,opt,name=compare_all", + Filename: "gogo.proto", +} + +var E_TypedeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63030, + Name: "gogoproto.typedecl_all", + Tag: "varint,63030,opt,name=typedecl_all", + Filename: "gogo.proto", +} + +var E_EnumdeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63031, + Name: "gogoproto.enumdecl_all", + Tag: "varint,63031,opt,name=enumdecl_all", + Filename: "gogo.proto", +} + +var E_GoprotoRegistration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63032, + Name: "gogoproto.goproto_registration", + Tag: "varint,63032,opt,name=goproto_registration", + Filename: "gogo.proto", +} + +var E_MessagenameAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63033, + Name: "gogoproto.messagename_all", + Tag: "varint,63033,opt,name=messagename_all", + Filename: "gogo.proto", +} + +var E_GoprotoSizecacheAll = 
&proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63034, + Name: "gogoproto.goproto_sizecache_all", + Tag: "varint,63034,opt,name=goproto_sizecache_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63035, + Name: "gogoproto.goproto_unkeyed_all", + Tag: "varint,63035,opt,name=goproto_unkeyed_all", + Filename: "gogo.proto", +} + +var E_GoprotoGetters = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64001, + Name: "gogoproto.goproto_getters", + Tag: "varint,64001,opt,name=goproto_getters", + Filename: "gogo.proto", +} + +var E_GoprotoStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64003, + Name: "gogoproto.goproto_stringer", + Tag: "varint,64003,opt,name=goproto_stringer", + Filename: "gogo.proto", +} + +var E_VerboseEqual = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64004, + Name: "gogoproto.verbose_equal", + Tag: "varint,64004,opt,name=verbose_equal", + Filename: "gogo.proto", +} + +var E_Face = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64005, + Name: "gogoproto.face", + Tag: "varint,64005,opt,name=face", + Filename: "gogo.proto", +} + +var E_Gostring = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64006, + Name: "gogoproto.gostring", + Tag: "varint,64006,opt,name=gostring", + Filename: "gogo.proto", +} + +var E_Populate = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64007, + Name: "gogoproto.populate", + Tag: "varint,64007,opt,name=populate", + Filename: "gogo.proto", +} + +var E_Stringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 67008, + Name: "gogoproto.stringer", + Tag: "varint,67008,opt,name=stringer", + Filename: "gogo.proto", +} + +var E_Onlyone = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64009, + Name: "gogoproto.onlyone", + Tag: "varint,64009,opt,name=onlyone", + Filename: "gogo.proto", +} + +var E_Equal = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64013, + Name: "gogoproto.equal", + Tag: "varint,64013,opt,name=equal", + Filename: "gogo.proto", +} + +var E_Description = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64014, + Name: "gogoproto.description", + Tag: "varint,64014,opt,name=description", + Filename: "gogo.proto", +} + +var E_Testgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64015, + Name: "gogoproto.testgen", + Tag: "varint,64015,opt,name=testgen", + Filename: "gogo.proto", +} + +var E_Benchgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64016, + Name: "gogoproto.benchgen", + Tag: "varint,64016,opt,name=benchgen", + Filename: "gogo.proto", +} + +var E_Marshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), 
+ Field: 64017, + Name: "gogoproto.marshaler", + Tag: "varint,64017,opt,name=marshaler", + Filename: "gogo.proto", +} + +var E_Unmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64018, + Name: "gogoproto.unmarshaler", + Tag: "varint,64018,opt,name=unmarshaler", + Filename: "gogo.proto", +} + +var E_StableMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64019, + Name: "gogoproto.stable_marshaler", + Tag: "varint,64019,opt,name=stable_marshaler", + Filename: "gogo.proto", +} + +var E_Sizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64020, + Name: "gogoproto.sizer", + Tag: "varint,64020,opt,name=sizer", + Filename: "gogo.proto", +} + +var E_UnsafeMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64023, + Name: "gogoproto.unsafe_marshaler", + Tag: "varint,64023,opt,name=unsafe_marshaler", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64024, + Name: "gogoproto.unsafe_unmarshaler", + Tag: "varint,64024,opt,name=unsafe_unmarshaler", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64025, + Name: "gogoproto.goproto_extensions_map", + Tag: "varint,64025,opt,name=goproto_extensions_map", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognized = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64026, + Name: "gogoproto.goproto_unrecognized", + Tag: "varint,64026,opt,name=goproto_unrecognized", + Filename: "gogo.proto", +} + +var E_Protosizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64028, + Name: "gogoproto.protosizer", + Tag: "varint,64028,opt,name=protosizer", + Filename: "gogo.proto", +} + +var E_Compare = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64029, + Name: "gogoproto.compare", + Tag: "varint,64029,opt,name=compare", + Filename: "gogo.proto", +} + +var E_Typedecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64030, + Name: "gogoproto.typedecl", + Tag: "varint,64030,opt,name=typedecl", + Filename: "gogo.proto", +} + +var E_Messagename = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64033, + Name: "gogoproto.messagename", + Tag: "varint,64033,opt,name=messagename", + Filename: "gogo.proto", +} + +var E_GoprotoSizecache = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64034, + Name: "gogoproto.goproto_sizecache", + Tag: "varint,64034,opt,name=goproto_sizecache", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64035, + Name: "gogoproto.goproto_unkeyed", + Tag: "varint,64035,opt,name=goproto_unkeyed", + Filename: "gogo.proto", +} + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: 
(*bool)(nil), + Field: 65001, + Name: "gogoproto.nullable", + Tag: "varint,65001,opt,name=nullable", + Filename: "gogo.proto", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65002, + Name: "gogoproto.embed", + Tag: "varint,65002,opt,name=embed", + Filename: "gogo.proto", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65003, + Name: "gogoproto.customtype", + Tag: "bytes,65003,opt,name=customtype", + Filename: "gogo.proto", +} + +var E_Customname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65004, + Name: "gogoproto.customname", + Tag: "bytes,65004,opt,name=customname", + Filename: "gogo.proto", +} + +var E_Jsontag = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65005, + Name: "gogoproto.jsontag", + Tag: "bytes,65005,opt,name=jsontag", + Filename: "gogo.proto", +} + +var E_Moretags = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65006, + Name: "gogoproto.moretags", + Tag: "bytes,65006,opt,name=moretags", + Filename: "gogo.proto", +} + +var E_Casttype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65007, + Name: "gogoproto.casttype", + Tag: "bytes,65007,opt,name=casttype", + Filename: "gogo.proto", +} + +var E_Castkey = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65008, + Name: "gogoproto.castkey", + Tag: "bytes,65008,opt,name=castkey", + Filename: "gogo.proto", +} + +var E_Castvalue = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65009, + Name: "gogoproto.castvalue", + Tag: "bytes,65009,opt,name=castvalue", + Filename: "gogo.proto", +} + +var E_Stdtime = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65010, + Name: "gogoproto.stdtime", + Tag: "varint,65010,opt,name=stdtime", + Filename: "gogo.proto", +} + +var E_Stdduration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65011, + Name: "gogoproto.stdduration", + Tag: "varint,65011,opt,name=stdduration", + Filename: "gogo.proto", +} + +var E_Wktpointer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65012, + Name: "gogoproto.wktpointer", + Tag: "varint,65012,opt,name=wktpointer", + Filename: "gogo.proto", +} + +func init() { + proto.RegisterExtension(E_GoprotoEnumPrefix) + proto.RegisterExtension(E_GoprotoEnumStringer) + proto.RegisterExtension(E_EnumStringer) + proto.RegisterExtension(E_EnumCustomname) + proto.RegisterExtension(E_Enumdecl) + proto.RegisterExtension(E_EnumvalueCustomname) + proto.RegisterExtension(E_GoprotoGettersAll) + proto.RegisterExtension(E_GoprotoEnumPrefixAll) + proto.RegisterExtension(E_GoprotoStringerAll) + proto.RegisterExtension(E_VerboseEqualAll) + proto.RegisterExtension(E_FaceAll) + proto.RegisterExtension(E_GostringAll) + proto.RegisterExtension(E_PopulateAll) + proto.RegisterExtension(E_StringerAll) + proto.RegisterExtension(E_OnlyoneAll) + proto.RegisterExtension(E_EqualAll) + proto.RegisterExtension(E_DescriptionAll) + 
proto.RegisterExtension(E_TestgenAll) + proto.RegisterExtension(E_BenchgenAll) + proto.RegisterExtension(E_MarshalerAll) + proto.RegisterExtension(E_UnmarshalerAll) + proto.RegisterExtension(E_StableMarshalerAll) + proto.RegisterExtension(E_SizerAll) + proto.RegisterExtension(E_GoprotoEnumStringerAll) + proto.RegisterExtension(E_EnumStringerAll) + proto.RegisterExtension(E_UnsafeMarshalerAll) + proto.RegisterExtension(E_UnsafeUnmarshalerAll) + proto.RegisterExtension(E_GoprotoExtensionsMapAll) + proto.RegisterExtension(E_GoprotoUnrecognizedAll) + proto.RegisterExtension(E_GogoprotoImport) + proto.RegisterExtension(E_ProtosizerAll) + proto.RegisterExtension(E_CompareAll) + proto.RegisterExtension(E_TypedeclAll) + proto.RegisterExtension(E_EnumdeclAll) + proto.RegisterExtension(E_GoprotoRegistration) + proto.RegisterExtension(E_MessagenameAll) + proto.RegisterExtension(E_GoprotoSizecacheAll) + proto.RegisterExtension(E_GoprotoUnkeyedAll) + proto.RegisterExtension(E_GoprotoGetters) + proto.RegisterExtension(E_GoprotoStringer) + proto.RegisterExtension(E_VerboseEqual) + proto.RegisterExtension(E_Face) + proto.RegisterExtension(E_Gostring) + proto.RegisterExtension(E_Populate) + proto.RegisterExtension(E_Stringer) + proto.RegisterExtension(E_Onlyone) + proto.RegisterExtension(E_Equal) + proto.RegisterExtension(E_Description) + proto.RegisterExtension(E_Testgen) + proto.RegisterExtension(E_Benchgen) + proto.RegisterExtension(E_Marshaler) + proto.RegisterExtension(E_Unmarshaler) + proto.RegisterExtension(E_StableMarshaler) + proto.RegisterExtension(E_Sizer) + proto.RegisterExtension(E_UnsafeMarshaler) + proto.RegisterExtension(E_UnsafeUnmarshaler) + proto.RegisterExtension(E_GoprotoExtensionsMap) + proto.RegisterExtension(E_GoprotoUnrecognized) + proto.RegisterExtension(E_Protosizer) + proto.RegisterExtension(E_Compare) + proto.RegisterExtension(E_Typedecl) + proto.RegisterExtension(E_Messagename) + proto.RegisterExtension(E_GoprotoSizecache) + proto.RegisterExtension(E_GoprotoUnkeyed) + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) + proto.RegisterExtension(E_Customname) + proto.RegisterExtension(E_Jsontag) + proto.RegisterExtension(E_Moretags) + proto.RegisterExtension(E_Casttype) + proto.RegisterExtension(E_Castkey) + proto.RegisterExtension(E_Castvalue) + proto.RegisterExtension(E_Stdtime) + proto.RegisterExtension(E_Stdduration) + proto.RegisterExtension(E_Wktpointer) +} + +func init() { proto.RegisterFile("gogo.proto", fileDescriptor_592445b5231bc2b9) } + +var fileDescriptor_592445b5231bc2b9 = []byte{ + // 1328 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45, + 0x14, 0x80, 0x85, 0x48, 0x64, 0x4f, 0x79, 0x8b, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9, + 0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x0e, 0xc6, 0x89, 0xc3, 0x76, 0x18, + 0xf5, 0xf4, 0x94, 0xdb, 0x8d, 0xbb, 0xbb, 0x9a, 0xee, 0xea, 0x10, 0xe7, 0x86, 0xc2, 0x22, 0x84, + 0xd8, 0x91, 0x20, 0x21, 0x09, 0x04, 0xc4, 0xbe, 0x86, 0x7d, 0xb9, 0x70, 0x61, 0xb9, 0xf2, 0x1f, + 0xb8, 0x00, 0x66, 0xf7, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x69, 0x8f, 0x54, 0x35, 0xb7, + 0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xef, 0x4d, 0x33, 0xe6, 0x49, 0x4f, 0x4e, 0xc6, + 0x89, 0x54, 0xb2, 0x5e, 0x83, 0xeb, 0xfc, 0x72, 0xdf, 0x7e, 0x4f, 0x4a, 0x2f, 0x10, 0x53, 0xf9, + 0x5f, 0xcd, 0x6c, 0x75, 0xaa, 0x25, 0x52, 0x37, 0xf1, 0x63, 0x25, 0x93, 0x62, 0x31, 0x3f, 0xc6, + 0xc6, 
0x70, 0x71, 0x43, 0x44, 0x59, 0xd8, 0x88, 0x13, 0xb1, 0xea, 0x9f, 0xae, 0x5f, 0x3f, 0x59, + 0x90, 0x93, 0x44, 0x4e, 0xce, 0x47, 0x59, 0x78, 0x47, 0xac, 0x7c, 0x19, 0xa5, 0x7b, 0xaf, 0xfc, + 0x72, 0xf5, 0xfe, 0xab, 0x6e, 0xe9, 0x5f, 0x1e, 0x45, 0x14, 0xfe, 0xb7, 0x94, 0x83, 0x7c, 0x99, + 0x5d, 0xd3, 0xe1, 0x4b, 0x55, 0xe2, 0x47, 0x9e, 0x48, 0x0c, 0xc6, 0xef, 0xd1, 0x38, 0xa6, 0x19, + 0x8f, 0x23, 0xca, 0xe7, 0xd8, 0x50, 0x2f, 0xae, 0x1f, 0xd0, 0x35, 0x28, 0x74, 0xc9, 0x02, 0x1b, + 0xc9, 0x25, 0x6e, 0x96, 0x2a, 0x19, 0x46, 0x4e, 0x28, 0x0c, 0x9a, 0x1f, 0x73, 0x4d, 0x6d, 0x79, + 0x18, 0xb0, 0xb9, 0x92, 0xe2, 0x9c, 0xf5, 0xc3, 0x27, 0x2d, 0xe1, 0x06, 0x06, 0xc3, 0x4f, 0xb8, + 0x91, 0x72, 0x3d, 0x3f, 0xc9, 0xc6, 0xe1, 0xfa, 0x94, 0x13, 0x64, 0x42, 0xdf, 0xc9, 0x4d, 0x5d, + 0x3d, 0x27, 0x61, 0x19, 0xc9, 0x7e, 0x3e, 0xbb, 0x2b, 0xdf, 0xce, 0x58, 0x29, 0xd0, 0xf6, 0xa4, + 0x65, 0xd1, 0x13, 0x4a, 0x89, 0x24, 0x6d, 0x38, 0x41, 0xb7, 0xed, 0x1d, 0xf1, 0x83, 0xd2, 0x78, + 0x6e, 0xb3, 0x33, 0x8b, 0x0b, 0x05, 0x39, 0x1b, 0x04, 0x7c, 0x85, 0x5d, 0xdb, 0xe5, 0xa9, 0xb0, + 0x70, 0x9e, 0x47, 0xe7, 0xf8, 0x8e, 0x27, 0x03, 0xb4, 0x4b, 0x8c, 0x3e, 0x2f, 0x73, 0x69, 0xe1, + 0x7c, 0x19, 0x9d, 0x75, 0x64, 0x29, 0xa5, 0x60, 0xbc, 0x8d, 0x8d, 0x9e, 0x12, 0x49, 0x53, 0xa6, + 0xa2, 0x21, 0x1e, 0xc8, 0x9c, 0xc0, 0x42, 0x77, 0x01, 0x75, 0x23, 0x08, 0xce, 0x03, 0x07, 0xae, + 0x83, 0xac, 0x7f, 0xd5, 0x71, 0x85, 0x85, 0xe2, 0x22, 0x2a, 0xfa, 0x60, 0x3d, 0xa0, 0xb3, 0x6c, + 0xd0, 0x93, 0xc5, 0x2d, 0x59, 0xe0, 0x97, 0x10, 0x1f, 0x20, 0x06, 0x15, 0xb1, 0x8c, 0xb3, 0xc0, + 0x51, 0x36, 0x3b, 0x78, 0x85, 0x14, 0xc4, 0xa0, 0xa2, 0x87, 0xb0, 0xbe, 0x4a, 0x8a, 0x54, 0x8b, + 0xe7, 0x0c, 0x1b, 0x90, 0x51, 0xb0, 0x21, 0x23, 0x9b, 0x4d, 0x5c, 0x46, 0x03, 0x43, 0x04, 0x04, + 0xd3, 0xac, 0x66, 0x9b, 0x88, 0x37, 0x36, 0xe9, 0x78, 0x50, 0x06, 0x16, 0xd8, 0x08, 0x15, 0x28, + 0x5f, 0x46, 0x16, 0x8a, 0x37, 0x51, 0x31, 0xac, 0x61, 0x78, 0x1b, 0x4a, 0xa4, 0xca, 0x13, 0x36, + 0x92, 0xb7, 0xe8, 0x36, 0x10, 0xc1, 0x50, 0x36, 0x45, 0xe4, 0xae, 0xd9, 0x19, 0xde, 0xa6, 0x50, + 0x12, 0x03, 0x8a, 0x39, 0x36, 0x14, 0x3a, 0x49, 0xba, 0xe6, 0x04, 0x56, 0xe9, 0x78, 0x07, 0x1d, + 0x83, 0x25, 0x84, 0x11, 0xc9, 0xa2, 0x5e, 0x34, 0xef, 0x52, 0x44, 0x34, 0x0c, 0x8f, 0x5e, 0xaa, + 0x9c, 0x66, 0x20, 0x1a, 0xbd, 0xd8, 0xde, 0xa3, 0xa3, 0x57, 0xb0, 0x8b, 0xba, 0x71, 0x9a, 0xd5, + 0x52, 0xff, 0x8c, 0x95, 0xe6, 0x7d, 0xca, 0x74, 0x0e, 0x00, 0x7c, 0x0f, 0xbb, 0xae, 0x6b, 0x9b, + 0xb0, 0x90, 0x7d, 0x80, 0xb2, 0x89, 0x2e, 0xad, 0x02, 0x4b, 0x42, 0xaf, 0xca, 0x0f, 0xa9, 0x24, + 0x88, 0x8a, 0x6b, 0x89, 0x8d, 0x67, 0x51, 0xea, 0xac, 0xf6, 0x16, 0xb5, 0x8f, 0x28, 0x6a, 0x05, + 0xdb, 0x11, 0xb5, 0x13, 0x6c, 0x02, 0x8d, 0xbd, 0xe5, 0xf5, 0x63, 0x2a, 0xac, 0x05, 0xbd, 0xd2, + 0x99, 0xdd, 0xfb, 0xd8, 0xbe, 0x32, 0x9c, 0xa7, 0x95, 0x88, 0x52, 0x60, 0x1a, 0xa1, 0x13, 0x5b, + 0x98, 0xaf, 0xa0, 0x99, 0x2a, 0xfe, 0x7c, 0x29, 0x58, 0x74, 0x62, 0x90, 0xdf, 0xcd, 0xf6, 0x92, + 0x3c, 0x8b, 0x12, 0xe1, 0x4a, 0x2f, 0xf2, 0xcf, 0x88, 0x96, 0x85, 0xfa, 0x93, 0x4a, 0xaa, 0x56, + 0x34, 0x1c, 0xcc, 0x47, 0xd9, 0x9e, 0x72, 0x56, 0x69, 0xf8, 0x61, 0x2c, 0x13, 0x65, 0x30, 0x7e, + 0x4a, 0x99, 0x2a, 0xb9, 0xa3, 0x39, 0xc6, 0xe7, 0xd9, 0x70, 0xfe, 0xa7, 0xed, 0x23, 0xf9, 0x19, + 0x8a, 0x86, 0xda, 0x14, 0x16, 0x0e, 0x57, 0x86, 0xb1, 0x93, 0xd8, 0xd4, 0xbf, 0xcf, 0xa9, 0x70, + 0x20, 0x82, 0x85, 0x43, 0x6d, 0xc4, 0x02, 0xba, 0xbd, 0x85, 0xe1, 0x0b, 0x2a, 0x1c, 0xc4, 0xa0, + 0x82, 0x06, 0x06, 0x0b, 0xc5, 0x97, 0xa4, 0x20, 0x06, 0x14, 0x77, 0xb6, 0x1b, 0x6d, 0x22, 0x3c, + 0x3f, 0x55, 0x89, 0x03, 0xab, 
0x0d, 0xaa, 0xaf, 0x36, 0x3b, 0x87, 0xb0, 0x65, 0x0d, 0x85, 0x4a, + 0x14, 0x8a, 0x34, 0x75, 0x3c, 0x01, 0x13, 0x87, 0xc5, 0xc6, 0xbe, 0xa6, 0x4a, 0xa4, 0x61, 0xb0, + 0x37, 0x6d, 0x42, 0x84, 0xb0, 0xbb, 0x8e, 0xbb, 0x66, 0xa3, 0xfb, 0xa6, 0xb2, 0xb9, 0xe3, 0xc4, + 0x82, 0x53, 0x9b, 0x7f, 0xb2, 0x68, 0x5d, 0x6c, 0x58, 0x3d, 0x9d, 0xdf, 0x56, 0xe6, 0x9f, 0x95, + 0x82, 0x2c, 0x6a, 0xc8, 0x48, 0x65, 0x9e, 0xaa, 0xdf, 0xb8, 0xc3, 0xb5, 0x58, 0xdc, 0x17, 0xe9, + 0x1e, 0xda, 0xc2, 0xfb, 0xed, 0x1c, 0xa7, 0xf8, 0xed, 0xf0, 0x90, 0x77, 0x0e, 0x3d, 0x66, 0xd9, + 0xd9, 0xad, 0xf2, 0x39, 0xef, 0x98, 0x79, 0xf8, 0x11, 0x36, 0xd4, 0x31, 0xf0, 0x98, 0x55, 0x0f, + 0xa3, 0x6a, 0x50, 0x9f, 0x77, 0xf8, 0x01, 0xb6, 0x0b, 0x86, 0x17, 0x33, 0xfe, 0x08, 0xe2, 0xf9, + 0x72, 0x7e, 0x88, 0xf5, 0xd3, 0xd0, 0x62, 0x46, 0x1f, 0x45, 0xb4, 0x44, 0x00, 0xa7, 0x81, 0xc5, + 0x8c, 0x3f, 0x46, 0x38, 0x21, 0x80, 0xdb, 0x87, 0xf0, 0xbb, 0x27, 0x76, 0x61, 0xd3, 0xa1, 0xd8, + 0x4d, 0xb3, 0x3e, 0x9c, 0x54, 0xcc, 0xf4, 0xe3, 0xf8, 0xe5, 0x44, 0xf0, 0x5b, 0xd9, 0x6e, 0xcb, + 0x80, 0x3f, 0x89, 0x68, 0xb1, 0x9e, 0xcf, 0xb1, 0x01, 0x6d, 0x3a, 0x31, 0xe3, 0x4f, 0x21, 0xae, + 0x53, 0xb0, 0x75, 0x9c, 0x4e, 0xcc, 0x82, 0xa7, 0x69, 0xeb, 0x48, 0x40, 0xd8, 0x68, 0x30, 0x31, + 0xd3, 0xcf, 0x50, 0xd4, 0x09, 0xe1, 0x33, 0xac, 0x56, 0x36, 0x1b, 0x33, 0xff, 0x2c, 0xf2, 0x6d, + 0x06, 0x22, 0xa0, 0x35, 0x3b, 0xb3, 0xe2, 0x39, 0x8a, 0x80, 0x46, 0xc1, 0x31, 0xaa, 0x0e, 0x30, + 0x66, 0xd3, 0xf3, 0x74, 0x8c, 0x2a, 0xf3, 0x0b, 0x64, 0x33, 0xaf, 0xf9, 0x66, 0xc5, 0x0b, 0x94, + 0xcd, 0x7c, 0x3d, 0x6c, 0xa3, 0x3a, 0x11, 0x98, 0x1d, 0x2f, 0xd2, 0x36, 0x2a, 0x03, 0x01, 0x5f, + 0x62, 0xf5, 0x9d, 0xd3, 0x80, 0xd9, 0xf7, 0x12, 0xfa, 0x46, 0x77, 0x0c, 0x03, 0xfc, 0x2e, 0x36, + 0xd1, 0x7d, 0x12, 0x30, 0x5b, 0xcf, 0x6d, 0x55, 0x7e, 0xbb, 0xe9, 0x83, 0x00, 0x3f, 0xd1, 0x6e, + 0x29, 0xfa, 0x14, 0x60, 0xd6, 0x9e, 0xdf, 0xea, 0x2c, 0xdc, 0xfa, 0x10, 0xc0, 0x67, 0x19, 0x6b, + 0x37, 0x60, 0xb3, 0xeb, 0x02, 0xba, 0x34, 0x08, 0x8e, 0x06, 0xf6, 0x5f, 0x33, 0x7f, 0x91, 0x8e, + 0x06, 0x12, 0x70, 0x34, 0xa8, 0xf5, 0x9a, 0xe9, 0x4b, 0x74, 0x34, 0x08, 0x81, 0x27, 0x5b, 0xeb, + 0x6e, 0x66, 0xc3, 0x65, 0x7a, 0xb2, 0x35, 0x8a, 0x1f, 0x63, 0xa3, 0x3b, 0x1a, 0xa2, 0x59, 0xf5, + 0x1a, 0xaa, 0xf6, 0x54, 0xfb, 0xa1, 0xde, 0xbc, 0xb0, 0x19, 0x9a, 0x6d, 0xaf, 0x57, 0x9a, 0x17, + 0xf6, 0x42, 0x3e, 0xcd, 0xfa, 0xa3, 0x2c, 0x08, 0xe0, 0xf0, 0xd4, 0x6f, 0xe8, 0xd2, 0x4d, 0x45, + 0xd0, 0x22, 0xc5, 0xaf, 0xdb, 0x18, 0x1d, 0x02, 0xf8, 0x01, 0xb6, 0x5b, 0x84, 0x4d, 0xd1, 0x32, + 0x91, 0xbf, 0x6d, 0x53, 0xc1, 0x84, 0xd5, 0x7c, 0x86, 0xb1, 0xe2, 0xd5, 0x08, 0x84, 0xd9, 0xc4, + 0xfe, 0xbe, 0x5d, 0xbc, 0xa5, 0xd1, 0x90, 0xb6, 0x20, 0x4f, 0x8a, 0x41, 0xb0, 0xd9, 0x29, 0xc8, + 0x33, 0x72, 0x90, 0xf5, 0xdd, 0x9f, 0xca, 0x48, 0x39, 0x9e, 0x89, 0xfe, 0x03, 0x69, 0x5a, 0x0f, + 0x01, 0x0b, 0x65, 0x22, 0x94, 0xe3, 0xa5, 0x26, 0xf6, 0x4f, 0x64, 0x4b, 0x00, 0x60, 0xd7, 0x49, + 0x95, 0xcd, 0x7d, 0xff, 0x45, 0x30, 0x01, 0xb0, 0x69, 0xb8, 0x5e, 0x17, 0x1b, 0x26, 0xf6, 0x6f, + 0xda, 0x34, 0xae, 0xe7, 0x87, 0x58, 0x0d, 0x2e, 0xf3, 0xb7, 0x4a, 0x26, 0xf8, 0x1f, 0x84, 0xdb, + 0x04, 0x7c, 0x73, 0xaa, 0x5a, 0xca, 0x37, 0x07, 0xfb, 0x5f, 0xcc, 0x34, 0xad, 0xe7, 0xb3, 0x6c, + 0x20, 0x55, 0xad, 0x56, 0x86, 0xf3, 0xa9, 0x01, 0xff, 0x6f, 0xbb, 0x7c, 0x65, 0x51, 0x32, 0x90, + 0xed, 0x07, 0xd7, 0x55, 0x2c, 0xfd, 0x48, 0x89, 0xc4, 0x64, 0xd8, 0x42, 0x83, 0x86, 0x1c, 0x9e, + 0x67, 0x63, 0xae, 0x0c, 0xab, 0xdc, 0x61, 0xb6, 0x20, 0x17, 0xe4, 0x52, 0x5e, 0x67, 0xee, 0xbd, + 0xd9, 0xf3, 0xd5, 0x5a, 0xd6, 0x9c, 0x74, 0x65, 0x38, 
0x05, 0xbf, 0x3c, 0xda, 0x2f, 0x54, 0xcb, + 0xdf, 0x21, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xaf, 0x70, 0x4e, 0x83, 0x15, 0x00, 0x00, +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden b/extender/code/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden new file mode 100644 index 000000000..f6502e4b9 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden @@ -0,0 +1,45 @@ +// Code generated by protoc-gen-go. +// source: gogo.proto +// DO NOT EDIT! + +package gogoproto + +import proto "github.com/gogo/protobuf/proto" +import json "encoding/json" +import math "math" +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + +// Reference proto, json, and math imports to suppress error if they are not otherwise used. +var _ = proto.Marshal +var _ = &json.SyntaxError{} +var _ = math.Inf + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51235, + Name: "gogoproto.nullable", + Tag: "varint,51235,opt,name=nullable", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51236, + Name: "gogoproto.embed", + Tag: "varint,51236,opt,name=embed", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 51237, + Name: "gogoproto.customtype", + Tag: "bytes,51237,opt,name=customtype", +} + +func init() { + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/extender/code/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto new file mode 100644 index 000000000..b80c85653 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto @@ -0,0 +1,144 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
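For orientation while reviewing this vendored tree: the gogo.pb.golden file just above shows the generated shape of the option machinery, one proto.ExtensionDesc per option, all registered in init(). A minimal sketch (not part of the diff) of how such an option is set and read; gogoproto.E_Nullable and the proto/descriptor helpers are from the vendored packages, while the field literal is hypothetical:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/gogoproto"
	"github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	// A hypothetical field descriptor, as protoc would hand to a plugin.
	field := &descriptor.FieldDescriptorProto{
		Name:    proto.String("created_at"),
		Options: &descriptor.FieldOptions{},
	}
	// Equivalent of writing [(gogoproto.nullable) = false] in a .proto file.
	if err := proto.SetExtension(field.Options, gogoproto.E_Nullable, proto.Bool(false)); err != nil {
		panic(err)
	}
	// GetBoolExtension falls back to the given default when the option is unset.
	fmt.Println(proto.GetBoolExtension(field.Options, gogoproto.E_Nullable, true)) // false
}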
+ +syntax = "proto2"; +package gogoproto; + +import "google/protobuf/descriptor.proto"; + +option java_package = "com.google.protobuf"; +option java_outer_classname = "GoGoProtos"; +option go_package = "github.com/gogo/protobuf/gogoproto"; + +extend google.protobuf.EnumOptions { + optional bool goproto_enum_prefix = 62001; + optional bool goproto_enum_stringer = 62021; + optional bool enum_stringer = 62022; + optional string enum_customname = 62023; + optional bool enumdecl = 62024; +} + +extend google.protobuf.EnumValueOptions { + optional string enumvalue_customname = 66001; +} + +extend google.protobuf.FileOptions { + optional bool goproto_getters_all = 63001; + optional bool goproto_enum_prefix_all = 63002; + optional bool goproto_stringer_all = 63003; + optional bool verbose_equal_all = 63004; + optional bool face_all = 63005; + optional bool gostring_all = 63006; + optional bool populate_all = 63007; + optional bool stringer_all = 63008; + optional bool onlyone_all = 63009; + + optional bool equal_all = 63013; + optional bool description_all = 63014; + optional bool testgen_all = 63015; + optional bool benchgen_all = 63016; + optional bool marshaler_all = 63017; + optional bool unmarshaler_all = 63018; + optional bool stable_marshaler_all = 63019; + + optional bool sizer_all = 63020; + + optional bool goproto_enum_stringer_all = 63021; + optional bool enum_stringer_all = 63022; + + optional bool unsafe_marshaler_all = 63023; + optional bool unsafe_unmarshaler_all = 63024; + + optional bool goproto_extensions_map_all = 63025; + optional bool goproto_unrecognized_all = 63026; + optional bool gogoproto_import = 63027; + optional bool protosizer_all = 63028; + optional bool compare_all = 63029; + optional bool typedecl_all = 63030; + optional bool enumdecl_all = 63031; + + optional bool goproto_registration = 63032; + optional bool messagename_all = 63033; + + optional bool goproto_sizecache_all = 63034; + optional bool goproto_unkeyed_all = 63035; +} + +extend google.protobuf.MessageOptions { + optional bool goproto_getters = 64001; + optional bool goproto_stringer = 64003; + optional bool verbose_equal = 64004; + optional bool face = 64005; + optional bool gostring = 64006; + optional bool populate = 64007; + optional bool stringer = 67008; + optional bool onlyone = 64009; + + optional bool equal = 64013; + optional bool description = 64014; + optional bool testgen = 64015; + optional bool benchgen = 64016; + optional bool marshaler = 64017; + optional bool unmarshaler = 64018; + optional bool stable_marshaler = 64019; + + optional bool sizer = 64020; + + optional bool unsafe_marshaler = 64023; + optional bool unsafe_unmarshaler = 64024; + + optional bool goproto_extensions_map = 64025; + optional bool goproto_unrecognized = 64026; + + optional bool protosizer = 64028; + optional bool compare = 64029; + + optional bool typedecl = 64030; + + optional bool messagename = 64033; + + optional bool goproto_sizecache = 64034; + optional bool goproto_unkeyed = 64035; +} + +extend google.protobuf.FieldOptions { + optional bool nullable = 65001; + optional bool embed = 65002; + optional string customtype = 65003; + optional string customname = 65004; + optional string jsontag = 65005; + optional string moretags = 65006; + optional string casttype = 65007; + optional string castkey = 65008; + optional string castvalue = 65009; + + optional bool stdtime = 65010; + optional bool stdduration = 65011; + optional bool wktpointer = 65012; + +} diff --git 
a/extender/code/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/extender/code/vendor/github.com/gogo/protobuf/gogoproto/helper.go new file mode 100644 index 000000000..390d4e4be --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/gogoproto/helper.go @@ -0,0 +1,415 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gogoproto + +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +import proto "github.com/gogo/protobuf/proto" + +func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Embed, false) +} + +func IsNullable(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Nullable, true) +} + +func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdtime, false) +} + +func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdduration, false) +} + +func IsStdDouble(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.DoubleValue" +} + +func IsStdFloat(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.FloatValue" +} + +func IsStdInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int64Value" +} + +func IsStdUInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt64Value" +} + +func IsStdInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int32Value" +} + +func IsStdUInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, 
E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt32Value" +} + +func IsStdBool(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BoolValue" +} + +func IsStdString(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.StringValue" +} + +func IsStdBytes(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BytesValue" +} + +func IsStdType(field *google_protobuf.FieldDescriptorProto) bool { + return (IsStdTime(field) || IsStdDuration(field) || + IsStdDouble(field) || IsStdFloat(field) || + IsStdInt64(field) || IsStdUInt64(field) || + IsStdInt32(field) || IsStdUInt32(field) || + IsStdBool(field) || + IsStdString(field) || IsStdBytes(field)) +} + +func IsWktPtr(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) +} + +func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool { + nullable := IsNullable(field) + if field.IsMessage() || IsCustomType(field) { + return nullable + } + if proto3 { + return false + } + return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES +} + +func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCustomType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastKey(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastValue(field) + if len(typ) > 0 { + return true + } + return false +} + +func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true)) +} + +func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true)) +} + +func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customtype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Casttype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castkey) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastValue(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := 
proto.GetExtension(field.Options, E_Castvalue) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool { + name := GetCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool { + name := GetEnumCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool { + name := GetEnumValueCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Jsontag) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Moretags) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool + +func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true)) +} + +func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true)) +} + +func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true)) +} + +func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false)) +} + +func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false)) +} + +func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { 
+ return proto.GetBoolExtension(message.Options, E_Equal, proto.GetBoolExtension(file.Options, E_EqualAll, false)) +} + +func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false)) +} + +func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false)) +} + +func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false)) +} + +func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false)) +} + +func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false)) +} + +func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false)) +} + +func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false)) +} + +func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false)) +} + +func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false)) +} + +func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false)) +} + +func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false)) +} + +func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false)) +} + +func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true)) +} + +func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false)) +} + +func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, 
message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false)) +} + +func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false)) +} + +func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true)) +} + +func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true)) +} + +func IsProto3(file *google_protobuf.FileDescriptorProto) bool { + return file.GetSyntax() == "proto3" +} + +func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true) +} + +func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false)) +} + +func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false) +} + +func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false)) +} + +func HasSizecache(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoSizecache, proto.GetBoolExtension(file.Options, E_GoprotoSizecacheAll, true)) +} + +func HasUnkeyed(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnkeyed, proto.GetBoolExtension(file.Options, E_GoprotoUnkeyedAll, true)) +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/Makefile b/extender/code/vendor/github.com/gogo/protobuf/proto/Makefile new file mode 100644 index 000000000..00d65f327 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/Makefile @@ -0,0 +1,43 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
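The helper.go predicates vendored above are the query side of those options; protoc-gen-gogo consults them while generating code. A short sketch of the defaults they encode, using the vendored gogoproto and descriptor packages (the empty field literal is purely illustrative):

package main

import (
	"fmt"

	"github.com/gogo/protobuf/gogoproto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	field := &descriptor.FieldDescriptorProto{} // no options set
	fmt.Println(gogoproto.IsNullable(field))    // true: nullable defaults to on
	fmt.Println(gogoproto.IsCustomType(field))  // false: no (gogoproto.customtype) present
	fmt.Println(gogoproto.GetCustomType(field)) // "": ditto
}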
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install + make -C test_proto + make -C proto3_proto + make diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/clone.go b/extender/code/vendor/github.com/gogo/protobuf/proto/clone.go new file mode 100644 index 000000000..a26b046d9 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/clone.go @@ -0,0 +1,258 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "fmt" + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(src Message) Message { + in := reflect.ValueOf(src) + if in.IsNil() { + return src + } + out := reflect.New(in.Type().Elem()) + dst := out.Interface().(Message) + Merge(dst, src) + return dst +} + +// Merger is the interface representing objects that can merge messages of the same type. +type Merger interface { + // Merge merges src into this message. 
+ // Required and optional fields that are set in src will be set to that value in dst. + // Elements of repeated fields will be appended. + // + // Merge may panic if called with a different argument type than the receiver. + Merge(src Message) +} + +// generatedMerger is the custom merge method that generated protos will have. +// We must add this method since a generated Merge method will conflict with +// many existing protos that have a Merge data field already defined. +type generatedMerger interface { + XXX_Merge(src Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. +func Merge(dst, src Message) { + if m, ok := dst.(Merger); ok { + m.Merge(src) + return + } + + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) + } + if in.IsNil() { + return // Merge from nil src is a noop + } + if m, ok := dst.(generatedMerger); ok { + m.XXX_Merge(src) + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { + emOut := out.Addr().Interface().(extensionsBytes) + bIn := emIn.GetExtensions() + bOut := emOut.GetExtensions() + *bOut = append(*bOut, *bIn...) + } else if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). +func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal.
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. + out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/extender/code/vendor/github.com/gogo/protobuf/proto/custom_gogo.go new file mode 100644 index 000000000..24552483c --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/custom_gogo.go @@ -0,0 +1,39 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
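Since clone.go just went by: Clone and Merge are the public entry points this vendored tree leans on, and their documented semantics (deep copy; repeated fields append on merge) are easy to check. A quick sketch, using a vendored descriptor message purely as a convenient generated type:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	src := &descriptor.FileDescriptorProto{
		Name:       proto.String("a.proto"),
		Dependency: []string{"x.proto"},
	}
	dst := proto.Clone(src).(*descriptor.FileDescriptorProto) // deep copy, same concrete type
	proto.Merge(dst, &descriptor.FileDescriptorProto{Dependency: []string{"y.proto"}})
	fmt.Println(dst.GetName(), dst.Dependency) // a.proto [x.proto y.proto]
}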
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "reflect" + +type custom interface { + Marshal() ([]byte, error) + Unmarshal(data []byte) error + Size() int +} + +var customType = reflect.TypeOf((*custom)(nil)).Elem() diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/decode.go b/extender/code/vendor/github.com/gogo/protobuf/proto/decode.go new file mode 100644 index 000000000..63b0f08be --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/decode.go @@ -0,0 +1,427 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. 
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. 
+func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +// Unmarshal implementations should not clear the receiver. +// Any unmarshaled data should be merged into the receiver. +// Callers of Unmarshal that do not want to retain existing data +// should Reset the receiver before calling Unmarshal. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// newUnmarshaler is the interface representing objects that can +// unmarshal themselves. The semantics are identical to Unmarshaler. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newUnmarshaler interface { + XXX_Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. 
+func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto has unfortunately been inconsistent + // about whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +// StartGroup tag is already consumed. This function consumes +// EndGroup tag. +func (p *Buffer) DecodeGroup(pb Message) error { + b := p.buf[p.index:] + x, y := findEndGroup(b) + if x < 0 { + return io.ErrUnexpectedEOF + } + err := Unmarshal(b[:x], pb) + p.index += y + return err +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + err := u.XXX_Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto has unfortunately been inconsistent + // about whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + // Slow workaround for messages that aren't Unmarshalers. + // This includes some hand-coded .pb.go files and + // bootstrap protos. + // TODO: fix all of those and then add Unmarshal to + // the Message interface. Then: + // The cast above and code below can be deleted. + // The old unmarshaler can be deleted. + // Clients can call Unmarshal directly (can already do that, actually). + var info InternalMessageInfo + err := info.Unmarshal(pb, p.buf[p.index:]) + p.index = len(p.buf) + return err +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/deprecated.go b/extender/code/vendor/github.com/gogo/protobuf/proto/deprecated.go new file mode 100644 index 000000000..35b882c09 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/deprecated.go @@ -0,0 +1,63 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors.
All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "errors" + +// Deprecated: do not use. +type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } + +// Deprecated: do not use. +func GetStats() Stats { return Stats{} } + +// Deprecated: do not use. +func MarshalMessageSet(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSet([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func MarshalMessageSetJSON(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSetJSON([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func RegisterMessageSetType(Message, int32, string) {} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/discard.go b/extender/code/vendor/github.com/gogo/protobuf/proto/discard.go new file mode 100644 index 000000000..fe1bd7d90 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/discard.go @@ -0,0 +1,350 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +type generatedDiscarder interface { + XXX_DiscardUnknown() +} + +// DiscardUnknown recursively discards all unknown fields from this message +// and all embedded messages. +// +// When unmarshaling a message with unrecognized fields, the tags and values +// of such fields are preserved in the Message. This allows a later call to +// marshal to be able to produce a message that continues to have those +// unrecognized fields. To avoid this, DiscardUnknown is used to +// explicitly clear the unknown fields after unmarshaling. +// +// For proto2 messages, the unknown fields of message extensions are only +// discarded from messages that have been accessed via GetExtension. +func DiscardUnknown(m Message) { + if m, ok := m.(generatedDiscarder); ok { + m.XXX_DiscardUnknown() + return + } + // TODO: Dynamically populate a InternalMessageInfo for legacy messages, + // but the master branch has no implementation for InternalMessageInfo, + // so it would be more work to replicate that approach. + discardLegacy(m) +} + +// DiscardUnknown recursively discards all unknown fields. +func (a *InternalMessageInfo) DiscardUnknown(m Message) { + di := atomicLoadDiscardInfo(&a.discard) + if di == nil { + di = getDiscardInfo(reflect.TypeOf(m).Elem()) + atomicStoreDiscardInfo(&a.discard, di) + } + di.discard(toPointer(&m)) +} + +type discardInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []discardFieldInfo + unrecognized field +} + +type discardFieldInfo struct { + field field // Offset of field, guaranteed to be valid + discard func(src pointer) +} + +var ( + discardInfoMap = map[reflect.Type]*discardInfo{} + discardInfoLock sync.Mutex +) + +func getDiscardInfo(t reflect.Type) *discardInfo { + discardInfoLock.Lock() + defer discardInfoLock.Unlock() + di := discardInfoMap[t] + if di == nil { + di = &discardInfo{typ: t} + discardInfoMap[t] = di + } + return di +} + +func (di *discardInfo) discard(src pointer) { + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&di.initialized) == 0 { + di.computeDiscardInfo() + } + + for _, fi := range di.fields { + sfp := src.offset(fi.field) + fi.discard(sfp) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { + // Ignore lock since DiscardUnknown is not concurrency safe. 
+ emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + DiscardUnknown(m) + } + } + } + + if di.unrecognized.IsValid() { + *src.offset(di.unrecognized).toBytes() = nil + } +} + +func (di *discardInfo) computeDiscardInfo() { + di.lock.Lock() + defer di.lock.Unlock() + if di.initialized != 0 { + return + } + t := di.typ + n := t.NumField() + + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + dfi := discardFieldInfo{field: toField(&f)} + tf := f.Type + + // Unwrap tf to get its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) + case isSlice: // E.g., []*pb.T + discardInfo := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sps := src.getPointerSlice() + for _, sp := range sps { + if !sp.isNil() { + discardInfo.discard(sp) + } + } + } + default: // E.g., *pb.T + discardInfo := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sp := src.getPointer() + if !sp.isNil() { + discardInfo.discard(sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) + default: // E.g., map[K]V + if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) + dfi.discard = func(src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + DiscardUnknown(val.Interface().(Message)) + } + } + } else { + dfi.discard = func(pointer) {} // Noop + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) + default: // E.g., interface{} + // TODO: Make this faster? + dfi.discard = func(src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + DiscardUnknown(sv.Interface().(Message)) + } + } + } + } + default: + continue + } + di.fields = append(di.fields, dfi) + } + + di.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + di.unrecognized = toField(&f) + } + + atomic.StoreInt32(&di.initialized, 1) +} + +func discardLegacy(m Message) { + v := reflect.ValueOf(m) + if v.Kind() != reflect.Ptr || v.IsNil() { + return + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return + } + t := v.Type() + + for i := 0; i < v.NumField(); i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + vf := v.Field(i) + tf := f.Type + + // Unwrap tf to get its most basic type. 
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) + case isSlice: // E.g., []*pb.T + for j := 0; j < vf.Len(); j++ { + discardLegacy(vf.Index(j).Interface().(Message)) + } + default: // E.g., *pb.T + discardLegacy(vf.Interface().(Message)) + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) + default: // E.g., map[K]V + tv := vf.Type().Elem() + if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) + for _, key := range vf.MapKeys() { + val := vf.MapIndex(key) + discardLegacy(val.Interface().(Message)) + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) + default: // E.g., test_proto.isCommunique_Union interface + if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { + vf = vf.Elem() // E.g., *test_proto.Communique_Msg + if !vf.IsNil() { + vf = vf.Elem() // E.g., test_proto.Communique_Msg + vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value + if vf.Kind() == reflect.Ptr { + discardLegacy(vf.Interface().(Message)) + } + } + } + } + } + } + + if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { + if vf.Type() != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + vf.Set(reflect.ValueOf([]byte(nil))) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(m); err == nil { + // Ignore lock since discardLegacy is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + discardLegacy(m) + } + } + } +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/duration.go b/extender/code/vendor/github.com/gogo/protobuf/proto/duration.go new file mode 100644 index 000000000..93464c91c --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
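(Editorial aside: a minimal usage sketch for the DiscardUnknown entry point added above. pb.MyMessage and its import path are hypothetical stand-ins for any generated message type; proto.Unmarshal, proto.DiscardUnknown, and proto.Marshal are the real entry points from this package.)

package main

import (
	"github.com/gogo/protobuf/proto"

	pb "example.com/project/pb" // hypothetical generated package
)

// roundTripWithoutUnknowns re-marshals raw without the unrecognized
// fields that Unmarshal would otherwise preserve in XXX_unrecognized.
func roundTripWithoutUnknowns(raw []byte) ([]byte, error) {
	var msg pb.MyMessage // hypothetical generated message type
	if err := proto.Unmarshal(raw, &msg); err != nil {
		return nil, err
	}
	proto.DiscardUnknown(&msg) // recursively drops unknown fields
	return proto.Marshal(&msg)
}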
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Range of a Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid Duration +// may still be too large to fit into a time.Duration (the range of Duration +// is about 10,000 years, and the range of time.Duration is about 290). +func validateDuration(d *duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %#v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %#v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) + } + return nil +} + +// DurationFromProto converts a Duration to a time.Duration. DurationFromProto +// returns an error if the Duration is invalid or is too large to be +// represented in a time.Duration. +func durationFromProto(p *duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a Duration. +func durationProto(d time.Duration) *duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/extender/code/vendor/github.com/gogo/protobuf/proto/duration_gogo.go new file mode 100644 index 000000000..e748e1730 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/duration_gogo.go @@ -0,0 +1,49 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. 
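(Editorial aside: durationProto and durationFromProto above are unexported, but the arithmetic they implement is simply the split of a time.Duration into whole seconds plus a nanosecond remainder with the same sign. A self-contained sketch of that split, using only the standard library:)

package main

import (
	"fmt"
	"time"
)

func main() {
	d := 90*time.Minute + 500*time.Millisecond
	nanos := d.Nanoseconds() // 5400500000000
	secs := nanos / 1e9      // 5400 whole seconds
	nanos -= secs * 1e9      // 500000000 remainder, same sign as secs
	fmt.Println(secs, int32(nanos)) // 5400 500000000
}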
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() + +type duration struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *duration) Reset() { *m = duration{} } +func (*duration) ProtoMessage() {} +func (*duration) String() string { return "duration" } + +func init() { + RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/encode.go b/extender/code/vendor/github.com/gogo/protobuf/proto/encode.go new file mode 100644 index 000000000..9581ccd30 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/encode.go @@ -0,0 +1,205 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "reflect" +) + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. + errOneofHasNil = errors.New("proto: oneof field has nil value") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") + + // ErrTooLarge is the error returned if Marshal is called with a + // message that encodes to >2GB. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 + } + return 10 +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. 
+// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + siz := Size(pb) + sizVar := SizeVarint(uint64(siz)) + p.grow(siz + sizVar) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/extender/code/vendor/github.com/gogo/protobuf/proto/encode_gogo.go new file mode 100644 index 000000000..0f5fb173e --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/encode_gogo.go @@ -0,0 +1,33 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
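(Editorial aside: a quick round trip through the exported varint helpers defined in encode.go above, plus the zigzag mapping that EncodeZigzag64 applies. DecodeVarint comes from this package's decode path; everything else here is shown in the diff.)

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

func main() {
	// 300 = 0b1_0010_1100: low 7 bits go first, the high bit of each
	// byte marks continuation.
	enc := proto.EncodeVarint(300)
	fmt.Printf("% x\n", enc) // ac 02

	v, n := proto.DecodeVarint(enc)
	fmt.Println(v, n) // 300 2

	// Zigzag interleaves signed values so small negatives stay short:
	// 0→0, -1→1, 1→2, -2→3, ...
	var x int64 = -2
	fmt.Println(uint64((x << 1) ^ (x >> 63))) // 3
}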
+ +package proto + +func NewRequiredNotSetError(field string) *RequiredNotSetError { + return &RequiredNotSetError{field} +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/equal.go b/extender/code/vendor/github.com/gogo/protobuf/proto/equal.go new file mode 100644 index 000000000..d4db5a1c1 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/equal.go @@ -0,0 +1,300 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. If the message is defined + in a proto3 .proto file, fields are not "set"; specifically, + zero length proto3 "bytes" fields are equal (nil == {}). + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal. Note a "bytes" field, + although represented by []byte, is not a repeated field and the + rule for the scalar fields described above applies. + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Two map fields are equal iff their lengths are the same, + and they contain the same set of elements. Zero-length map + fields are equal. + - Every other combination of things are not equal. 
+ +The return value is undefined if a and b are not protocol buffers. +*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. +func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_InternalExtensions") + if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + return bytes.Equal(u1, u2) +} + +// v1 and v2 are known to have the same type. +// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. + n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + // Maps may have nil values in them, so check for nil. + if v1.IsNil() && v2.IsNil() { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. 
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// x1 and x2 are InternalExtensions. +func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { + em1, _ := x1.extensionsRead() + em2, _ := x2.extensionsRead() + return equalExtMap(base, em1, em2) +} + +func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 == nil && m2 == nil { + // Both have only encoded form. + if bytes.Equal(e1.enc, e2.enc) { + continue + } + // The bytes are different, but the extensions might still be + // equal. We need to decode them to compare. + } + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + // If both have only encoded form and the bytes are the same, + // it is handled above. We get here when the bytes are different. + // We don't know how to decode it, so just compare them as byte + // slices. + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + return false + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/extensions.go b/extender/code/vendor/github.com/gogo/protobuf/proto/extensions.go new file mode 100644 index 000000000..341c6f57f --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/extensions.go @@ -0,0 +1,605 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "io" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer generated by the current +// proto compiler that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + extensionsWrite() map[int32]Extension + extensionsRead() (map[int32]Extension, sync.Locker) +} + +// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous +// version of the proto compiler that may be extended. +type extendableProtoV1 interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. +type extensionAdapter struct { + extendableProtoV1 +} + +func (e extensionAdapter) extensionsWrite() map[int32]Extension { + return e.ExtensionMap() +} + +func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + return e.ExtensionMap(), notLocker{} +} + +// notLocker is a sync.Locker whose Lock and Unlock methods are nops. +type notLocker struct{} + +func (n notLocker) Lock() {} +func (n notLocker) Unlock() {} + +// extendable returns the extendableProto interface for the given generated proto message. +// If the proto message has the old extension format, it returns a wrapper that implements +// the extendableProto interface. 
+func extendable(p interface{}) (extendableProto, error) { + switch p := p.(type) { + case extendableProto: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return p, nil + case extendableProtoV1: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return extensionAdapter{p}, nil + case extensionsBytes: + return slowExtensionAdapter{p}, nil + } + // Don't allocate a specific error containing %T: + // this is the hot path for Clone and MarshalText. + return nil, errNotExtendable +} + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +func isNilPtr(x interface{}) bool { + v := reflect.ValueOf(x) + return v.Kind() == reflect.Ptr && v.IsNil() +} + +// XXX_InternalExtensions is an internal representation of proto extensions. +// +// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, +// thus gaining the unexported 'extensions' method, which can be called only from the proto package. +// +// The methods of XXX_InternalExtensions are not concurrency safe in general, +// but calls to logically read-only methods such as has and get may be executed concurrently. +type XXX_InternalExtensions struct { + // The struct must be indirect so that if a user inadvertently copies a + // generated message and its embedded XXX_InternalExtensions, they + // avoid the mayhem of a copied mutex. + // + // The mutex serializes all logically read-only operations to p.extensionMap. + // It is up to the client to ensure that write operations to p.extensionMap are + // mutually exclusive with other accesses. + p *struct { + mu sync.Mutex + extensionMap map[int32]Extension + } +} + +// extensionsWrite returns the extension map, creating it on first use. +func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { + if e.p == nil { + e.p = new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }) + e.p.extensionMap = make(map[int32]Extension) + } + return e.p.extensionMap +} + +// extensionsRead returns the extensions map for read-only use. It may be nil. +// The caller must hold the returned mutex's lock when accessing Elements within the map. +func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { + if e.p == nil { + return nil, nil + } + return e.p.extensionMap, &e.p.mu +} + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. 
+ desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base Message, id int32, b []byte) { + if ebase, ok := base.(extensionsBytes); ok { + clearExtension(base, id) + ext := ebase.GetExtensions() + *ext = append(*ext, b...) + return + } + epb, err := extendable(base) + if err != nil { + return + } + extmap := epb.extensionsWrite() + extmap[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. +func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + var pbi interface{} = pb + // Check the extended type. + if ea, ok := pbi.(extensionAdapter); ok { + pbi = ea.extendableProtoV1 + } + if ea, ok := pbi.(slowExtensionAdapter); ok { + pbi = ea.extensionsBytes + } + if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { + return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb Message, extension *ExtensionDesc) bool { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + buf := *ext + o := 0 + for o < len(buf) { + tag, n := DecodeVarint(buf[o:]) + fieldNum := int32(tag >> 3) + if int32(fieldNum) == extension.Field { + return true + } + wireType := int(tag & 0x7) + o += n + l, err := size(buf[o:], wireType) + if err != nil { + return false + } + o += l + } + return false + } + // TODO: Check types, field numbers, etc.? + epb, err := extendable(pb) + if err != nil { + return false + } + extmap, mu := epb.extensionsRead() + if extmap == nil { + return false + } + mu.Lock() + _, ok := extmap[extension.Field] + mu.Unlock() + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb Message, extension *ExtensionDesc) { + clearExtension(pb, extension.Field) +} + +func clearExtension(pb Message, fieldNum int32) { + if epb, ok := pb.(extensionsBytes); ok { + offset := 0 + for offset != -1 { + offset = deleteExtension(epb, fieldNum, offset) + } + return + } + epb, err := extendable(pb) + if err != nil { + return + } + // TODO: Check types, field numbers, etc.? + extmap := epb.extensionsWrite() + delete(extmap, fieldNum) +} + +// GetExtension retrieves a proto2 extended field from pb. 
+// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. +func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + return decodeExtensionFromBytes(extension, *ext) + } + + epb, err := extendable(pb) + if err != nil { + return nil, err + } + + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if cerr := checkExtensionTypes(epb, extension); cerr != nil { + return nil, cerr + } + } + + emap, mu := epb.extensionsRead() + if emap == nil { + return defaultExtensionValue(extension) + } + mu.Lock() + defer mu.Unlock() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + if extension.ExtensionType == nil { + // incomplete descriptor + return e.enc, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + if extension.ExtensionType == nil { + // incomplete descriptor, so no default + return nil, ErrMissingExtension + } + + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. 
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + unmarshal := typeUnmarshaler(t, extension.Tag) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate space to store the pointer/slice. + value := reflect.New(t).Elem() + + var err error + for { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + wire := int(x) & 7 + + b, err = unmarshal(b, valToPointer(value.Addr()), wire) + if err != nil { + return nil, err + } + + if len(b) == 0 { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. +func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + if epb, ok := pb.(extensionsBytes); ok { + ClearExtension(pb, extension) + newb, err := encodeExtension(extension, value) + if err != nil { + return err + } + bb := epb.GetExtensions() + *bb = append(*bb, newb...) + return nil + } + epb, err := extendable(pb) + if err != nil { + return err + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// ClearAllExtensions clears all extensions from pb. 
+func ClearAllExtensions(pb Message) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + *ext = []byte{} + return + } + epb, err := extendable(pb) + if err != nil { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. +func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/extender/code/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go new file mode 100644 index 000000000..6f1ae120e --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go @@ -0,0 +1,389 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
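(Editorial aside: a usage sketch for the extension accessors defined in extensions.go above. pb.Base and pb.E_Extra are hypothetical placeholders for a generated extendable message and its registered ExtensionDesc; SetExtension, HasExtension, GetExtension, and proto.String are the real exported API.)

package main

import (
	"fmt"
	"log"

	"github.com/gogo/protobuf/proto"

	pb "example.com/project/pb" // hypothetical generated package
)

func main() {
	m := &pb.Base{} // hypothetical extendable message

	// SetExtension type-checks value against the descriptor's
	// ExtensionType, so it must be the exact declared type (*string here).
	if err := proto.SetExtension(m, pb.E_Extra, proto.String("hi")); err != nil {
		log.Fatal(err)
	}

	if proto.HasExtension(m, pb.E_Extra) {
		v, err := proto.GetExtension(m, pb.E_Extra)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(*v.(*string)) // hi
	}
}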
+ +package proto + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strings" + "sync" +) + +type extensionsBytes interface { + Message + ExtensionRangeArray() []ExtensionRange + GetExtensions() *[]byte +} + +type slowExtensionAdapter struct { + extensionsBytes +} + +func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension { + panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.") +} + +func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + b := s.GetExtensions() + m, err := BytesToExtensionsMap(*b) + if err != nil { + panic(err) + } + return m, notLocker{} +} + +func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { + if reflect.ValueOf(pb).IsNil() { + return ifnotset + } + value, err := GetExtension(pb, extension) + if err != nil { + return ifnotset + } + if value == nil { + return ifnotset + } + if value.(*bool) == nil { + return ifnotset + } + return *(value.(*bool)) +} + +func (this *Extension) Equal(that *Extension) bool { + if err := this.Encode(); err != nil { + return false + } + if err := that.Encode(); err != nil { + return false + } + return bytes.Equal(this.enc, that.enc) +} + +func (this *Extension) Compare(that *Extension) int { + if err := this.Encode(); err != nil { + return 1 + } + if err := that.Encode(); err != nil { + return -1 + } + return bytes.Compare(this.enc, that.enc) +} + +func SizeOfInternalExtension(m extendableProto) (n int) { + info := getMarshalInfo(reflect.TypeOf(m)) + return info.sizeV1Extensions(m.extensionsWrite()) +} + +type sortableMapElem struct { + field int32 + ext Extension +} + +func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { + s := make(sortableExtensions, 0, len(m)) + for k, v := range m { + s = append(s, &sortableMapElem{field: k, ext: v}) + } + return s +} + +type sortableExtensions []*sortableMapElem + +func (this sortableExtensions) Len() int { return len(this) } + +func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } + +func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } + +func (this sortableExtensions) String() string { + sort.Sort(this) + ss := make([]string, len(this)) + for i := range this { + ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) + } + return "map[" + strings.Join(ss, ",") + "]" +} + +func StringFromInternalExtension(m extendableProto) string { + return StringFromExtensionsMap(m.extensionsWrite()) +} + +func StringFromExtensionsMap(m map[int32]Extension) string { + return newSortableExtensionsFromMap(m).String() +} + +func StringFromExtensionsBytes(ext []byte) string { + m, err := BytesToExtensionsMap(ext) + if err != nil { + panic(err) + } + return StringFromExtensionsMap(m) +} + +func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) { + return EncodeExtensionMap(m.extensionsWrite(), data) +} + +func EncodeInternalExtensionBackwards(m extendableProto, data []byte) (n int, err error) { + return EncodeExtensionMapBackwards(m.extensionsWrite(), data) +} + +func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { + o := 0 + for _, e := range m { + if err := e.Encode(); err != nil { + return 0, err + } + n := copy(data[o:], e.enc) + if n != len(e.enc) { + return 0, io.ErrShortBuffer + } + o += n + } + return o, nil +} + +func EncodeExtensionMapBackwards(m 
map[int32]Extension, data []byte) (n int, err error) { + o := 0 + end := len(data) + for _, e := range m { + if err := e.Encode(); err != nil { + return 0, err + } + n := copy(data[end-len(e.enc):], e.enc) + if n != len(e.enc) { + return 0, io.ErrShortBuffer + } + end -= n + o += n + } + return o, nil +} + +func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { + e := m[id] + if err := e.Encode(); err != nil { + return nil, err + } + return e.enc, nil +} + +func size(buf []byte, wire int) (int, error) { + switch wire { + case WireVarint: + _, n := DecodeVarint(buf) + return n, nil + case WireFixed64: + return 8, nil + case WireBytes: + v, n := DecodeVarint(buf) + return int(v) + n, nil + case WireFixed32: + return 4, nil + case WireStartGroup: + offset := 0 + for { + u, n := DecodeVarint(buf[offset:]) + fwire := int(u & 0x7) + offset += n + if fwire == WireEndGroup { + return offset, nil + } + s, err := size(buf[offset:], wire) + if err != nil { + return 0, err + } + offset += s + } + } + return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) +} + +func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) { + m := make(map[int32]Extension) + i := 0 + for i < len(buf) { + tag, n := DecodeVarint(buf[i:]) + if n <= 0 { + return nil, fmt.Errorf("unable to decode varint") + } + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + l, err := size(buf[i+n:], wireType) + if err != nil { + return nil, err + } + end := i + int(l) + n + m[int32(fieldNum)] = Extension{enc: buf[i:end]} + i = end + } + return m, nil +} + +func NewExtension(e []byte) Extension { + ee := Extension{enc: make([]byte, len(e))} + copy(ee.enc, e) + return ee +} + +func AppendExtension(e Message, tag int32, buf []byte) { + if ee, eok := e.(extensionsBytes); eok { + ext := ee.GetExtensions() + *ext = append(*ext, buf...) + return + } + if ee, eok := e.(extendableProto); eok { + m := ee.extensionsWrite() + ext := m[int32(tag)] // may be missing + ext.enc = append(ext.enc, buf...) 
+ m[int32(tag)] = ext + } +} + +func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) { + u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType)) + ei := u.getExtElemInfo(extension) + v := value + p := toAddrPointer(&v, ei.isptr) + siz := ei.sizer(p, SizeVarint(ei.wiretag)) + buf := make([]byte, 0, siz) + return ei.marshaler(buf, p, ei.wiretag, false) +} + +func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) { + o := 0 + for o < len(buf) { + tag, n := DecodeVarint((buf)[o:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + if o+n > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + l, err := size((buf)[o+n:], wireType) + if err != nil { + return nil, err + } + if int32(fieldNum) == extension.Field { + if o+n+l > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + v, err := decodeExtension((buf)[o:o+n+l], extension) + if err != nil { + return nil, err + } + return v, nil + } + o += n + l + } + return defaultExtensionValue(extension) +} + +func (this *Extension) Encode() error { + if this.enc == nil { + var err error + this.enc, err = encodeExtension(this.desc, this.value) + if err != nil { + return err + } + } + return nil +} + +func (this Extension) GoString() string { + if err := this.Encode(); err != nil { + return fmt.Sprintf("error encoding extension: %v", err) + } + return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) +} + +func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return errors.New("proto: bad extension number; not in declared ranges") + } + return SetExtension(pb, desc, value) +} + +func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return nil, fmt.Errorf("unregistered field number %d", fieldNum) + } + return GetExtension(pb, desc) +} + +func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { + x := &XXX_InternalExtensions{ + p: new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }), + } + x.p.extensionMap = m + return *x +} + +func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { + pb := extendable.(extendableProto) + return pb.extensionsWrite() +} + +func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { + ext := pb.GetExtensions() + for offset < len(*ext) { + tag, n1 := DecodeVarint((*ext)[offset:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + n2, err := size((*ext)[offset+n1:], wireType) + if err != nil { + panic(err) + } + newOffset := offset + n1 + n2 + if fieldNum == theFieldNum { + *ext = append((*ext)[:offset], (*ext)[newOffset:]...) + return offset + } + offset = newOffset + } + return -1 +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/lib.go b/extender/code/vendor/github.com/gogo/protobuf/proto/lib.go new file mode 100644 index 000000000..80db1c155 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/lib.go @@ -0,0 +1,973 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. 
All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. 
HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Enum types do not get an Enum method. + +The simplest way to describe this is to see an example. +Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/gogo/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m 
*Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/gogo/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. +// Marshal reports this when a required field is not initialized. +// Unmarshal reports this when a required field is missing from the wire data. +type RequiredNotSetError struct{ field string } + +func (e *RequiredNotSetError) Error() string { + if e.field == "" { + return fmt.Sprintf("proto: required field not set") + } + return fmt.Sprintf("proto: required field %q not set", e.field) +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +type invalidUTF8Error struct{ field string } + +func (e *invalidUTF8Error) Error() string { + if e.field == "" { + return "proto: invalid UTF-8 detected" + } + return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) +} +func (e *invalidUTF8Error) InvalidUTF8() bool { + return true +} + +// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. +// This error should not be exposed to the external API as such errors should +// be recreated with the field information. +var errInvalidUTF8 = &invalidUTF8Error{} + +// isNonFatal reports whether the error is either a RequiredNotSet error +// or a InvalidUTF8 error. +func isNonFatal(err error) bool { + if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { + return true + } + if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { + return true + } + return false +} + +type nonFatal struct{ E error } + +// Merge merges err into nf and reports whether it was successful. +// Otherwise it returns false for any fatal non-nil errors. +func (nf *nonFatal) Merge(err error) (ok bool) { + if err == nil { + return true // not an error + } + if !isNonFatal(err) { + return false // fatal error + } + if nf.E == nil { + nf.E = err // store first instance of non-fatal error + } + return true +} + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. 
It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // read point + + deterministic bool +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. +func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +// SetDeterministic sets whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexicographical order. This is an implementation detail and +// subject to change. +func (p *Buffer) SetDeterministic(deterministic bool) { + p.deterministic = deterministic +} + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it.
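+// NOTE (review): a minimal usage sketch for the Buffer API above; this is
+// an annotation, not part of the vendored upstream file. `msg` stands in
+// for any generated Message:
+//
+//	b := NewBuffer(nil)
+//	b.SetDeterministic(true) // map fields marshal in sorted key order
+//	if err := b.Marshal(msg); err != nil {
+//		log.Fatal(err)
+//	}
+//	data := b.Bytes() // equal messages give identical bytes for this binary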
+func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. +func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + sindex := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = sindex +} + +// SetDefaults 
sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. + switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or T or []*T or []T + switch f.Kind() { + case reflect.Struct: + setDefaults(f, recur, zeros) + + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.Kind() == reflect.Ptr && e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. 
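+// NOTE (review): usage sketch for SetDefaults above (annotation, not
+// vendored code). Using the Test message from the package comment, whose
+// optional `type` field declares `def=77`:
+//
+//	test := &pb.Test{}      // Type is nil, i.e. unset
+//	proto.SetDefaults(test)
+//	fmt.Println(*test.Type) // 77, filled in from the declared default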
+type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. +func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Struct: + nestedMessage = true // non-nullable + + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr, reflect.Struct: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. 
+ sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// mapKeys returns a sort.Interface to be used for sorting the map keys. +// Map fields may have key types of non-float scalars, strings and enums. +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{vs: vs} + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. + if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + case reflect.Bool: + s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true + case reflect.String: + s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } + default: + panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. 
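+// NOTE (review): mapKeys above is what gives deterministic marshaling its
+// stable map order. A standalone sketch of the same idea with the standard
+// library (annotation, not vendored code):
+//
+//	m := map[int32]string{3: "c", 1: "a", 2: "b"}
+//	keys := reflect.ValueOf(m).MapKeys()
+//	sort.Slice(keys, func(i, j int) bool { return keys[i].Int() < keys[j].Int() })
+//	for _, k := range keys {
+//		fmt.Println(k.Int(), m[int32(k.Int())]) // 1 a, then 2 b, then 3 c
+//	}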
+func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} + +const ( + // GoGoProtoPackageIsVersion3 is referenced from generated protocol buffer files + // to assert that the code is compatible with this version of the proto package. + GoGoProtoPackageIsVersion3 = true + + // GoGoProtoPackageIsVersion2 is referenced from generated protocol buffer files + // to assert that the code is compatible with this version of the proto package. + GoGoProtoPackageIsVersion2 = true + + // GoGoProtoPackageIsVersion1 is referenced from generated protocol buffer files + // to assert that the code is compatible with this version of the proto package. + GoGoProtoPackageIsVersion1 = true +) + +// InternalMessageInfo is a type used internally by generated .pb.go files. +// This type is not intended to be used by non-generated code. +// This type is not subject to any compatibility guarantee. +type InternalMessageInfo struct { + marshal *marshalInfo + unmarshal *unmarshalInfo + merge *mergeInfo + discard *discardInfo +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/extender/code/vendor/github.com/gogo/protobuf/proto/lib_gogo.go new file mode 100644 index 000000000..b3aa39190 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/lib_gogo.go @@ -0,0 +1,50 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
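+// NOTE (review): the GoGoProtoPackageIsVersionN constants above are
+// compile-time handshakes between generated code and this runtime. A
+// generated .pb.go file pins the runtime with a blank constant along the
+// lines of (sketch; the exact form depends on the generator version):
+//
+//	const _ = proto.GoGoProtoPackageIsVersion3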
+ +package proto + +import ( + "encoding/json" + "strconv" +) + +type Sizer interface { + Size() int +} + +type ProtoSizer interface { + ProtoSize() int +} + +func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { + s, ok := m[value] + if !ok { + s = strconv.Itoa(int(value)) + } + return json.Marshal(s) +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/message_set.go b/extender/code/vendor/github.com/gogo/protobuf/proto/message_set.go new file mode 100644 index 000000000..f48a75676 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/message_set.go @@ -0,0 +1,181 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "errors" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. +var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. 
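+// NOTE (review): sketch of the two JSON enum helpers defined earlier,
+// MarshalJSONEnum (lib_gogo.go) and UnmarshalJSONEnum (lib.go); an
+// annotation, not vendored code:
+//
+//	names := map[int32]string{17: "X"}
+//	values := map[string]int32{"X": 17}
+//	b, _ := MarshalJSONEnum(names, 17) // `"X"`
+//	b, _ = MarshalJSONEnum(names, 99)  // `"99"`: unknown values fall back to the number
+//	v, _ := UnmarshalJSONEnum(values, []byte(`"X"`), "FOO") // 17, symbolic form
+//	v, _ = UnmarshalJSONEnum(values, []byte(`17`), "FOO")   // 17, numeric form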
+type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + return ms.find(pb) != nil +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? +} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func unmarshalMessageSet(buf []byte, exts interface{}) error { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m = exts.extensionsWrite() + case map[int32]Extension: + m = exts + default: + return errors.New("proto: not an extension map") + } + + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/extender/code/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go new file mode 100644 index 000000000..b6cad9083 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go @@ -0,0 +1,357 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "reflect" + "sync" +) + +const unsafeAllowed = false + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. +type pointer struct { + v reflect.Value +} + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + return pointer{v: u} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{v: v} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} +} + +func (p pointer) isNil() bool { + return p.v.IsNil() +} + +// grow updates the slice s in place to make it one element longer. +// s must be addressable. +// Returns the (addressable) new element. 
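+// NOTE (review): sketch of how this reflect-based build resolves a struct
+// field: a "field" is an index path fed to FieldByIndex rather than a byte
+// offset (annotation, not vendored code):
+//
+//	type msg struct{ Label *string }
+//	var m msg
+//	sf, _ := reflect.TypeOf(m).FieldByName("Label")
+//	f := sf.Index                                           // the field identifier
+//	pp := reflect.ValueOf(&m).Elem().FieldByIndex(f).Addr() // pointer.offset equivalent
+//	s := "hi"
+//	*(pp.Interface().(**string)) = &s // m.Label now points at s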
+func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() + if n < m { + s.SetLen(n + 1) + } else { + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) + } + return s.Index(n) +} + +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) +} +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) +} +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) +} + +var int32ptr = reflect.TypeOf((*int32)(nil)) + +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) +} + +// The toInt32Ptr/Slice methods don't work because of enums. +// Instead, we must use set/get methods for the int32ptr/slice case. +/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) +} + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) +} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) + } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. + // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! + p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) +} + +// getInt32Slice copies []int32 from p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) + } + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. + slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s +} + +// setInt32Slice copies []int32 into p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) + return + } + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. 
+ slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) + } + p.v.Elem().Set(slice) +} +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) +} + +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) +} +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) +} +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) +} +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) +} +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) +} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) +} +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) +} +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) +} +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) +} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) +} +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) +} +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) +} +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) +} +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) +} +func (p pointer) toFloat32Slice() *[]float32 { + return p.v.Interface().(*[]float32) +} +func (p pointer) toString() *string { + return p.v.Interface().(*string) +} +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) +} +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) +} +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) +} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return p.v.Interface().(*XXX_InternalExtensions) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) +} + +// getPointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s +} + +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) + return + } + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) + } + p.v.Elem().Set(s) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} + } + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct +} + +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? 
+ return p.v +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} + +var atomicLock sync.Mutex diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/extender/code/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go new file mode 100644 index 000000000..7ffd3c29d --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go @@ -0,0 +1,59 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "reflect" +) + +// TODO: untested, so probably incorrect. 
+ +func (p pointer) getRef() pointer { + return pointer{v: p.v.Addr()} +} + +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) +} + +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/extender/code/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go new file mode 100644 index 000000000..d55a335d9 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,308 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "sync/atomic" + "unsafe" +) + +const unsafeAllowed = true + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// zeroField is a noop when calling pointer.offset. +const zeroField = field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != invalidField +} + +// The pointer type below is for the new table-driven encoder/decoder. +// The implementation here uses unsafe.Pointer to create a generic pointer. +// In pointer_reflect.go we use reflect instead of unsafe to implement +// the same (but slower) interface. 
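+// NOTE (review): the "data word" trick used by toPointer below assumes the
+// gc runtime's two-word interface layout: (type, data). A standalone sketch
+// under that assumption (annotation, not vendored code):
+//
+//	type payload struct{ A int }
+//	var i interface{} = &payload{A: 7}
+//	data := (*[2]unsafe.Pointer)(unsafe.Pointer(&i))[1] // the data word
+//	fmt.Println((*payload)(data).A)                     // 7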
+type pointer struct { + p unsafe.Pointer +} + +// size of pointer +var ptrSize = unsafe.Sizeof(uintptr(0)) + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + // Super-tricky - read pointer out of data word of interface value. + // Saves ~25ns over the equivalent: + // return valToPointer(reflect.ValueOf(*i)) + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + // Super-tricky - read or get the address of data word of interface value. + if isptr { + // The interface is of pointer type, thus it is a direct interface. + // The data word is the pointer data itself. We take its address. + return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } + // The interface is not of pointer type. The data word is the pointer + // to the data. + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + // For safety, we should panic if !f.IsValid, however calling panic causes + // this to no longer be inlineable, which is a serious performance cost. + /* + if !f.IsValid() { + panic("invalid field") + } + */ + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} +} + +func (p pointer) isNil() bool { + return p.p == nil +} + +func (p pointer) toInt64() *int64 { + return (*int64)(p.p) +} +func (p pointer) toInt64Ptr() **int64 { + return (**int64)(p.p) +} +func (p pointer) toInt64Slice() *[]int64 { + return (*[]int64)(p.p) +} +func (p pointer) toInt32() *int32 { + return (*int32)(p.p) +} + +// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. +/* + func (p pointer) toInt32Ptr() **int32 { + return (**int32)(p.p) + } + func (p pointer) toInt32Slice() *[]int32 { + return (*[]int32)(p.p) + } +*/ +func (p pointer) getInt32Ptr() *int32 { + return *(**int32)(p.p) +} +func (p pointer) setInt32Ptr(v int32) { + *(**int32)(p.p) = &v +} + +// getInt32Slice loads a []int32 from p. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getInt32Slice() []int32 { + return *(*[]int32)(p.p) +} + +// setInt32Slice stores a []int32 to p. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setInt32Slice(v []int32) { + *(*[]int32)(p.p) = v +} + +// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? 
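+// NOTE (review): in this build a "field" is a byte offset, so pointer.offset
+// above is plain pointer arithmetic. Standalone sketch (annotation, not
+// vendored code):
+//
+//	type pair struct{ A, B int32 }
+//	p := pair{A: 1, B: 2}
+//	off := unsafe.Offsetof(p.B) // the field identifier
+//	bp := (*int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&p)) + off))
+//	*bp = 42 // p.B is now 42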
+func (p pointer) appendInt32Slice(v int32) { + s := (*[]int32)(p.p) + *s = append(*s, v) +} + +func (p pointer) toUint64() *uint64 { + return (*uint64)(p.p) +} +func (p pointer) toUint64Ptr() **uint64 { + return (**uint64)(p.p) +} +func (p pointer) toUint64Slice() *[]uint64 { + return (*[]uint64)(p.p) +} +func (p pointer) toUint32() *uint32 { + return (*uint32)(p.p) +} +func (p pointer) toUint32Ptr() **uint32 { + return (**uint32)(p.p) +} +func (p pointer) toUint32Slice() *[]uint32 { + return (*[]uint32)(p.p) +} +func (p pointer) toBool() *bool { + return (*bool)(p.p) +} +func (p pointer) toBoolPtr() **bool { + return (**bool)(p.p) +} +func (p pointer) toBoolSlice() *[]bool { + return (*[]bool)(p.p) +} +func (p pointer) toFloat64() *float64 { + return (*float64)(p.p) +} +func (p pointer) toFloat64Ptr() **float64 { + return (**float64)(p.p) +} +func (p pointer) toFloat64Slice() *[]float64 { + return (*[]float64)(p.p) +} +func (p pointer) toFloat32() *float32 { + return (*float32)(p.p) +} +func (p pointer) toFloat32Ptr() **float32 { + return (**float32)(p.p) +} +func (p pointer) toFloat32Slice() *[]float32 { + return (*[]float32)(p.p) +} +func (p pointer) toString() *string { + return (*string)(p.p) +} +func (p pointer) toStringPtr() **string { + return (**string)(p.p) +} +func (p pointer) toStringSlice() *[]string { + return (*[]string)(p.p) +} +func (p pointer) toBytes() *[]byte { + return (*[]byte)(p.p) +} +func (p pointer) toBytesSlice() *[][]byte { + return (*[][]byte)(p.p) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(p.p) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return (*map[int32]Extension)(p.p) +} + +// getPointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getPointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) +} + +// setPointerSlice stores []pointer into p as a []*T. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setPointerSlice(v []pointer) { + // Super-tricky - p should point to a []*T where T is a + // message type. We store it as []pointer. + *(*[]pointer)(p.p) = v +} + +// getPointer loads the pointer at p and returns it. +func (p pointer) getPointer() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} +} + +// setPointer stores the pointer q at p. +func (p pointer) setPointer(q pointer) { + *(*unsafe.Pointer)(p.p) = q.p +} + +// append q to the slice pointed to by p. +func (p pointer) appendPointer(q pointer) { + s := (*[]unsafe.Pointer)(p.p) + *s = append(*s, q.p) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + // Super-tricky - read pointer out of data word of interface value. + return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} +} + +// asPointerTo returns a reflect.Value that is a pointer to an +// object of type t stored at p. 
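+// NOTE (review): the aliasing in getInt32Slice/setInt32Slice above works
+// because unsafe can reinterpret a []enum header as []int32 in place, which
+// package reflect cannot (hence the copies in pointer_reflect.go). Sketch
+// (annotation, not vendored code):
+//
+//	type enum int32
+//	e := []enum{1, 2}
+//	ints := *(*[]int32)(unsafe.Pointer(&e)) // same backing array, no copy
+//	ints[0] = 42                            // e[0] is now 42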
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/extender/code/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go new file mode 100644 index 000000000..aca8eed02 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go @@ -0,0 +1,56 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. 
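+// NOTE (review): the atomicLoad*/atomicStore* helpers above implement
+// lock-free lazy initialization of the cached *marshalInfo and friends. A
+// generic sketch of the pattern (annotation, not vendored code):
+//
+//	type info struct{ ready bool }
+//	var cached *info
+//	pp := (*unsafe.Pointer)(unsafe.Pointer(&cached))
+//	if atomic.LoadPointer(pp) == nil {
+//		atomic.StorePointer(pp, unsafe.Pointer(&info{ready: true}))
+//	}
+//	got := (*info)(atomic.LoadPointer(pp)) // never observes a torn pointer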
+ +package proto + +import ( + "reflect" + "unsafe" +) + +func (p pointer) getRef() pointer { + return pointer{p: (unsafe.Pointer)(&p.p)} +} + +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) +} + +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/properties.go b/extender/code/vendor/github.com/gogo/protobuf/proto/properties.go new file mode 100644 index 000000000..28da1475f --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/properties.go @@ -0,0 +1,610 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. 
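+// (Editor's note.) Tags in (0, tagMapFastLimit) hit the fastTags slice, so
+// get is a bounds check plus a slice index; everything else falls back to
+// the slowTags map.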
+const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. +type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + JSONName string // name to use for JSON; determined by protoc + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field + oneof bool // whether this is a oneof field + + Default string // default value + HasDefault bool // whether an explicit default was provided + CustomType string + CastType string + StdTime bool + StdDuration bool + WktPointer bool + + stype reflect.Type // set for struct types only + ctype reflect.Type // set for custom types only + sprop *StructProperties // set for struct types only + + mtype reflect.Type // set for map types only + MapKeyProp *Properties // set for map types only + MapValProp *Properties // set for map types only +} + +// String formats the properties in the protobuf struct field tag style. 
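+// For example (editor's note), a proto2 optional varint field with tag 1
+// and original name "id" prints as "varint,1,opt,name=id"; Parse below is
+// the inverse operation.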
+func (p *Properties) String() string { + s := p.Wire + s += "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != p.OrigName { + s += ",json=" + p.JSONName + } + if p.proto3 { + s += ",proto3" + } + if p.oneof { + s += ",oneof" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. + if len(fields) < 2 { + log.Printf("proto: tag has too few fields: %q", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + case "fixed32": + p.WireType = WireFixed32 + case "fixed64": + p.WireType = WireFixed64 + case "zigzag32": + p.WireType = WireVarint + case "zigzag64": + p.WireType = WireVarint + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + log.Printf("proto: tag has unknown wire type: %q", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + +outer: + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break outer + } + case strings.HasPrefix(f, "embedded="): + p.OrigName = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "customtype="): + p.CustomType = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "casttype="): + p.CastType = strings.Split(f, "=")[1] + case f == "stdtime": + p.StdTime = true + case f == "stdduration": + p.StdDuration = true + case f == "wktptr": + p.WktPointer = true + } + } +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// setFieldProps initializes the field properties for submessages and maps. 
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + isMap := typ.Kind() == reflect.Map + if len(p.CustomType) > 0 && !isMap { + p.ctype = typ + p.setTag(lockGetProp) + return + } + if p.StdTime && !isMap { + p.setTag(lockGetProp) + return + } + if p.StdDuration && !isMap { + p.setTag(lockGetProp) + return + } + if p.WktPointer && !isMap { + p.setTag(lockGetProp) + return + } + switch t1 := typ; t1.Kind() { + case reflect.Struct: + p.stype = typ + case reflect.Ptr: + if t1.Elem().Kind() == reflect.Struct { + p.stype = t1.Elem() + } + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + case reflect.Struct: + p.stype = t3 + } + case reflect.Struct: + p.stype = t2 + } + + case reflect.Map: + + p.mtype = t1 + p.MapKeyProp = &Properties{} + p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.MapValProp = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + + p.MapValProp.CustomType = p.CustomType + p.MapValProp.StdDuration = p.StdDuration + p.MapValProp.StdTime = p.StdTime + p.MapValProp.WktPointer = p.WktPointer + p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + p.setTag(lockGetProp) +} + +func (p *Properties) setTag(lockGetProp bool) { + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() +) + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if tag == "" { + return + } + p.Parse(tag) + p.setFieldProps(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +type ( + oneofFuncsIface interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + oneofWrappersIface interface { + XXX_OneofWrappers() []interface{} + } +) + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + return prop + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. 
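+	// (Editor's note.) Registering the placeholder before the p.init calls
+	// below means a recursive message type finds this entry instead of
+	// re-entering getPropertiesLocked forever.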
+ propertiesMap[t] = prop + + // build properties + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + isOneofMessage := false + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + isOneofMessage = true + // Oneof fields don't use the traditional protobuf tag. + p.OrigName = oneof + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + if isOneofMessage { + var oots []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oots = m.XXX_OneofFuncs() + case oneofWrappersIface: + oots = m.XXX_OneofWrappers() + } + if len(oots) > 0 { + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) +var enumStringMaps = make(map[string]map[int32]string) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap + if _, ok := enumStringMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumStringMaps[typeName] = unusedNameMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). 
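+// Generated code typically calls RegisterType((*SomeMessage)(nil), "pkg.SomeMessage")
+// from an init function (editor's example; SomeMessage is illustrative).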
+var ( + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypedNils[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. +func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. +func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. +func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/extender/code/vendor/github.com/gogo/protobuf/proto/properties_gogo.go new file mode 100644 index 000000000..40ea3dd93 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/properties_gogo.go @@ -0,0 +1,36 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" +) + +var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem() +var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem() diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/extender/code/vendor/github.com/gogo/protobuf/proto/skip_gogo.go new file mode 100644 index 000000000..5a5fd93f7 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/skip_gogo.go @@ -0,0 +1,119 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
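+
+// Editor's note (not upstream): Skip below consumes exactly one key/value
+// pair of wire-format data. Every pair starts with a varint key
+// (fieldNumber<<3 | wireType); varints store seven bits per byte, least
+// significant group first, with the high bit set on all but the last byte.
+// A minimal decoder, mirroring Skip's inner loops (data and index are
+// assumed from the surrounding code):
+//
+//	var x uint64
+//	for shift := uint(0); ; shift += 7 {
+//		b := data[index]
+//		index++
+//		x |= uint64(b&0x7F) << shift
+//		if b < 0x80 {
+//			break
+//		}
+//	}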
+ +package proto + +import ( + "fmt" + "io" +) + +func Skip(data []byte) (n int, err error) { + l := len(data) + index := 0 + for index < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + index++ + if data[index-1] < 0x80 { + break + } + } + return index, nil + case 1: + index += 8 + return index, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + index += length + return index, nil + case 3: + for { + var innerWire uint64 + var start int = index + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := Skip(data[start:]) + if err != nil { + return 0, err + } + index = start + next + } + return index, nil + case 4: + return index, nil + case 5: + index += 4 + return index, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/extender/code/vendor/github.com/gogo/protobuf/proto/table_marshal.go new file mode 100644 index 000000000..f8babdefa --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/table_marshal.go @@ -0,0 +1,3009 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
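+
+// Editor's note (not upstream): this file implements table-driven
+// marshaling. computeMarshalInfo reflects over a message type once and
+// records, per field, a (sizer, marshaler) function pair picked from the
+// specialized helpers below; later Size/Marshal calls only walk the cached
+// table, so the reflection cost is paid once per type rather than once per
+// message.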
+ +package proto + +import ( + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// a sizer takes a pointer to a field and the size of its tag, computes the size of +// the encoded data. +type sizer func(pointer, int) int + +// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), +// marshals the field to the end of the slice, returns the slice and error (if any). +type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) + +// marshalInfo is the information used for marshaling a message. +type marshalInfo struct { + typ reflect.Type + fields []*marshalFieldInfo + unrecognized field // offset of XXX_unrecognized + extensions field // offset of XXX_InternalExtensions + v1extensions field // offset of XXX_extensions + sizecache field // offset of XXX_sizecache + initialized int32 // 0 -- only typ is set, 1 -- fully initialized + messageset bool // uses message set wire format + hasmarshaler bool // has custom marshaler + sync.RWMutex // protect extElems map, also for initialization + extElems map[int32]*marshalElemInfo // info of extension elements + + hassizer bool // has custom sizer + hasprotosizer bool // has custom protosizer + + bytesExtensions field // offset of XXX_extensions where the field type is []byte +} + +// marshalFieldInfo is the information used for marshaling a field of a message. +type marshalFieldInfo struct { + field field + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isPointer bool + required bool // field is required + name string // name of the field, for error reporting + oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements +} + +// marshalElemInfo is the information used for marshaling an extension or oneof element. +type marshalElemInfo struct { + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) +} + +var ( + marshalInfoMap = map[reflect.Type]*marshalInfo{} + marshalInfoLock sync.Mutex + + uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind() +) + +// getMarshalInfo returns the information to marshal a given type of message. +// The info it returns may not necessarily initialized. +// t is the type of the message (NOT the pointer to it). +func getMarshalInfo(t reflect.Type) *marshalInfo { + marshalInfoLock.Lock() + u, ok := marshalInfoMap[t] + if !ok { + u = &marshalInfo{typ: t} + marshalInfoMap[t] = u + } + marshalInfoLock.Unlock() + return u +} + +// Size is the entry point from generated code, +// and should be ONLY called by generated code. +// It computes the size of encoded data of msg. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Size(msg Message) int { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return 0 + } + return u.size(ptr) +} + +// Marshal is the entry point from generated code, +// and should be ONLY called by generated code. +// It marshals msg to the end of b. +// a is a pointer to a place to store cached marshal info. 
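+// The encoding of msg is appended to b and the extended slice is returned,
+// so callers can marshal several messages into one buffer (editor's note).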
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return b, ErrNil + } + return u.marshal(b, ptr, deterministic) +} + +func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { + // u := a.marshal, but atomically. + // We use an atomic here to ensure memory consistency. + u := atomicLoadMarshalInfo(&a.marshal) + if u == nil { + // Get marshal information from type of message. + t := reflect.ValueOf(msg).Type() + if t.Kind() != reflect.Ptr { + panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) + } + u = getMarshalInfo(t.Elem()) + // Store it in the cache for later users. + // a.marshal = u, but atomically. + atomicStoreMarshalInfo(&a.marshal, u) + } + return u +} + +// size is the main function to compute the size of the encoded data of a message. +// ptr is the pointer to the message. +func (u *marshalInfo) size(ptr pointer) int { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if u.hasmarshaler { + // Uses the message's Size method if available + if u.hassizer { + s := ptr.asPointerTo(u.typ).Interface().(Sizer) + return s.Size() + } + // Uses the message's ProtoSize method if available + if u.hasprotosizer { + s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer) + return s.ProtoSize() + } + + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b, _ := m.Marshal() + return len(b) + } + + n := 0 + for _, f := range u.fields { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + n += f.sizer(ptr.offset(f.field), f.tagsize) + } + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + n += u.sizeMessageSet(e) + } else { + n += u.sizeExtensions(e) + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + n += u.sizeV1Extensions(m) + } + if u.bytesExtensions.IsValid() { + s := *ptr.offset(u.bytesExtensions).toBytes() + n += len(s) + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + n += len(s) + } + + // cache the result for use in marshal + if u.sizecache.IsValid() { + atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) + } + return n +} + +// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), +// fall back to compute the size. +func (u *marshalInfo) cachedsize(ptr pointer) int { + if u.sizecache.IsValid() { + return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) + } + return u.size(ptr) +} + +// marshal is the main function to marshal a message. It takes a byte slice and appends +// the encoded data to the end of the slice, returns the slice and error (if any). +// ptr is the pointer to the message. +// If deterministic is true, map is marshaled in deterministic order. +func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
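+	// (Editor's note.) The custom-marshaler path allocates and copies a
+	// full intermediate []byte on every call, which is why the note above
+	// calls it inefficient relative to the table-driven path.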
+ if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b1, err := m.Marshal() + b = append(b, b1...) + return b, err + } + + var err, errLater error + // The old marshaler encodes extensions at beginning. + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + b, err = u.appendMessageSet(b, e, deterministic) + } else { + b, err = u.appendExtensions(b, e, deterministic) + } + if err != nil { + return b, err + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + b, err = u.appendV1Extensions(b, m, deterministic) + if err != nil { + return b, err + } + } + if u.bytesExtensions.IsValid() { + s := *ptr.offset(u.bytesExtensions).toBytes() + b = append(b, s...) + } + for _, f := range u.fields { + if f.required { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // Required field is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name} + } + continue + } + } + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) + if err != nil { + if err1, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name + "." + err1.field} + } + continue + } + if err == errRepeatedHasNil { + err = errors.New("proto: repeated field " + f.name + " has nil element") + } + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } + return b, err + } + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + b = append(b, s...) + } + return b, errLater +} + +// computeMarshalInfo initializes the marshal info. +func (u *marshalInfo) computeMarshalInfo() { + u.Lock() + defer u.Unlock() + if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock + return + } + + t := u.typ + u.unrecognized = invalidField + u.extensions = invalidField + u.v1extensions = invalidField + u.bytesExtensions = invalidField + u.sizecache = invalidField + isOneofMessage := false + + if reflect.PtrTo(t).Implements(sizerType) { + u.hassizer = true + } + if reflect.PtrTo(t).Implements(protosizerType) { + u.hasprotosizer = true + } + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
+ if reflect.PtrTo(t).Implements(marshalerType) { + u.hasmarshaler = true + atomic.StoreInt32(&u.initialized, 1) + return + } + + n := t.NumField() + + // deal with XXX fields first + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Tag.Get("protobuf_oneof") != "" { + isOneofMessage = true + } + if !strings.HasPrefix(f.Name, "XXX_") { + continue + } + switch f.Name { + case "XXX_sizecache": + u.sizecache = toField(&f) + case "XXX_unrecognized": + u.unrecognized = toField(&f) + case "XXX_InternalExtensions": + u.extensions = toField(&f) + u.messageset = f.Tag.Get("protobuf_messageset") == "1" + case "XXX_extensions": + if f.Type.Kind() == reflect.Map { + u.v1extensions = toField(&f) + } else { + u.bytesExtensions = toField(&f) + } + case "XXX_NoUnkeyedLiteral": + // nothing to do + default: + panic("unknown XXX field: " + f.Name) + } + n-- + } + + // get oneof implementers + var oneofImplementers []interface{} + // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler + if isOneofMessage { + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() + } + } + + // normal fields + fields := make([]marshalFieldInfo, n) // batch allocation + u.fields = make([]*marshalFieldInfo, 0, n) + for i, j := 0, 0; i < t.NumField(); i++ { + f := t.Field(i) + + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + field := &fields[j] + j++ + field.name = f.Name + u.fields = append(u.fields, field) + if f.Tag.Get("protobuf_oneof") != "" { + field.computeOneofFieldInfo(&f, oneofImplementers) + continue + } + if f.Tag.Get("protobuf") == "" { + // field has no tag (not in generated message), ignore it + u.fields = u.fields[:len(u.fields)-1] + j-- + continue + } + field.computeMarshalFieldInfo(&f) + } + + // fields are marshaled in tag order on the wire. + sort.Sort(byTag(u.fields)) + + atomic.StoreInt32(&u.initialized, 1) +} + +// helper for sorting fields by tag +type byTag []*marshalFieldInfo + +func (a byTag) Len() int { return len(a) } +func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } + +// getExtElemInfo returns the information to marshal an extension element. +// The info it returns is initialized. +func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { + // get from cache first + u.RLock() + e, ok := u.extElems[desc.Field] + u.RUnlock() + if ok { + return e + } + + t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct + tags := strings.Split(desc.Tag, ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(t, tags, false, false) + e = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + isptr: t.Kind() == reflect.Ptr, + } + + // update cache + u.Lock() + if u.extElems == nil { + u.extElems = make(map[int32]*marshalElemInfo) + } + u.extElems[desc.Field] = e + u.Unlock() + return e +} + +// computeMarshalFieldInfo fills up the information to marshal a field. +func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { + // parse protobuf tag of the field. + // tag has format of "bytes,49,opt,name=foo,def=hello!" 
+ tags := strings.Split(f.Tag.Get("protobuf"), ",") + if tags[0] == "" { + return + } + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if tags[2] == "req" { + fi.required = true + } + fi.setTag(f, tag, wt) + fi.setMarshaler(f, tags) +} + +func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { + fi.field = toField(f) + fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.isPointer = true + fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) + fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) + + ityp := f.Type // interface type + for _, o := range oneofImplementers { + t := reflect.TypeOf(o) + if !t.Implements(ityp) { + continue + } + sf := t.Elem().Field(0) // oneof implementer is a struct with a single field + tags := strings.Split(sf.Tag.Get("protobuf"), ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + fi.oneofElems[t.Elem()] = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + } + } +} + +// wiretype returns the wire encoding of the type. +func wiretype(encoding string) uint64 { + switch encoding { + case "fixed32": + return WireFixed32 + case "fixed64": + return WireFixed64 + case "varint", "zigzag32", "zigzag64": + return WireVarint + case "bytes": + return WireBytes + case "group": + return WireStartGroup + } + panic("unknown wire type " + encoding) +} + +// setTag fills up the tag (in wire format) and its size in the info of a field. +func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { + fi.field = toField(f) + fi.wiretag = uint64(tag)<<3 | wt + fi.tagsize = SizeVarint(uint64(tag) << 3) +} + +// setMarshaler fills up the sizer and marshaler in the info of a field. +func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { + switch f.Type.Kind() { + case reflect.Map: + // map field + fi.isPointer = true + fi.sizer, fi.marshaler = makeMapMarshaler(f) + return + case reflect.Ptr, reflect.Slice: + fi.isPointer = true + } + fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) +} + +// typeMarshaler returns the sizer and marshaler of a given field. +// t is the type of the field. +// tags is the generated "protobuf" tag of the field. +// If nozero is true, zero value is not marshaled to the wire. +// If oneof is true, it is a oneof field. 
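+// (Editor's note.) The dispatch below first peels slice and pointer
+// wrappers off t, then switches on the element kind and wire encoding;
+// the nozero variants implement proto3's rule that scalar zero values are
+// omitted from the wire.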
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { + encoding := tags[0] + + pointer := false + slice := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + packed := false + proto3 := false + ctype := false + isTime := false + isDuration := false + isWktPointer := false + validateUTF8 := true + for i := 2; i < len(tags); i++ { + if tags[i] == "packed" { + packed = true + } + if tags[i] == "proto3" { + proto3 = true + } + if strings.HasPrefix(tags[i], "customtype=") { + ctype = true + } + if tags[i] == "stdtime" { + isTime = true + } + if tags[i] == "stdduration" { + isDuration = true + } + if tags[i] == "wktptr" { + isWktPointer = true + } + } + validateUTF8 = validateUTF8 && proto3 + if !proto3 && !pointer && !slice { + nozero = false + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + if pointer { + return makeCustomPtrMarshaler(getMarshalInfo(t)) + } + return makeCustomMarshaler(getMarshalInfo(t)) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeTimePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeTimePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeTimeSliceMarshaler(getMarshalInfo(t)) + } + return makeTimeMarshaler(getMarshalInfo(t)) + } + + if isDuration { + if pointer { + if slice { + return makeDurationPtrSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationPtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeDurationSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationMarshaler(getMarshalInfo(t)) + } + + if isWktPointer { + switch t.Kind() { + case reflect.Float64: + if pointer { + if slice { + return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdDoubleValueMarshaler(getMarshalInfo(t)) + case reflect.Float32: + if pointer { + if slice { + return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdFloatValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdFloatValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdFloatValueMarshaler(getMarshalInfo(t)) + case reflect.Int64: + if pointer { + if slice { + return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt64ValueMarshaler(getMarshalInfo(t)) + case reflect.Uint64: + if pointer { + if slice { + return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt64ValueMarshaler(getMarshalInfo(t)) + case reflect.Int32: + if pointer { + if slice { + return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt32ValueMarshaler(getMarshalInfo(t)) + case reflect.Uint32: + if pointer { + if slice { + return 
makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt32ValueMarshaler(getMarshalInfo(t)) + case reflect.Bool: + if pointer { + if slice { + return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBoolValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdBoolValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBoolValueMarshaler(getMarshalInfo(t)) + case reflect.String: + if pointer { + if slice { + return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdStringValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdStringValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdStringValueMarshaler(getMarshalInfo(t)) + case uint8SliceType: + if pointer { + if slice { + return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBytesValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdBytesValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBytesValueMarshaler(getMarshalInfo(t)) + default: + panic(fmt.Sprintf("unknown wktpointer type %#v", t)) + } + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return sizeBoolPtr, appendBoolPtr + } + if slice { + if packed { + return sizeBoolPackedSlice, appendBoolPackedSlice + } + return sizeBoolSlice, appendBoolSlice + } + if nozero { + return sizeBoolValueNoZero, appendBoolValueNoZero + } + return sizeBoolValue, appendBoolValue + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixed32Ptr, appendFixed32Ptr + } + if slice { + if packed { + return sizeFixed32PackedSlice, appendFixed32PackedSlice + } + return sizeFixed32Slice, appendFixed32Slice + } + if nozero { + return sizeFixed32ValueNoZero, appendFixed32ValueNoZero + } + return sizeFixed32Value, appendFixed32Value + case "varint": + if pointer { + return sizeVarint32Ptr, appendVarint32Ptr + } + if slice { + if packed { + return sizeVarint32PackedSlice, appendVarint32PackedSlice + } + return sizeVarint32Slice, appendVarint32Slice + } + if nozero { + return sizeVarint32ValueNoZero, appendVarint32ValueNoZero + } + return sizeVarint32Value, appendVarint32Value + } + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixedS32Ptr, appendFixedS32Ptr + } + if slice { + if packed { + return sizeFixedS32PackedSlice, appendFixedS32PackedSlice + } + return sizeFixedS32Slice, appendFixedS32Slice + } + if nozero { + return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero + } + return sizeFixedS32Value, appendFixedS32Value + case "varint": + if pointer { + return sizeVarintS32Ptr, appendVarintS32Ptr + } + if slice { + if packed { + return sizeVarintS32PackedSlice, appendVarintS32PackedSlice + } + return sizeVarintS32Slice, appendVarintS32Slice + } + if nozero { + return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero + } + return sizeVarintS32Value, appendVarintS32Value + case "zigzag32": + if pointer { + return sizeZigzag32Ptr, appendZigzag32Ptr + } + if slice { + if packed { + return sizeZigzag32PackedSlice, appendZigzag32PackedSlice + } + return sizeZigzag32Slice, appendZigzag32Slice + } + if nozero { + return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero + } + return sizeZigzag32Value, appendZigzag32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixed64Ptr, 
appendFixed64Ptr + } + if slice { + if packed { + return sizeFixed64PackedSlice, appendFixed64PackedSlice + } + return sizeFixed64Slice, appendFixed64Slice + } + if nozero { + return sizeFixed64ValueNoZero, appendFixed64ValueNoZero + } + return sizeFixed64Value, appendFixed64Value + case "varint": + if pointer { + return sizeVarint64Ptr, appendVarint64Ptr + } + if slice { + if packed { + return sizeVarint64PackedSlice, appendVarint64PackedSlice + } + return sizeVarint64Slice, appendVarint64Slice + } + if nozero { + return sizeVarint64ValueNoZero, appendVarint64ValueNoZero + } + return sizeVarint64Value, appendVarint64Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixedS64Ptr, appendFixedS64Ptr + } + if slice { + if packed { + return sizeFixedS64PackedSlice, appendFixedS64PackedSlice + } + return sizeFixedS64Slice, appendFixedS64Slice + } + if nozero { + return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero + } + return sizeFixedS64Value, appendFixedS64Value + case "varint": + if pointer { + return sizeVarintS64Ptr, appendVarintS64Ptr + } + if slice { + if packed { + return sizeVarintS64PackedSlice, appendVarintS64PackedSlice + } + return sizeVarintS64Slice, appendVarintS64Slice + } + if nozero { + return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero + } + return sizeVarintS64Value, appendVarintS64Value + case "zigzag64": + if pointer { + return sizeZigzag64Ptr, appendZigzag64Ptr + } + if slice { + if packed { + return sizeZigzag64PackedSlice, appendZigzag64PackedSlice + } + return sizeZigzag64Slice, appendZigzag64Slice + } + if nozero { + return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero + } + return sizeZigzag64Value, appendZigzag64Value + } + case reflect.Float32: + if pointer { + return sizeFloat32Ptr, appendFloat32Ptr + } + if slice { + if packed { + return sizeFloat32PackedSlice, appendFloat32PackedSlice + } + return sizeFloat32Slice, appendFloat32Slice + } + if nozero { + return sizeFloat32ValueNoZero, appendFloat32ValueNoZero + } + return sizeFloat32Value, appendFloat32Value + case reflect.Float64: + if pointer { + return sizeFloat64Ptr, appendFloat64Ptr + } + if slice { + if packed { + return sizeFloat64PackedSlice, appendFloat64PackedSlice + } + return sizeFloat64Slice, appendFloat64Slice + } + if nozero { + return sizeFloat64ValueNoZero, appendFloat64ValueNoZero + } + return sizeFloat64Value, appendFloat64Value + case reflect.String: + if validateUTF8 { + if pointer { + return sizeStringPtr, appendUTF8StringPtr + } + if slice { + return sizeStringSlice, appendUTF8StringSlice + } + if nozero { + return sizeStringValueNoZero, appendUTF8StringValueNoZero + } + return sizeStringValue, appendUTF8StringValue + } + if pointer { + return sizeStringPtr, appendStringPtr + } + if slice { + return sizeStringSlice, appendStringSlice + } + if nozero { + return sizeStringValueNoZero, appendStringValueNoZero + } + return sizeStringValue, appendStringValue + case reflect.Slice: + if slice { + return sizeBytesSlice, appendBytesSlice + } + if oneof { + // Oneof bytes field may also have "proto3" tag. + // We want to marshal it as a oneof field. Do this + // check before the proto3 check. 
+ return sizeBytesOneof, appendBytesOneof + } + if proto3 { + return sizeBytes3, appendBytes3 + } + return sizeBytes, appendBytes + case reflect.Struct: + switch encoding { + case "group": + if slice { + return makeGroupSliceMarshaler(getMarshalInfo(t)) + } + return makeGroupMarshaler(getMarshalInfo(t)) + case "bytes": + if pointer { + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } else { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageRefMarshaler(getMarshalInfo(t)) + } + } + } + panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) +} + +// Below are functions to size/marshal a specific type of a field. +// They are stored in the field's info, and called by function pointers. +// They have type sizer or marshaler. + +func sizeFixed32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixedS32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFloat32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + return (4 + tagsize) * len(s) +} +func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixed64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFixedS64Value(_ pointer, tagsize 
int) int { + return 8 + tagsize +} +func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFloat64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + return (8 + tagsize) * len(s) +} +func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeVarint32Value(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarint32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarint64Value(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + return SizeVarint(v) + tagsize +} +func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return SizeVarint(v) + tagsize +} +func sizeVarint64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return SizeVarint(*p) + tagsize +} +func sizeVarint64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(v) + tagsize + } + return 
n +} +func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize + } + return n +} +func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize + } + return n +} +func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeBoolValue(_ pointer, tagsize int) int { + return 1 + tagsize +} +func sizeBoolValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toBool() + if !v { + return 0 + } + return 1 + tagsize +} +func sizeBoolPtr(ptr pointer, tagsize int) int { + p := *ptr.toBoolPtr() + if p == nil { + return 0 + } + return 1 + tagsize +} +func sizeBoolSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + return (1 + tagsize) * len(s) +} 
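+// Aside on the sizers above: the zigzag variants implement protobuf's
+// standard sint32/sint64 mapping, which interleaves negative and positive
+// values (0→0, -1→1, 1→2, -2→3, …) so that small magnitudes of either sign
+// encode as short varints. A minimal standalone sketch of the 32-bit mapping
+// (a hypothetical helper, not part of this file):
+//
+//	func zigzag32(v int32) uint64 {
+//		return uint64((uint32(v) << 1) ^ uint32(v>>31))
+//	}
+//
+//	// zigzag32(-1) == 1, zigzag32(1) == 2, zigzag32(-64) == 127
+//
+// The *PackedSlice sizers, by contrast with the plain slice sizers, charge
+// the field key and a varint byte length once per field rather than one key
+// per element, which is why they sum the payload first and add the length
+// varint at the end.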
+func sizeBoolPackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return 0 + } + return len(s) + SizeVarint(uint64(len(s))) + tagsize +} +func sizeStringValue(ptr pointer, tagsize int) int { + v := *ptr.toString() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toString() + if v == "" { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringPtr(ptr pointer, tagsize int) int { + p := *ptr.toStringPtr() + if p == nil { + return 0 + } + v := *p + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringSlice(ptr pointer, tagsize int) int { + s := *ptr.toStringSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} +func sizeBytes(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if v == nil { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytes3(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if len(v) == 0 { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesOneof(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesSlice(ptr pointer, tagsize int) int { + s := *ptr.toBytesSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} + +// appendFixed32 appends an encoded fixed32 to b. +func appendFixed32(b []byte, v uint32) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24)) + return b +} + +// appendFixed64 appends an encoded fixed64 to b. +func appendFixed64(b []byte, v uint64) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) + return b +} + +// appendVarint appends an encoded varint to b. +func appendVarint(b []byte, v uint64) []byte { + // TODO: make 1-byte (maybe 2-byte) case inline-able, once we + // have non-leaf inliner. 
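+	// Wire layout reminder: each byte carries seven payload bits, least
+	// significant group first, and the high bit flags continuation. For
+	// example, 300 (binary 10_0101100) encodes as 0xAC 0x02: 0xAC is
+	// 0x2C|0x80 (the low seven bits with the continuation bit set) and
+	// 0x02 is the remaining two bits.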
+ switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte(v&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, *p) + return b, nil +} +func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(*p)) + return b, nil +} +func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(*p)) + return b, nil +} +func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, *p) + return b, nil +} +func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, 
nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(*p)) + return b, nil +} +func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(*p)) + return b, nil +} +func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() 
+ if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, *p) + return b, nil +} +func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + } + return b, nil +} +func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, v) + } + return b, nil +} +func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + if !v { + return b, nil + } + b = appendVarint(b, wiretag) + b = append(b, 1) + return b, nil +} + +func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toBoolPtr() + if p == nil { + 
return b, nil + } + b = appendVarint(b, wiretag) + if *p { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(len(s))) + for _, v := range s { + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + s := *ptr.toStringSlice() + for _, v := range s { + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if v == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) 
+ return b, nil +} +func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if len(v) == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBytesSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} + +// makeGroupMarshaler returns the sizer and marshaler for a group. +// u is the marshal info of the underlying message. +func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + return u.size(p) + 2*tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + var err error + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, p, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + return b, err + } +} + +// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. +// u is the marshal info of the underlying message. +func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + n += u.size(v) + 2*tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, v, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMessageMarshaler returns the sizer and marshaler for a message field. +// u is the marshal info of the message. +func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.size(p) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(p) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, p, deterministic) + } +} + +// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. +// u is the marshal info of the message. 
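+// Each element is framed like a singular embedded message: key, varint byte
+// length, then the element's bytes, so a repeated message field is just such
+// records laid end to end (e.g. a 3-byte submessage on field number 4 is
+// 0x22 0x03 followed by its bytes, since 4<<3|2 == 0x22). Contrast
+// makeGroupSliceMarshaler above, which brackets each element with
+// start-group/end-group keys instead of a length prefix.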
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. +func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + tags := strings.Split(f.Tag.Get("protobuf"), ",") + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + stdOptions := false + for _, t := range tags { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + stdOptions = true + } + if t == "stdduration" { + valTags = append(valTags, t) + stdOptions = true + } + if t == "wktptr" { + valTags = append(valTags, t) + } + } + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. + // If value is pointer-typed, the interface is a direct interface, the + // idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. + valIsPtr := valType.Kind() == reflect.Ptr + + // If value is a message with nested maps, calling + // valSizer in marshal may be quadratic. We should use + // cached version in marshal (but not in size). + // If value is not message type, we don't have size cache, + // but it cannot be nested either. Just use valSizer. + valCachedSizer := valSizer + if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct { + u := getMarshalInfo(valType.Elem()) + valCachedSizer = func(ptr pointer, tagsize int) int { + // Same as message sizer, but use cache. 
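+			// (The cached size was computed by the preceding size pass, so
+			// marshaling maps whose values are messages stays linear instead
+			// of re-walking every nested submessage here.)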
+ p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.cachedsize(p) + return siz + SizeVarint(uint64(siz)) + tagsize + } + } + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + + var nerr nonFatal + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) + if !nerr.Merge(err) { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != ErrNil && !nerr.Merge(err) { // allow nil value in map + return b, err + } + } + return b, nerr.E + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. +// f is the pointer to the reflect data structure of the field. +func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. + t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. +func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
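+// Go randomizes map iteration order, so with more than one extension present
+// the extension numbers are sorted before emission; the zero-or-one fast path
+// below can skip that, since ordering is trivially stable there.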
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). +func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. +func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. 
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if !nerr.Merge(err) { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. +func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + var nerr nonFatal + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. 
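+//
+// Size and Marshal below probe for this interface first (the fast path emitted
+// by the code generator), then for the legacy Marshaler interface, and only
+// fall back to reflection through InternalMessageInfo when neither is present.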
+type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. +// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. +// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if p.deterministic { + if _, ok := pb.(Marshaler); ok { + return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb) + } + } + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + pp := p.buf[len(p.buf) : len(p.buf) : len(p.buf)+siz] + pp, err = m.XXX_Marshal(pp, p.deterministic) + p.buf = append(p.buf, pp...) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + var b []byte + b, err = m.Marshal() + p.buf = append(p.buf, b...) + return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to the +// buffer without another allocation. +func (p *Buffer) grow(n int) { + need := len(p.buf) + n + if need <= cap(p.buf) { + return + } + newCap := len(p.buf) * 2 + if newCap < need { + newCap = need + } + p.buf = append(make([]byte, 0, newCap), p.buf...) +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/extender/code/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go new file mode 100644 index 000000000..997f57c1e --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go @@ -0,0 +1,388 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"reflect"
+	"time"
+)
+
+// makeMessageRefMarshaler differs a bit from makeMessageMarshaler.
+// It marshals a message T instead of a *T.
+func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+		siz := u.size(ptr)
+		return siz + SizeVarint(uint64(siz)) + tagsize
+	},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			b = appendVarint(b, wiretag)
+			siz := u.cachedsize(ptr)
+			b = appendVarint(b, uint64(siz))
+			return u.marshal(b, ptr, deterministic)
+		}
+}
+
+// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler.
+// It marshals a slice of messages []T instead of []*T.
+func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+		s := ptr.getSlice(u.typ)
+		n := 0
+		for i := 0; i < s.Len(); i++ {
+			elem := s.Index(i)
+			e := elem.Interface()
+			v := toAddrPointer(&e, false)
+			siz := u.size(v)
+			n += siz + SizeVarint(uint64(siz)) + tagsize
+		}
+		return n
+	},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			var err, errreq error
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				e := elem.Interface()
+				v := toAddrPointer(&e, false)
+				b = appendVarint(b, wiretag)
+				siz := u.size(v)
+				b = appendVarint(b, uint64(siz))
+				b, err = u.marshal(b, v, deterministic)
+
+				if err != nil {
+					if _, ok := err.(*RequiredNotSetError); ok {
+						// Required field in submessage is not set.
+						// We record the error but keep going, to give a complete marshaling.
+ if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + + return b, errreq + } +} + +func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + return b, nil + } +} + +func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + return b, nil + } +} + +func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/table_merge.go b/extender/code/vendor/github.com/gogo/protobuf/proto/table_merge.go new file mode 100644 index 000000000..60dcf70d1 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/table_merge.go @@ -0,0 +1,676 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +// Merge merges the src message into dst. +// This assumes that dst and src of the same type and are non-nil. +func (a *InternalMessageInfo) Merge(dst, src Message) { + mi := atomicLoadMergeInfo(&a.merge) + if mi == nil { + mi = getMergeInfo(reflect.TypeOf(dst).Elem()) + atomicStoreMergeInfo(&a.merge, mi) + } + mi.merge(toPointer(&dst), toPointer(&src)) +} + +type mergeInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []mergeFieldInfo + unrecognized field // Offset of XXX_unrecognized +} + +type mergeFieldInfo struct { + field field // Offset of field, guaranteed to be valid + + // isPointer reports whether the value in the field is a pointer. + // This is true for the following situations: + // * Pointer to struct + // * Pointer to basic type (proto2 only) + // * Slice (first value in slice header is a pointer) + // * String (first value in string header is a pointer) + isPointer bool + + // basicWidth reports the width of the field assuming that it is directly + // embedded in the struct (as is the case for basic types in proto3). + // The possible values are: + // 0: invalid + // 1: bool + // 4: int32, uint32, float32 + // 8: int64, uint64, float64 + basicWidth int + + // Where dst and src are pointers to the types being merged. + merge func(dst, src pointer) +} + +var ( + mergeInfoMap = map[reflect.Type]*mergeInfo{} + mergeInfoLock sync.Mutex +) + +func getMergeInfo(t reflect.Type) *mergeInfo { + mergeInfoLock.Lock() + defer mergeInfoLock.Unlock() + mi := mergeInfoMap[t] + if mi == nil { + mi = &mergeInfo{typ: t} + mergeInfoMap[t] = mi + } + return mi +} + +// merge merges src into dst assuming they are both of type *mi.typ. +func (mi *mergeInfo) merge(dst, src pointer) { + if dst.isNil() { + panic("proto: nil destination") + } + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&mi.initialized) == 0 { + mi.computeMergeInfo() + } + + for _, fi := range mi.fields { + sfp := src.offset(fi.field) + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string + continue + } + if fi.basicWidth > 0 { + switch { + case fi.basicWidth == 1 && !*sfp.toBool(): + continue + case fi.basicWidth == 4 && *sfp.toUint32() == 0: + continue + case fi.basicWidth == 8 && *sfp.toUint64() == 0: + continue + } + } + } + + dfp := dst.offset(fi.field) + fi.merge(dfp, sfp) + } + + // TODO: Make this faster? 
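The fast path above skips fields whose source is the zero value, which matches the merge semantics for plain (non-pointer) fields: non-zero scalars overwrite, repeated fields append. A hand-written sketch of what the generated merge reduces to (Msg is hypothetical, not from this package):

type Msg struct {
	Count int32
	Tags  []string
}

func mergeMsg(dst, src *Msg) {
	if src.Count != 0 { // zero scalars are no-ops, as in the basicWidth check
		dst.Count = src.Count
	}
	if src.Tags != nil { // repeated fields append rather than replace
		dst.Tags = append(dst.Tags, src.Tags...)
	}
}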
+ out := dst.asPointerTo(mi.typ).Elem() + in := src.asPointerTo(mi.typ).Elem() + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + if mi.unrecognized.IsValid() { + if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { + *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) + } + } +} + +func (mi *mergeInfo) computeMergeInfo() { + mi.lock.Lock() + defer mi.lock.Unlock() + if mi.initialized != 0 { + return + } + t := mi.typ + n := t.NumField() + + props := GetProperties(t) + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + mfi := mergeFieldInfo{field: toField(&f)} + tf := f.Type + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + switch tf.Kind() { + case reflect.Ptr, reflect.Slice, reflect.String: + // As a special case, we assume slices and strings are pointers + // since we know that the first field in the SliceSlice or + // StringHeader is a data pointer. + mfi.isPointer = true + case reflect.Bool: + mfi.basicWidth = 1 + case reflect.Int32, reflect.Uint32, reflect.Float32: + mfi.basicWidth = 4 + case reflect.Int64, reflect.Uint64, reflect.Float64: + mfi.basicWidth = 8 + } + } + + // Unwrap tf to get at its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + tf.Name()) + } + + switch tf.Kind() { + case reflect.Int32: + switch { + case isSlice: // E.g., []int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Slice is not defined (see pointer_reflect.go). + /* + sfsp := src.toInt32Slice() + if *sfsp != nil { + dfsp := dst.toInt32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + */ + sfs := src.getInt32Slice() + if sfs != nil { + dfs := dst.getInt32Slice() + dfs = append(dfs, sfs...) + if dfs == nil { + dfs = []int32{} + } + dst.setInt32Slice(dfs) + } + } + case isPointer: // E.g., *int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). + /* + sfpp := src.toInt32Ptr() + if *sfpp != nil { + dfpp := dst.toInt32Ptr() + if *dfpp == nil { + *dfpp = Int32(**sfpp) + } else { + **dfpp = **sfpp + } + } + */ + sfp := src.getInt32Ptr() + if sfp != nil { + dfp := dst.getInt32Ptr() + if dfp == nil { + dst.setInt32Ptr(*sfp) + } else { + *dfp = *sfp + } + } + } + default: // E.g., int32 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt32(); v != 0 { + *dst.toInt32() = v + } + } + } + case reflect.Int64: + switch { + case isSlice: // E.g., []int64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toInt64Slice() + if *sfsp != nil { + dfsp := dst.toInt64Slice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) + } + } + } + } + case reflect.Struct: + switch { + case isSlice && !isPointer: // E.g. []pb.T + mergeInfo := getMergeInfo(tf) + zero := reflect.Zero(tf) + mfi.merge = func(dst, src pointer) { + // TODO: Make this faster? 
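Every pointer case in this switch shares one shape: a nil source is a no-op, a nil destination allocates a copy, and otherwise the value is overwritten in place. The same logic for a bare *string, without the pointer abstraction (sketch only):

func mergeStringPtr(dst **string, src *string) {
	if src == nil {
		return // nothing to merge
	}
	if *dst == nil {
		v := *src
		*dst = &v // first write allocates a fresh value
		return
	}
	**dst = *src // later writes overwrite in place
}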
+ dstsp := dst.asPointerTo(f.Type) + dsts := dstsp.Elem() + srcs := src.asPointerTo(f.Type).Elem() + for i := 0; i < srcs.Len(); i++ { + dsts = reflect.Append(dsts, zero) + srcElement := srcs.Index(i).Addr() + dstElement := dsts.Index(dsts.Len() - 1).Addr() + mergeInfo.merge(valToPointer(dstElement), valToPointer(srcElement)) + } + if dsts.IsNil() { + dsts = reflect.MakeSlice(f.Type, 0, 0) + } + dstsp.Elem().Set(dsts) + } + case !isPointer: + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + mergeInfo.merge(dst, src) + } + case isSlice: // E.g., []*pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mergeInfo.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mergeInfo.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? + mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/extender/code/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go new file mode 100644 index 000000000..937229386 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go @@ -0,0 +1,2249 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// Unmarshal is the entry point from the generated .pb.go files. +// This function is not intended to be used by non-generated code. +// This function is not subject to any compatibility guarantee. +// msg contains a pointer to a protocol buffer struct. +// b is the data to be unmarshaled into the protocol buffer. +// a is a pointer to a place to store cached unmarshal information. +func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { + // Load the unmarshal information for this message type. + // The atomic load ensures memory consistency. + u := atomicLoadUnmarshalInfo(&a.unmarshal) + if u == nil { + // Slow path: find unmarshal info for msg, update a with it. + u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) + atomicStoreUnmarshalInfo(&a.unmarshal, u) + } + // Then do the unmarshaling. 
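The dispatch loop that follows reads one varint key per field and splits it into a field number (high bits) and a wire type (low three bits). The same split using only the standard library (parseKey is an illustrative stand-in for the vendored decodeVarint path):

import (
	"encoding/binary"
	"io"
)

func parseKey(b []byte) (fieldNum uint64, wireType int, rest []byte, err error) {
	x, n := binary.Uvarint(b)
	if n <= 0 {
		return 0, 0, nil, io.ErrUnexpectedEOF
	}
	return x >> 3, int(x & 7), b[n:], nil
}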
+ err := u.unmarshal(toPointer(&msg), b) + return err +} + +type unmarshalInfo struct { + typ reflect.Type // type of the protobuf struct + + // 0 = only typ field is initialized + // 1 = completely initialized + initialized int32 + lock sync.Mutex // prevents double initialization + dense []unmarshalFieldInfo // fields indexed by tag # + sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # + reqFields []string // names of required fields + reqMask uint64 // 1<<len(reqFields)-1 when reqFields is all set + unrecognized field // offset of []byte to put unrecognized data (or invalidField if we should throw it away) + extensions field // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist + oldExtensions field // offset of old-form extensions field (of type map[int]Extension) + bytesExtensions field // offset of XXX_extensions with type []byte + extensionRanges []ExtensionRange // if non-zero, the extensions field and its ranges + isMessageSet bool // if true, implies extensions field is []byte +} + +// An unmarshaler takes a stream of bytes and a pointer to a field of a message. +// It decodes the field, stores it at f, and returns the unused bytes. +// w is the wire encoding. +// b is the data after the tag and wire encoding have been read. +type unmarshaler func(b []byte, f pointer, w int) ([]byte, error) + +type unmarshalFieldInfo struct { + // location of the field in the proto message structure. + field field + + // function to unmarshal the data for the field. + unmarshal unmarshaler + + // if a required field, contains a single set bit at this field's index in the required field list. + reqMask uint64 + + name string // name of the field, for error reporting +} + +var ( + unmarshalInfoMap = map[reflect.Type]*unmarshalInfo{} + unmarshalInfoLock sync.Mutex +) + +// getUnmarshalInfo returns the data structure which can be +// subsequently used to unmarshal a message of the given type. +// t is the type of the message (note: not pointer to message). +func getUnmarshalInfo(t reflect.Type) *unmarshalInfo { + // It would be correct to return a new unmarshalInfo + // unconditionally. We would end up allocating one + // per occurrence of that type as a message or submessage. + // We use a cache here just to reduce memory usage. + unmarshalInfoLock.Lock() + defer unmarshalInfoLock.Unlock() + u := unmarshalInfoMap[t] + if u == nil { + u = &unmarshalInfo{typ: t} + // Note: we just set the type here. The rest of the fields + // will be initialized on first use. + unmarshalInfoMap[t] = u + } + return u +} + +// unmarshal does the main work of unmarshaling a message. +// u provides type information used to unmarshal the message. +// m is a pointer to a protocol buffer message. +// b is a byte stream to unmarshal into m. +// This is top routine used when recursively unmarshaling submessages. +func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeUnmarshalInfo() + } + if u.isMessageSet { + return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions()) + } + var reqMask uint64 // bitmask of required fields we've seen. + var errLater error + for len(b) > 0 { + // Read tag and wire type. + // Special case 1 and 2 byte varints. + var x uint64 + if b[0] < 128 { + x = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + x = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + x, n = decodeVarint(b) + if n == 0 { + return io.ErrUnexpectedEOF + } + b = b[n:] + } + tag := x >> 3 + wire := int(x) & 7 + + // Dispatch on the tag to one of the unmarshal* functions below. + var f unmarshalFieldInfo + if tag < uint64(len(u.dense)) { + f = u.dense[tag] + } else { + f = u.sparse[tag] + } + if fn := f.unmarshal; fn != nil { + var err error + b, err = fn(b, m.offset(f.field), wire) + if err == nil { + reqMask |= f.reqMask + continue + } + if r, ok := err.(*RequiredNotSetError); ok { + // Remember this error, but keep parsing. We need to produce + // a full parse even if a required field is missing. + if errLater == nil { + errLater = r + } + reqMask |= f.reqMask + continue + } + if err != errInternalBadWireType { + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } + return err + } + // Fragments with bad wire type are treated as unknown fields. + } + + // Unknown tag. + if !u.unrecognized.IsValid() { + // Don't keep unrecognized data; just skip it. + var err error + b, err = skipField(b, wire) + if err != nil { + return err + } + continue + } + // Keep unrecognized data around. + // maybe in extensions, maybe in the unrecognized field. + z := m.offset(u.unrecognized).toBytes() + var emap map[int32]Extension + var e Extension + for _, r := range u.extensionRanges { + if uint64(r.Start) <= tag && tag <= uint64(r.End) { + if u.extensions.IsValid() { + mp := m.offset(u.extensions).toExtensions() + emap = mp.extensionsWrite() + e = emap[int32(tag)] + z = &e.enc + break + } + if u.oldExtensions.IsValid() { + p := m.offset(u.oldExtensions).toOldExtensions() + emap = *p + if emap == nil { + emap = map[int32]Extension{} + *p = emap + } + e = emap[int32(tag)] + z = &e.enc + break + } + if u.bytesExtensions.IsValid() { + z = m.offset(u.bytesExtensions).toBytes() + break + } + panic("no extensions field available") + } + } + // Use wire type to skip data. + var err error + b0 := b + b, err = skipField(b, wire) + if err != nil { + return err + } + *z = encodeVarint(*z, tag<<3|uint64(wire)) + *z = append(*z, b0[:len(b0)-len(b)]...) + + if emap != nil { + emap[int32(tag)] = e + } + } + if reqMask != u.reqMask && errLater == nil { + // A required field of this message is missing. + for _, n := range u.reqFields { + if reqMask&1 == 0 { + errLater = &RequiredNotSetError{n} + } + reqMask >>= 1 + } + } + return errLater +} + +// computeUnmarshalInfo fills in u with information for use +// in unmarshaling protocol buffers of type u.typ.
+func (u *unmarshalInfo) computeUnmarshalInfo() { + u.lock.Lock() + defer u.lock.Unlock() + if u.initialized != 0 { + return + } + t := u.typ + n := t.NumField() + + // Set up the "not found" value for the unrecognized byte buffer. + // This is the default for proto3. + u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + u.bytesExtensions = invalidField + + // List of the generated type and offset for each oneof field. + type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) { + u.oldExtensions = toField(&f) + continue + } else if f.Type == reflect.TypeOf(([]byte)(nil)) { + u.bytesExtensions = toField(&f) + continue + } + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. + continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) + } + tag, err := strconv.Atoi(tagArray[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tagArray[1]) + } + + name := "" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Extract unmarshaling function from the field (its type and tags). + unmarshal := fieldUnmarshaler(&f) + + // Required field? + var reqMask uint64 + if tagArray[2] == "req" { + bit := len(u.reqFields) + u.reqFields = append(u.reqFields, name) + reqMask = uint64(1) << uint(bit) + // TODO: if we have more than 64 required fields, we end up + // not verifying that all required fields are present. + // Fix this, perhaps using a count of required fields? + } + + // Store the info in the correct slot in the message. + u.setTag(tag, toField(&f), unmarshal, reqMask, name) + } + + // Find any types associated with oneof fields. 
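computeUnmarshalInfo drives everything off the generated struct tags, e.g. protobuf:"varint,1,req,name=id": the encoding first, then the field number, then cardinality and name= options. A stripped-down version of that parse, for illustration only (tagInfo and parseProtoTag are not the vendored API):

import (
	"strconv"
	"strings"
)

type tagInfo struct {
	encoding string // "varint", "bytes", "fixed64", ...
	num      int    // wire field number
	required bool
	name     string
}

func parseProtoTag(tag string) (tagInfo, bool) {
	parts := strings.Split(tag, ",")
	if len(parts) < 2 {
		return tagInfo{}, false // too short, as the panic above guards
	}
	num, err := strconv.Atoi(parts[1])
	if err != nil {
		return tagInfo{}, false
	}
	ti := tagInfo{encoding: parts[0], num: num}
	if len(parts) > 2 {
		ti.required = parts[2] == "req"
		for _, opt := range parts[3:] {
			if strings.HasPrefix(opt, "name=") {
				ti.name = strings.TrimPrefix(opt, "name=")
			}
		}
	}
	return ti, true
}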
+ // gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler + if len(oneofFields) > 0 { + var oneofImplementers []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() + } + for _, v := range oneofImplementers { + tptr := reflect.TypeOf(v) // *Msg_X + typ := tptr.Elem() // Msg_X + + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tags := strings.Split(f.Tag.Get("protobuf"), ",") + fieldNum, err := strconv.Atoi(tags[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tags[1]) + } + var name string + for _, tag := range tags { + if strings.HasPrefix(tag, "name=") { + name = strings.TrimPrefix(tag, "name=") + break + } + } + + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. + for _, of := range oneofFields { + if tptr.Implements(of.ityp) { + // We have found the corresponding interface for this struct. + // That lets us know where this struct should be stored + // when we encounter it during unmarshaling. + unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) + u.setTag(fieldNum, of.field, unmarshal, 0, name) + } + } + + } + } + + // Get extension ranges, if any. + fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") + if fn.IsValid() { + if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() { + panic("a message with extensions, but no extensions field in " + t.Name()) + } + u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) + } + + // Explicitly disallow tag 0. This will ensure we flag an error + // when decoding a buffer of all zeros. Without this code, we + // would decode and skip an all-zero buffer of even length. + // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. + u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { + return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) + }, 0, "") + + // Set mask for required field check. + u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1 + + atomic.StoreInt32(&u.initialized, 1) +} + +// setTag stores the unmarshal information for the given tag. +// tag = tag # for field +// field/unmarshal = unmarshal info for that field. +// reqMask = if required, bitmask for field position in required field list. 0 otherwise. +// name = short name of the field. +func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) { + i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name} + n := u.typ.NumField() + if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? + for len(u.dense) <= tag { + u.dense = append(u.dense, unmarshalFieldInfo{}) + } + u.dense[tag] = i + return + } + if u.sparse == nil { + u.sparse = map[uint64]unmarshalFieldInfo{} + } + u.sparse[uint64(tag)] = i +} + +// fieldUnmarshaler returns an unmarshaler for the given field. +func fieldUnmarshaler(f *reflect.StructField) unmarshaler { + if f.Type.Kind() == reflect.Map { + return makeUnmarshalMap(f) + } + return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) +} + +// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { + tagArray := strings.Split(tags, ",") + encoding := tagArray[0] + name := "unknown" + ctype := false + isTime := false + isDuration := false + isWktPointer := false + proto3 := false + validateUTF8 := true + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + if tag == "proto3" { + proto3 = true + } + if strings.HasPrefix(tag, "customtype=") { + ctype = true + } + if tag == "stdtime" { + isTime = true + } + if tag == "stdduration" { + isDuration = true + } + if tag == "wktptr" { + isWktPointer = true + } + } + validateUTF8 = validateUTF8 && proto3 + + // Figure out packaging (pointer, slice, or both) + slice := false + pointer := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name) + } + if pointer { + return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalCustom(getUnmarshalInfo(t), name) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTimePtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTime(getUnmarshalInfo(t), name) + } + + if isDuration { + if pointer { + if slice { + return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDuration(getUnmarshalInfo(t), name) + } + + if isWktPointer { + switch t.Kind() { + case reflect.Float64: + if pointer { + if slice { + return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Float32: + if pointer { + if slice { + return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Int64: + if pointer { + if slice { + return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Uint64: + if pointer { + if slice { + return makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Int32: + if pointer { + if slice { + return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return 
makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Uint32: + if pointer { + if slice { + return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Bool: + if pointer { + if slice { + return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.String: + if pointer { + if slice { + return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name) + case uint8SliceType: + if pointer { + if slice { + return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name) + default: + panic(fmt.Sprintf("unknown wktpointer type %#v", t)) + } + } + + // We'll never have both pointer and slice for basic types. + if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + 
case "varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr + } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if validateUTF8 { + if pointer { + return unmarshalUTF8StringPtr + } + if slice { + return unmarshalUTF8StringSlice + } + return unmarshalUTF8StringValue + } + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessage(getUnmarshalInfo(t), name) + } + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. + +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := 
int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, 
io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, 
io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, 
io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. + // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. 
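All of the slice unmarshalers above begin with the same packed branch: when the wire type is WireBytes, the values arrive as one length-delimited blob that is re-sliced and decoded until exhausted. A hedged, stdlib-only sketch of that branch (decodePackedUvarints is an illustrative name):

import (
	"encoding/binary"
	"io"
)

func decodePackedUvarints(b []byte) (vals []uint64, rest []byte, err error) {
	l, n := binary.Uvarint(b) // byte length of the packed payload
	if n <= 0 || l > uint64(len(b)-n) {
		return nil, nil, io.ErrUnexpectedEOF
	}
	payload, rest := b[n:n+int(l)], b[n+int(l):]
	for len(payload) > 0 {
		v, m := binary.Uvarint(payload)
		if m <= 0 {
			return nil, nil, io.ErrUnexpectedEOF
		}
		vals = append(vals, v)
		payload = payload[m:]
	}
	return vals, rest, nil
}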
+ v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + +func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, 
f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + 
// that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) + *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) + s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + tagArray := strings.Split(f.Tag.Get("protobuf"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + for _, t := range tagArray { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + } + if t == "stdduration" { + valTags = append(valTags, t) + } + if t == "wktptr" { + valTags = append(valTags, t) + } + } + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ",")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. + if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. Oh well. + + // Read key and value from data. + var nerr nonFatal + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if nerr.Merge(err) { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nerr.E + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. +// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. +func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + var nerr nonFatal + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if !nerr.Merge(err) { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nerr.E + } +} + +// Error used by decode internally. +var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. 
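+// For example, with wire == WireVarint and b beginning 0x96 0x01, skipField +// decodes and drops those two varint bytes and returns the remainder; with +// wire == WireFixed64 it drops the next eight bytes. Unknown wire types +// return an error.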
+func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. +// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. +// Returns (-1,-1) if it can't find one. +func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. 
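+// For example, 300 is encoded low seven bits first, with the high bit set +// on every byte except the last, so decodeVarint([]byte{0xAC, 0x02}) +// returns (300, 2).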
+func decodeVarint(b []byte) (uint64, int) { + var x, y uint64 + if len(b) == 0 { + goto bad + } + x = uint64(b[0]) + if x < 0x80 { + return x, 1 + } + x -= 0x80 + + if len(b) <= 1 { + goto bad + } + y = uint64(b[1]) + x += y << 7 + if y < 0x80 { + return x, 2 + } + x -= 0x80 << 7 + + if len(b) <= 2 { + goto bad + } + y = uint64(b[2]) + x += y << 14 + if y < 0x80 { + return x, 3 + } + x -= 0x80 << 14 + + if len(b) <= 3 { + goto bad + } + y = uint64(b[3]) + x += y << 21 + if y < 0x80 { + return x, 4 + } + x -= 0x80 << 21 + + if len(b) <= 4 { + goto bad + } + y = uint64(b[4]) + x += y << 28 + if y < 0x80 { + return x, 5 + } + x -= 0x80 << 28 + + if len(b) <= 5 { + goto bad + } + y = uint64(b[5]) + x += y << 35 + if y < 0x80 { + return x, 6 + } + x -= 0x80 << 35 + + if len(b) <= 6 { + goto bad + } + y = uint64(b[6]) + x += y << 42 + if y < 0x80 { + return x, 7 + } + x -= 0x80 << 42 + + if len(b) <= 7 { + goto bad + } + y = uint64(b[7]) + x += y << 49 + if y < 0x80 { + return x, 8 + } + x -= 0x80 << 49 + + if len(b) <= 8 { + goto bad + } + y = uint64(b[8]) + x += y << 56 + if y < 0x80 { + return x, 9 + } + x -= 0x80 << 56 + + if len(b) <= 9 { + goto bad + } + y = uint64(b[9]) + x += y << 63 + if y < 2 { + return x, 10 + } + +bad: + return 0, 0 +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/extender/code/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go new file mode 100644 index 000000000..00d6c7ad9 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go @@ -0,0 +1,385 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. 
+ // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f // gogo: changed from v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.New(sub.typ)) + m := s.Interface().(custom) + if err := m.Unmarshal(b[:x]); err != nil { + return nil, err + } + return b[x:], nil + } +} + +func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := reflect.New(sub.typ) + c := m.Interface().(custom) + if err := c.Unmarshal(b[:x]); err != nil { + return nil, err + } + v := valToPointer(m) + f.appendRef(v, sub.typ) + return b[x:], nil + } +} + +func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + + m := f.asPointerTo(sub.typ).Interface().(custom) + if err := m.Unmarshal(b[:x]); err != nil { + return nil, err + } + return b[x:], nil + } +} + +func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(t)) + return b[x:], nil + } +} + +func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil,
errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&t)) + return b[x:], nil + } +} + +func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&t)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(t)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&d)) + return b[x:], nil + } +} + +func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(d)) + return b[x:], nil + } +} + +func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + slice :=
f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&d)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(d)) + slice.Set(newSlice) + return b[x:], nil + } +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/text.go b/extender/code/vendor/github.com/gogo/protobuf/proto/text.go new file mode 100644 index 000000000..87416afe9 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/text.go @@ -0,0 +1,930 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. 
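+// A message renders as newline-separated "name: value" pairs, with nested +// messages wrapped in angle brackets; for instance (field names here are +// illustrative, not from any particular .proto file): +// count: 42 +// inner: < +// label: "hi" +// >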
+ +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" + "sync" + "time" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. + return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Print("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). 
+// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. +func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if name == "XXX_NoUnkeyedLiteral" { + continue + } + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, v, props); err != nil { + return err + } + } else if err := tm.writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. 
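+ // For a map[string]int32 entry {"a": 1} this emits one block per key, in + // sorted key order, e.g. (field name illustrative): m: < key: "a" value: 1 >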
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.MapValProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, fv, props); err != nil { + return err + } + } else if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv + if pv.CanAddr() { + pv = sv.Addr() + } else { + pv = reflect.New(sv.Type()) + pv.Elem().Set(sv) + } + if _, err := extendable(pv.Interface()); err == nil { + if err := tm.writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + +// writeAny writes an arbitrary field. 
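+// Strings and []byte are quoted via writeString, structs recurse through +// writeStruct inside '<'/'>' (or '{'/'}' for groups), and remaining scalars +// fall through to fmt.Fprint.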
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + if props != nil { + if len(props.CustomType) > 0 { + custom, ok := v.Interface().(Marshaler) + if ok { + data, err := custom.Marshal() + if err != nil { + return err + } + if err := writeString(w, string(data)); err != nil { + return err + } + return nil + } + } else if len(props.CastType) > 0 { + if _, ok := v.Interface().(interface { + String() string + }); ok { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + _, err := fmt.Fprintf(w, "%d", v.Interface()) + return err + } + } + } else if props.StdTime { + t, ok := v.Interface().(time.Time) + if !ok { + return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) + } + tproto, err := timestampProto(t) + if err != nil { + return err + } + propsCopy := *props // Make a copy so that this is goroutine-safe + propsCopy.StdTime = false + err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy) + return err + } else if props.StdDuration { + d, ok := v.Interface().(time.Duration) + if !ok { + return fmt.Errorf("stdduration is not time.Duration, but %T", v.Interface()) + } + dproto := durationProto(d) + propsCopy := *props // Make a copy so that this is goroutine-safe + propsCopy.StdDuration = false + err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy) + return err + } + } + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Bytes())); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if v.CanAddr() { + // Calling v.Interface on a struct causes the reflect package to + // copy the entire struct. This is racy with the new Marshaler + // since we atomically update the XXX_sizecache. + // + // Thus, we retrieve a pointer to the struct if possible to avoid + // a race since v.Interface on the pointer doesn't copy the struct. + // + // If v is not addressable, then we are not worried about a race + // since it implies that the binary Marshaler cannot possibly be + // mutating this value.
+ v = v.Addr() + } + if v.Type().Implements(textMarshalerType) { + text, err := v.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. + switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, ferr := fmt.Fprintf(w, "/* %v */\n", err) + return ferr + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, werr := w.Write(endBraceNewline); werr != nil { + return werr + } + continue + } + if _, ferr := fmt.Fprint(w, tag); ferr != nil { + return ferr + } + if wire != WireStartGroup { + if err = w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err = w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { 
return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + e := pv.Interface().(Message) + + var m map[int32]Extension + var mu sync.Locker + if em, ok := e.(extensionsBytes); ok { + eb := em.GetExtensions() + var err error + m, err = BytesToExtensionsMap(*eb) + if err != nil { + return err + } + mu = notLocker{} + } else if _, ok := e.(extendableProto); ok { + ep, _ := extendable(e) + m, mu = ep.extensionsRead() + if m == nil { + return nil + } + } + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + + mu.Lock() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + mu.Unlock() + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(e, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. + if !desc.repeated() { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: tm.Compact, + } + + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. 
+ v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/extender/code/vendor/github.com/gogo/protobuf/proto/text_gogo.go new file mode 100644 index 000000000..1d6c6aa0e --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/text_gogo.go @@ -0,0 +1,57 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "fmt" + "reflect" +) + +func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { + m, ok := enumStringMaps[props.Enum] + if !ok { + if err := tm.writeAny(w, v, props); err != nil { + return err + } + } + key := int32(0) + if v.Kind() == reflect.Ptr { + key = int32(v.Elem().Int()) + } else { + key = int32(v.Int()) + } + s, ok := m[key] + if !ok { + if err := tm.writeAny(w, v, props); err != nil { + return err + } + } + _, err := fmt.Fprint(w, s) + return err +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/text_parser.go b/extender/code/vendor/github.com/gogo/protobuf/proto/text_parser.go new file mode 100644 index 000000000..1ce0be2fa --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/text_parser.go @@ -0,0 +1,1018 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. 
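+// The parser accepts the same "name: value" form that text.go emits, e.g. +// `count: 42 inner: < label: "hi" >` (names illustrative), plus '#' comments +// and optional ',' or ';' separators between fields.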
+ +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += 
len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(i), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. 
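+ // For example, the input `"foo" "bar"` yields a single token whose + // unquoted value is "foobar".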
+ cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. +func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension or an Any. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. 
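+ // e.g. "type.example.com/pkg.Msg" (a hypothetical URL): the segment + // after the last '/' names the message type to instantiate.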
+ messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. + for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == extName { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", extName) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(Message) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. 
+ if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.MapKeyProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.MapValProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. + if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. 
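+// For example, `foo: 1, bar: 2`, `foo: 1; bar: 2`, and `foo: 1 bar: 2` +// all parse identically.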
+func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + if len(props.CustomType) > 0 { + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + tc := reflect.TypeOf(new(Marshaler)) + ok := t.Elem().Implements(tc.Elem()) + if ok { + fv := v + flen := fv.Len() + if flen == fv.Cap() { + nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1) + reflect.Copy(nav, fv) + fv.Set(nav) + } + fv.SetLen(flen + 1) + + // Read one. + p.back() + return p.readAny(fv.Index(flen), props) + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler) + err := custom.Unmarshal([]byte(tok.unquoted)) + if err != nil { + return p.errorf("%v %v: %v", err, v.Type(), tok.value) + } + v.Set(reflect.ValueOf(custom)) + } else { + custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler) + err := custom.Unmarshal([]byte(tok.unquoted)) + if err != nil { + return p.errorf("%v %v: %v", err, v.Type(), tok.value) + } + v.Set(reflect.Indirect(reflect.ValueOf(custom))) + } + return nil + } + if props.StdTime { + fv := v + p.back() + props.StdTime = false + tproto := &timestamp{} + err := p.readAny(reflect.ValueOf(tproto).Elem(), props) + props.StdTime = true + if err != nil { + return err + } + tim, err := timestampFromProto(tproto) + if err != nil { + return err + } + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + if t.Elem().Kind() == reflect.Ptr { + ts := fv.Interface().([]*time.Time) + ts = append(ts, &tim) + fv.Set(reflect.ValueOf(ts)) + return nil + } else { + ts := fv.Interface().([]time.Time) + ts = append(ts, tim) + fv.Set(reflect.ValueOf(ts)) + return nil + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + v.Set(reflect.ValueOf(&tim)) + } else { + v.Set(reflect.Indirect(reflect.ValueOf(&tim))) + } + return nil + } + if props.StdDuration { + fv := v + p.back() + props.StdDuration = false + dproto := &duration{} + err := p.readAny(reflect.ValueOf(dproto).Elem(), props) + props.StdDuration = true + if err != nil { + return err + } + dur, err := durationFromProto(dproto) + if err != nil { + return err + } + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + if t.Elem().Kind() == reflect.Ptr { + ds := fv.Interface().([]*time.Duration) + ds = append(ds, &dur) + fv.Set(reflect.ValueOf(ds)) + return nil + } else { + ds := fv.Interface().([]time.Duration) + ds = append(ds, dur) + fv.Set(reflect.ValueOf(ds)) + return nil + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + v.Set(reflect.ValueOf(&dur)) + } else { + v.Set(reflect.Indirect(reflect.ValueOf(&dur))) + } + return nil + } + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + ntok := p.next() + if ntok.err != nil { + return ntok.err + } + if ntok.value == "]" { + break + } + if ntok.value != "," { + return p.errorf("Expected ']' or ',' found %q", ntok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // true/1/t/True or false/f/0/False. + switch tok.value { + case "true", "1", "t", "True": + fv.SetBool(true) + return nil + case "false", "0", "f", "False": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". + if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int8: + if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil { + fv.SetInt(x) + return nil + } + case reflect.Int16: + if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil { + fv.SetInt(x) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint8: + if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint16: + if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. 
+func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + return um.UnmarshalText([]byte(s)) + } + pb.Reset() + v := reflect.ValueOf(pb) + return newTextParser(s).readStruct(v.Elem(), "") +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/timestamp.go b/extender/code/vendor/github.com/gogo/protobuf/proto/timestamp.go new file mode 100644 index 000000000..9324f6542 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/timestamp.go @@ -0,0 +1,113 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. 
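+// +// For instance, time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC) is a valid +// time.Time, but its Unix seconds equal maxValidSeconds and are therefore +// rejected by the range check below.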
+func validateTimestamp(ts *timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// timestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if timestampFromProto returns an error, the first +// return value is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func timestampFromProto(ts *timestamp) (time.Time, error) { + // Don't return the zero value on error, because it corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// timestampProto converts a time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func timestampProto(t time.Time) (*timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) + ts := &timestamp{ + Seconds: seconds, + Nanos: nanos, + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/extender/code/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go new file mode 100644 index 000000000..38439fa99 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go @@ -0,0 +1,49 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() + +type timestamp struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *timestamp) Reset() { *m = timestamp{} } +func (*timestamp) ProtoMessage() {} +func (*timestamp) String() string { return "timestamp" } + +func init() { + RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/wrappers.go b/extender/code/vendor/github.com/gogo/protobuf/proto/wrappers.go new file mode 100644 index 000000000..b175d1b64 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/wrappers.go @@ -0,0 +1,1888 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*float64) + v := &float64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
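+ // b now holds the field tag, a length varint, and the encoded + // wrapper message: the standard length-delimited (WireBytes) + // layout. Every makeStd*Value{Ptr,Slice,PtrSlice}Marshaler below + // emits this same shape for its respective wrapper type.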
+ return b, nil + } +} + +func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) + v := &float64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float64) + v := &float64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float64) + v := &float64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
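+ // Each element of the slice becomes its own tagged record; + // length-delimited (WireBytes) fields are never packed.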
+ } + + return b, nil + } +} + +func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*float32) + v := &float32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) + v := &float32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float32) + v := &float32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float32) + v := &float32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*int64) + v := &int64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) + v := &int64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int64) + v := &int64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int64) + v := &int64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*uint64) + v := &uint64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) + v := &uint64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint64) + v := &uint64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint64) + v := &uint64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*int32) + v := &int32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) + v := &int32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int32) + v := &int32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int32) + v := &int32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*uint32) + v := &uint32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) + v := &uint32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint32) + v := &uint32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint32) + v := &uint32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*bool) + v := &boolValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) + v := &boolValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(bool) + v := &boolValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(bool) + v := &boolValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdBoolValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*string) + v := &stringValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) + v := &stringValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(string) + v := &stringValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(string) + v := &stringValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*[]byte) + v := &bytesValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) + v := &bytesValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().([]byte) + v := &bytesValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().([]byte) + v := &bytesValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/extender/code/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go new file mode 100644 index 000000000..c1cf7bf85 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go @@ -0,0 +1,113 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
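// [Editor's sketch; not part of the vendored file. It assumes only the
// package-internal helpers visible in this diff: Marshal, appendVarint, and
// the boolValue wrapper type defined below.] Every makeStd*Value sizer and
// marshaler above emits the same length-delimited layout used for embedded
// messages: the field key with WireBytes, a varint length, then the encoded
// wrapper message whose single field 1 carries the value. A hand-rolled
// equivalent for one boolValue field would be:
//
//	func appendBoolValueField(b []byte, wiretag uint64, v bool) ([]byte, error) {
//		buf, err := Marshal(&boolValue{v}) // wrapper payload: field 1 as a varint
//		if err != nil {
//			return nil, err
//		}
//		b = appendVarint(b, wiretag)          // key: (field_number<<3)|WireBytes
//		b = appendVarint(b, uint64(len(buf))) // length prefix
//		return append(b, buf...), nil
//	}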
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +type float64Value struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *float64Value) Reset() { *m = float64Value{} } +func (*float64Value) ProtoMessage() {} +func (*float64Value) String() string { return "float64" } + +type float32Value struct { + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *float32Value) Reset() { *m = float32Value{} } +func (*float32Value) ProtoMessage() {} +func (*float32Value) String() string { return "float32" } + +type int64Value struct { + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *int64Value) Reset() { *m = int64Value{} } +func (*int64Value) ProtoMessage() {} +func (*int64Value) String() string { return "int64" } + +type uint64Value struct { + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *uint64Value) Reset() { *m = uint64Value{} } +func (*uint64Value) ProtoMessage() {} +func (*uint64Value) String() string { return "uint64" } + +type int32Value struct { + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *int32Value) Reset() { *m = int32Value{} } +func (*int32Value) ProtoMessage() {} +func (*int32Value) String() string { return "int32" } + +type uint32Value struct { + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *uint32Value) Reset() { *m = uint32Value{} } +func (*uint32Value) ProtoMessage() {} +func (*uint32Value) String() string { return "uint32" } + +type boolValue struct { + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *boolValue) Reset() { *m = boolValue{} } +func (*boolValue) ProtoMessage() {} +func (*boolValue) String() string { return "bool" } + +type stringValue struct { + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *stringValue) Reset() { *m = stringValue{} } +func (*stringValue) ProtoMessage() {} +func (*stringValue) String() string { return "string" } + +type bytesValue struct { + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *bytesValue) Reset() { *m = bytesValue{} } +func (*bytesValue) ProtoMessage() {} +func (*bytesValue) String() string { return "[]byte" } + +func init() { + RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue") + RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue") + RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value") + RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value") + RegisterType((*int32Value)(nil), 
"gogo.protobuf.proto.Int32Value") + RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value") + RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue") + RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue") + RegisterType((*bytesValue)(nil), "gogo.protobuf.proto.BytesValue") +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile b/extender/code/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile new file mode 100644 index 000000000..3496dc99d --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile @@ -0,0 +1,36 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + go install github.com/gogo/protobuf/protoc-gen-gostring + protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto + protoc --gostring_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto diff --git a/extender/code/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/extender/code/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go new file mode 100644 index 000000000..a85bf1984 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go @@ -0,0 +1,118 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package descriptor provides functions for obtaining protocol buffer +// descriptors for generated Go types. +// +// These functions cannot go in package proto because they depend on the +// generated protobuf descriptor messages, which themselves depend on proto. +package descriptor + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + + "github.com/gogo/protobuf/proto" +) + +// extractFile extracts a FileDescriptorProto from a gzip'd buffer. +func extractFile(gz []byte) (*FileDescriptorProto, error) { + r, err := gzip.NewReader(bytes.NewReader(gz)) + if err != nil { + return nil, fmt.Errorf("failed to open gzip reader: %v", err) + } + defer r.Close() + + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) + } + + fd := new(FileDescriptorProto) + if err := proto.Unmarshal(b, fd); err != nil { + return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) + } + + return fd, nil +} + +// Message is a proto.Message with a method to return its descriptor. +// +// Message types generated by the protocol compiler always satisfy +// the Message interface. +type Message interface { + proto.Message + Descriptor() ([]byte, []int) +} + +// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it +// describing the given message. +func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) { + gz, path := msg.Descriptor() + fd, err := extractFile(gz) + if err != nil { + panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) + } + + md = fd.MessageType[path[0]] + for _, i := range path[1:] { + md = md.NestedType[i] + } + return fd, md +} + +// Is this field a scalar numeric type? 
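// [Editor's sketch; not part of the vendored file. The describe function and
// its msg argument are hypothetical.] Typical use of ForMessage: any
// protoc-gen-gogo generated message satisfies the Message interface above, so
// its file- and message-level descriptors can be recovered at runtime:
//
//	func describe(msg Message) {
//		fd, md := ForMessage(msg)
//		fmt.Printf("message %q defined in %q (syntax %s)\n",
//			md.GetName(), fd.GetName(), fd.GetSyntax())
//		for _, f := range md.GetField() {
//			fmt.Printf("  field %s scalar=%v\n", f.GetName(), f.IsScalar())
//		}
//	}
//
// The IsScalar helper used above is defined next: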
+func (field *FieldDescriptorProto) IsScalar() bool { + if field.Type == nil { + return false + } + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE, + FieldDescriptorProto_TYPE_FLOAT, + FieldDescriptorProto_TYPE_INT64, + FieldDescriptorProto_TYPE_UINT64, + FieldDescriptorProto_TYPE_INT32, + FieldDescriptorProto_TYPE_FIXED64, + FieldDescriptorProto_TYPE_FIXED32, + FieldDescriptorProto_TYPE_BOOL, + FieldDescriptorProto_TYPE_UINT32, + FieldDescriptorProto_TYPE_ENUM, + FieldDescriptorProto_TYPE_SFIXED32, + FieldDescriptorProto_TYPE_SFIXED64, + FieldDescriptorProto_TYPE_SINT32, + FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/extender/code/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go new file mode 100644 index 000000000..18b2a3318 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go @@ -0,0 +1,2865 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. 
+ FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} + +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} + +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} + +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} + +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} + +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} + +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} + +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} + +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. 
+ FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} + +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} + +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} + +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} + +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} + +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} + +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} + +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} + +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. + FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} + +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} + +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} + +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} + +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. 
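// [Editor's sketch; not part of the generated file.] The per-enum boilerplate
// in this file (name/value maps plus Enum, String, and UnmarshalJSON) gives
// every enum the same round trip; for FieldDescriptorProto_Type:
//
//	t := FieldDescriptorProto_TYPE_STRING.Enum() // *Type, for proto2 optional fields
//	_ = t.String()                               // "TYPE_STRING", via the name map
//	var u FieldDescriptorProto_Type
//	_ = u.UnmarshalJSON([]byte(`"TYPE_BOOL"`))   // value-map lookup; u is now TYPE_BOOL
//
// The IdempotencyLevel enum described above is defined next: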
+type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} + +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} + +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} + +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} + +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{0} +} +func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) +} +func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) +} +func (m *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(m, src) +} +func (m *FileDescriptorSet) XXX_Size() int { + return xxx_messageInfo_FileDescriptorSet.Size(m) +} +func (m *FileDescriptorSet) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. 
+ MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{1} +} +func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) +} +func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(m, src) +} +func (m *FileDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FileDescriptorProto.Size(m) +} +func (m *FileDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + 
return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. +type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2} +} +func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) +} +func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) +} +func (m *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(m, src) +} +func (m *DescriptorProto) XXX_Size() int { + return xxx_messageInfo_DescriptorProto.Size(m) +} +func (m *DescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + 
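// [Editor's note; illustrative only, not part of the generated file.] Each
// getter above guards both the receiver and the field pointer, so proto2
// optional fields can be read through nil messages without panicking:
//
//	var fd *FileDescriptorProto // nil, never allocated
//	_ = fd.GetName()            // "" thanks to the m != nil guard
//	for _, mt := range fd.GetMessageType() { // nil slice: loop body never runs
//		_ = mt.GetName()
//	}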
+func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2, 0} +} +func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src) +} +func (m *DescriptorProto_ExtensionRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) +} +func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. 
+type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2, 1} +} +func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src) +} +func (m *DescriptorProto_ReservedRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) +} +func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{3} +} + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) +} +func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) +} +func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(m, src) +} +func (m *ExtensionRangeOptions) XXX_Size() int { + return xxx_messageInfo_ExtensionRangeOptions.Size(m) +} +func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. 
This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4} +} +func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) +} +func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(m, src) +} +func (m *FieldDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FieldDescriptorProto.Size(m) +} +func (m *FieldDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. 
+type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{5} +} +func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) +} +func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) +} +func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(m, src) +} +func (m *OneofDescriptorProto) XXX_Size() int { + return xxx_messageInfo_OneofDescriptorProto.Size(m) +} +func (m *OneofDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. 
+ ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{6} +} +func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) +} +func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(m, src) +} +func (m *EnumDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto.Size(m) +} +func (m *EnumDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. 
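// [Editor's note; illustrative only.] The boundary semantics are the whole
// difference: DescriptorProto.ReservedRange treats End as exclusive, while the
// enum range below treats End as inclusive, which is what lets it represent
// math.MaxInt32. For a proto source line `reserved 5;`:
//
//	message: Start=5, End=6 // end exclusive
//	enum:    Start=5, End=5 // end inclusive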
+type EnumDescriptorProto_EnumReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } +func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{6, 0} +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo + +func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Describes a value within an enum. 
+type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{7} +} +func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) +} +func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src) +} +func (m *EnumValueDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumValueDescriptorProto.Size(m) +} +func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. 
+type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{8} +} +func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) +} +func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) +} +func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(m, src) +} +func (m *ServiceDescriptorProto) XXX_Size() int { + return xxx_messageInfo_ServiceDescriptorProto.Size(m) +} +func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{9} +} +func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) +} +func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) +} +func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(m, src) +} +func (m *MethodDescriptorProto) XXX_Size() int { + return xxx_messageInfo_MethodDescriptorProto.Size(m) +} +func (m *MethodDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. 
This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + // Is this file deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this option is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be + // used for determining the namespace. + PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10} +} + +var extRange_FileOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} + +func (m *FileOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileOptions.Unmarshal(m, b) +} +func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) +} +func (m *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(m, src) +} +func (m *FileOptions) XXX_Size() int { + return xxx_messageInfo_FileOptions.Size(m) +} +func (m *FileOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FileOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOptions proto.InternalMessageInfo + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { + if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Do not use. 
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return false +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + +func (m *FileOptions) GetPhpMetadataNamespace() string { + if m != nil && m.PhpMetadataNamespace != nil { + return *m.PhpMetadataNamespace + } + return "" +} + +func (m *FileOptions) GetRubyPackage() string { + if m != nil && m.RubyPackage != nil { + return *m.RubyPackage + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. 
they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map<KeyType, ValueType> map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{11} +} + +var extRange_MessageOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MessageOptions +} + +func (m *MessageOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageOptions.Unmarshal(m, b) +} +func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) +} +func (m *MessageOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageOptions.Merge(m, src) +} +func (m *MessageOptions) XXX_Size() int { + return xxx_messageInfo_MessageOptions.Size(m) +} +func (m *MessageOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MessageOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageOptions proto.InternalMessageInfo + +const Default_MessageOptions_MessageSetWireFormat bool = false +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false +const Default_MessageOptions_Deprecated bool = false + +func (m *MessageOptions) GetMessageSetWireFormat() bool { + if m != nil && m.MessageSetWireFormat != nil { + return *m.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if m != nil && m.NoStandardDescriptorAccessor != nil { + return *m.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (m *MessageOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (m *MessageOptions) GetMapEntry() bool { + if m != nil && m.MapEntry != nil { + return *m.MapEntry + } + return false +} + +func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. 
The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12} +} + +var extRange_FieldOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} + +func (m *FieldOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldOptions.Unmarshal(m, b) +} +func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) +} +func (m *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(m, src) +} +func (m *FieldOptions) XXX_Size() int { + return xxx_messageInfo_FieldOptions.Size(m) +} +func (m *FieldOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FieldOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldOptions proto.InternalMessageInfo + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{13} +} + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofOptions.Unmarshal(m, b) +} +func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) +} +func (m *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(m, src) +} +func (m *OneofOptions) XXX_Size() int { + return xxx_messageInfo_OneofOptions.Size(m) +} +func (m *OneofOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OneofOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofOptions proto.InternalMessageInfo + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{14} +} + +var extRange_EnumOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} + +func (m *EnumOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumOptions.Unmarshal(m, b) +} +func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) +} +func (m *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(m, src) +} +func (m *EnumOptions) XXX_Size() int { + return xxx_messageInfo_EnumOptions.Size(m) +} +func (m *EnumOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumOptions proto.InternalMessageInfo + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{15} +} + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} + +func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) +} +func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) +} +func (m *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(m, src) +} +func (m *EnumValueOptions) XXX_Size() int { + return xxx_messageInfo_EnumValueOptions.Size(m) +} +func (m *EnumValueOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{16} +} + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} + +func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) +} +func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) +} +func (m *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(m, src) +} +func (m *ServiceOptions) XXX_Size() int { + return xxx_messageInfo_ServiceOptions.Size(m) +} +func (m *ServiceOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17} +} + +var extRange_MethodOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MethodOptions +} + +func (m *MethodOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodOptions.Unmarshal(m, b) +} +func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) +} +func (m *MethodOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodOptions.Merge(m, src) +} +func (m *MethodOptions) XXX_Size() int { + return xxx_messageInfo_MethodOptions.Size(m) +} +func (m *MethodOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MethodOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodOptions proto.InternalMessageInfo + +const Default_MethodOptions_Deprecated bool = false +const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN + +func (m *MethodOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if m != nil && m.IdempotencyLevel != nil { + return *m.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// A message representing an option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set.
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18} +} +func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) +} +func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(m, src) +} +func (m *UninterpretedOption) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption.Size(m) +} +func (m *UninterpretedOption) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". 
+type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18, 0} +} +func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) +} +func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src) +} +func (m *UninterpretedOption_NamePart) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) +} +func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. 
For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19} +} +func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) +} +func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo.Merge(m, src) +} +func (m *SourceCodeInfo) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo.Size(m) +} +func (m *SourceCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo + +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if m != nil { + return m.Location + } + return nil +} + +type SourceCodeInfo_Location struct { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition occurs. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user.
+ Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19, 0} +} +func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) +} +func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src) +} +func (m *SourceCodeInfo_Location) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo_Location.Size(m) +} +func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() 
[]int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20} +} +func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo.Merge(m, src) +} +func (m *GeneratedCodeInfo) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo.Size(m) +} +func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20, 0} +} +func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) +} +func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), 
"google.protobuf.EnumDescriptorProto.EnumReservedRange") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") +} + +func init() { proto.RegisterFile("descriptor.proto", fileDescriptor_308767df5ffe18af) } + +var fileDescriptor_308767df5ffe18af = []byte{ + // 2522 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8, + 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0x65, 0x7a, 0xec, 0x75, 0x18, 0xef, 0x47, 0x1c, 0xed, 0x66, + 0xe3, 0x24, 0xbb, 0xca, 0xc2, 0x49, 0x9c, 0xac, 0x53, 0x6c, 0x2b, 0x4b, 0x8c, 0x57, 0xa9, 0xbe, + 0x4a, 0xc9, 0xdd, 0x64, 0x8b, 0x82, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89, + 0x83, 0x1e, 0x02, 0xf4, 0x54, 0xa0, 0x7f, 0x40, 0x51, 0x14, 0x3d, 0xf4, 0xb2, 0x40, 0xff, 0x80, + 0x02, 0xed, 0xbd, 0xd7, 0x02, 0xbd, 0xf7, 0x50, 0xa0, 0x05, 0xda, 0x3f, 0xa1, 0xc7, 0x62, 0x66, + 0x48, 0x8a, 0xd4, 0x47, 0xe2, 0x5d, 0x20, 0xd9, 0x93, 0x3d, 0xef, 0xfd, 0xde, 0x9b, 0x37, 0x8f, + 0xbf, 0x79, 0xf3, 0x66, 0x04, 0x82, 0x46, 0x5c, 0xd5, 0xd1, 0x6d, 0xcf, 0x72, 0x2a, 0xb6, 0x63, + 0x79, 0x16, 0x5a, 0x1b, 0x5a, 0xd6, 0xd0, 0x20, 0x7c, 0x74, 0x32, 0x19, 0x94, 0x5b, 0xb0, 0x7e, + 0x4f, 0x37, 0x48, 0x3d, 0x04, 0xf6, 0x88, 0x87, 0xee, 0x40, 0x7a, 0xa0, 0x1b, 0x44, 0x4c, 0xec, + 0xa4, 0x76, 0x0b, 0x7b, 0x1f, 0x56, 0x66, 0x8c, 0x2a, 0x71, 0x8b, 0x2e, 0x15, 0xcb, 0xcc, 0xa2, + 0xfc, 0xef, 0x34, 0x6c, 0x2c, 0xd0, 0x22, 0x04, 0x69, 0x13, 0x8f, 0xa9, 0xc7, 0xc4, 0x6e, 0x5e, + 0x66, 0xff, 0x23, 0x11, 0x56, 0x6c, 0xac, 0x3e, 0xc6, 0x43, 0x22, 0x26, 0x99, 0x38, 0x18, 0xa2, + 0xf7, 0x01, 0x34, 0x62, 0x13, 0x53, 0x23, 0xa6, 0x7a, 0x2a, 0xa6, 0x76, 0x52, 0xbb, 0x79, 0x39, + 0x22, 0x41, 0xd7, 0x60, 0xdd, 0x9e, 0x9c, 0x18, 0xba, 0xaa, 0x44, 0x60, 0xb0, 0x93, 0xda, 0xcd, + 0xc8, 0x02, 0x57, 0xd4, 0xa7, 0xe0, 0xcb, 0xb0, 0xf6, 0x94, 0xe0, 0xc7, 0x51, 0x68, 0x81, 0x41, + 0x4b, 0x54, 0x1c, 0x01, 0xd6, 0xa0, 0x38, 0x26, 0xae, 0x8b, 0x87, 0x44, 0xf1, 0x4e, 0x6d, 0x22, + 0xa6, 0xd9, 0xea, 0x77, 0xe6, 0x56, 0x3f, 0xbb, 0xf2, 0x82, 0x6f, 0xd5, 0x3f, 0xb5, 0x09, 0xaa, + 0x42, 0x9e, 0x98, 0x93, 0x31, 0xf7, 0x90, 0x59, 0x92, 0x3f, 0xc9, 0x9c, 
0x8c, 0x67, 0xbd, 0xe4, + 0xa8, 0x99, 0xef, 0x62, 0xc5, 0x25, 0xce, 0x13, 0x5d, 0x25, 0x62, 0x96, 0x39, 0xb8, 0x3c, 0xe7, + 0xa0, 0xc7, 0xf5, 0xb3, 0x3e, 0x02, 0x3b, 0x54, 0x83, 0x3c, 0x79, 0xe6, 0x11, 0xd3, 0xd5, 0x2d, + 0x53, 0x5c, 0x61, 0x4e, 0x2e, 0x2d, 0xf8, 0x8a, 0xc4, 0xd0, 0x66, 0x5d, 0x4c, 0xed, 0xd0, 0x3e, + 0xac, 0x58, 0xb6, 0xa7, 0x5b, 0xa6, 0x2b, 0xe6, 0x76, 0x12, 0xbb, 0x85, 0xbd, 0x77, 0x17, 0x12, + 0xa1, 0xc3, 0x31, 0x72, 0x00, 0x46, 0x0d, 0x10, 0x5c, 0x6b, 0xe2, 0xa8, 0x44, 0x51, 0x2d, 0x8d, + 0x28, 0xba, 0x39, 0xb0, 0xc4, 0x3c, 0x73, 0x70, 0x61, 0x7e, 0x21, 0x0c, 0x58, 0xb3, 0x34, 0xd2, + 0x30, 0x07, 0x96, 0x5c, 0x72, 0x63, 0x63, 0xb4, 0x05, 0x59, 0xf7, 0xd4, 0xf4, 0xf0, 0x33, 0xb1, + 0xc8, 0x18, 0xe2, 0x8f, 0xca, 0x7f, 0xce, 0xc2, 0xda, 0x59, 0x28, 0x76, 0x17, 0x32, 0x03, 0xba, + 0x4a, 0x31, 0xf9, 0x6d, 0x72, 0xc0, 0x6d, 0xe2, 0x49, 0xcc, 0x7e, 0xc7, 0x24, 0x56, 0xa1, 0x60, + 0x12, 0xd7, 0x23, 0x1a, 0x67, 0x44, 0xea, 0x8c, 0x9c, 0x02, 0x6e, 0x34, 0x4f, 0xa9, 0xf4, 0x77, + 0xa2, 0xd4, 0x03, 0x58, 0x0b, 0x43, 0x52, 0x1c, 0x6c, 0x0e, 0x03, 0x6e, 0x5e, 0x7f, 0x55, 0x24, + 0x15, 0x29, 0xb0, 0x93, 0xa9, 0x99, 0x5c, 0x22, 0xb1, 0x31, 0xaa, 0x03, 0x58, 0x26, 0xb1, 0x06, + 0x8a, 0x46, 0x54, 0x43, 0xcc, 0x2d, 0xc9, 0x52, 0x87, 0x42, 0xe6, 0xb2, 0x64, 0x71, 0xa9, 0x6a, + 0xa0, 0xcf, 0xa6, 0x54, 0x5b, 0x59, 0xc2, 0x94, 0x16, 0xdf, 0x64, 0x73, 0x6c, 0x3b, 0x86, 0x92, + 0x43, 0x28, 0xef, 0x89, 0xe6, 0xaf, 0x2c, 0xcf, 0x82, 0xa8, 0xbc, 0x72, 0x65, 0xb2, 0x6f, 0xc6, + 0x17, 0xb6, 0xea, 0x44, 0x87, 0xe8, 0x03, 0x08, 0x05, 0x0a, 0xa3, 0x15, 0xb0, 0x2a, 0x54, 0x0c, + 0x84, 0x6d, 0x3c, 0x26, 0xdb, 0xcf, 0xa1, 0x14, 0x4f, 0x0f, 0xda, 0x84, 0x8c, 0xeb, 0x61, 0xc7, + 0x63, 0x2c, 0xcc, 0xc8, 0x7c, 0x80, 0x04, 0x48, 0x11, 0x53, 0x63, 0x55, 0x2e, 0x23, 0xd3, 0x7f, + 0xd1, 0x8f, 0xa6, 0x0b, 0x4e, 0xb1, 0x05, 0x7f, 0x34, 0xff, 0x45, 0x63, 0x9e, 0x67, 0xd7, 0xbd, + 0x7d, 0x1b, 0x56, 0x63, 0x0b, 0x38, 0xeb, 0xd4, 0xe5, 0x5f, 0xc0, 0xdb, 0x0b, 0x5d, 0xa3, 0x07, + 0xb0, 0x39, 0x31, 0x75, 0xd3, 0x23, 0x8e, 0xed, 0x10, 0xca, 0x58, 0x3e, 0x95, 0xf8, 0x9f, 0x95, + 0x25, 0x9c, 0x3b, 0x8e, 0xa2, 0xb9, 0x17, 0x79, 0x63, 0x32, 0x2f, 0xbc, 0x9a, 0xcf, 0xfd, 0x77, + 0x45, 0x78, 0xf1, 0xe2, 0xc5, 0x8b, 0x64, 0xf9, 0x37, 0x59, 0xd8, 0x5c, 0xb4, 0x67, 0x16, 0x6e, + 0xdf, 0x2d, 0xc8, 0x9a, 0x93, 0xf1, 0x09, 0x71, 0x58, 0x92, 0x32, 0xb2, 0x3f, 0x42, 0x55, 0xc8, + 0x18, 0xf8, 0x84, 0x18, 0x62, 0x7a, 0x27, 0xb1, 0x5b, 0xda, 0xbb, 0x76, 0xa6, 0x5d, 0x59, 0x69, + 0x52, 0x13, 0x99, 0x5b, 0xa2, 0xcf, 0x21, 0xed, 0x97, 0x68, 0xea, 0xe1, 0xea, 0xd9, 0x3c, 0xd0, + 0xbd, 0x24, 0x33, 0x3b, 0xf4, 0x0e, 0xe4, 0xe9, 0x5f, 0xce, 0x8d, 0x2c, 0x8b, 0x39, 0x47, 0x05, + 0x94, 0x17, 0x68, 0x1b, 0x72, 0x6c, 0x9b, 0x68, 0x24, 0x38, 0xda, 0xc2, 0x31, 0x25, 0x96, 0x46, + 0x06, 0x78, 0x62, 0x78, 0xca, 0x13, 0x6c, 0x4c, 0x08, 0x23, 0x7c, 0x5e, 0x2e, 0xfa, 0xc2, 0x9f, + 0x52, 0x19, 0xba, 0x00, 0x05, 0xbe, 0xab, 0x74, 0x53, 0x23, 0xcf, 0x58, 0xf5, 0xcc, 0xc8, 0x7c, + 0xa3, 0x35, 0xa8, 0x84, 0x4e, 0xff, 0xc8, 0xb5, 0xcc, 0x80, 0x9a, 0x6c, 0x0a, 0x2a, 0x60, 0xd3, + 0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x5e, 0xde, 0x2c, 0xa7, 0xca, 0x7f, 0x4a, 0x42, 0x9a, 0xd5, + 0x8b, 0x35, 0x28, 0xf4, 0x1f, 0x76, 0x25, 0xa5, 0xde, 0x39, 0x3e, 0x6c, 0x4a, 0x42, 0x02, 0x95, + 0x00, 0x98, 0xe0, 0x5e, 0xb3, 0x53, 0xed, 0x0b, 0xc9, 0x70, 0xdc, 0x68, 0xf7, 0xf7, 0x6f, 0x0a, + 0xa9, 0xd0, 0xe0, 0x98, 0x0b, 0xd2, 0x51, 0xc0, 0x8d, 0x3d, 0x21, 0x83, 0x04, 0x28, 0x72, 0x07, + 0x8d, 0x07, 0x52, 0x7d, 0xff, 0xa6, 0x90, 0x8d, 0x4b, 0x6e, 0xec, 0x09, 0x2b, 0x68, 0x15, 0xf2, + 
0x4c, 0x72, 0xd8, 0xe9, 0x34, 0x85, 0x5c, 0xe8, 0xb3, 0xd7, 0x97, 0x1b, 0xed, 0x23, 0x21, 0x1f, + 0xfa, 0x3c, 0x92, 0x3b, 0xc7, 0x5d, 0x01, 0x42, 0x0f, 0x2d, 0xa9, 0xd7, 0xab, 0x1e, 0x49, 0x42, + 0x21, 0x44, 0x1c, 0x3e, 0xec, 0x4b, 0x3d, 0xa1, 0x18, 0x0b, 0xeb, 0xc6, 0x9e, 0xb0, 0x1a, 0x4e, + 0x21, 0xb5, 0x8f, 0x5b, 0x42, 0x09, 0xad, 0xc3, 0x2a, 0x9f, 0x22, 0x08, 0x62, 0x6d, 0x46, 0xb4, + 0x7f, 0x53, 0x10, 0xa6, 0x81, 0x70, 0x2f, 0xeb, 0x31, 0xc1, 0xfe, 0x4d, 0x01, 0x95, 0x6b, 0x90, + 0x61, 0xec, 0x42, 0x08, 0x4a, 0xcd, 0xea, 0xa1, 0xd4, 0x54, 0x3a, 0xdd, 0x7e, 0xa3, 0xd3, 0xae, + 0x36, 0x85, 0xc4, 0x54, 0x26, 0x4b, 0x3f, 0x39, 0x6e, 0xc8, 0x52, 0x5d, 0x48, 0x46, 0x65, 0x5d, + 0xa9, 0xda, 0x97, 0xea, 0x42, 0xaa, 0xac, 0xc2, 0xe6, 0xa2, 0x3a, 0xb9, 0x70, 0x67, 0x44, 0x3e, + 0x71, 0x72, 0xc9, 0x27, 0x66, 0xbe, 0xe6, 0x3e, 0xf1, 0xbf, 0x92, 0xb0, 0xb1, 0xe0, 0xac, 0x58, + 0x38, 0xc9, 0x0f, 0x21, 0xc3, 0x29, 0xca, 0x4f, 0xcf, 0x2b, 0x0b, 0x0f, 0x1d, 0x46, 0xd8, 0xb9, + 0x13, 0x94, 0xd9, 0x45, 0x3b, 0x88, 0xd4, 0x92, 0x0e, 0x82, 0xba, 0x98, 0xab, 0xe9, 0x3f, 0x9f, + 0xab, 0xe9, 0xfc, 0xd8, 0xdb, 0x3f, 0xcb, 0xb1, 0xc7, 0x64, 0xdf, 0xae, 0xb6, 0x67, 0x16, 0xd4, + 0xf6, 0xbb, 0xb0, 0x3e, 0xe7, 0xe8, 0xcc, 0x35, 0xf6, 0x97, 0x09, 0x10, 0x97, 0x25, 0xe7, 0x15, + 0x95, 0x2e, 0x19, 0xab, 0x74, 0x77, 0x67, 0x33, 0x78, 0x71, 0xf9, 0x47, 0x98, 0xfb, 0xd6, 0xdf, + 0x24, 0x60, 0x6b, 0x71, 0xa7, 0xb8, 0x30, 0x86, 0xcf, 0x21, 0x3b, 0x26, 0xde, 0xc8, 0x0a, 0xba, + 0xa5, 0x8f, 0x16, 0x9c, 0xc1, 0x54, 0x3d, 0xfb, 0xb1, 0x7d, 0xab, 0xe8, 0x21, 0x9e, 0x5a, 0xd6, + 0xee, 0xf1, 0x68, 0xe6, 0x22, 0xfd, 0x55, 0x12, 0xde, 0x5e, 0xe8, 0x7c, 0x61, 0xa0, 0xef, 0x01, + 0xe8, 0xa6, 0x3d, 0xf1, 0x78, 0x47, 0xc4, 0x0b, 0x6c, 0x9e, 0x49, 0x58, 0xf1, 0xa2, 0xc5, 0x73, + 0xe2, 0x85, 0xfa, 0x14, 0xd3, 0x03, 0x17, 0x31, 0xc0, 0x9d, 0x69, 0xa0, 0x69, 0x16, 0xe8, 0xfb, + 0x4b, 0x56, 0x3a, 0x47, 0xcc, 0x4f, 0x41, 0x50, 0x0d, 0x9d, 0x98, 0x9e, 0xe2, 0x7a, 0x0e, 0xc1, + 0x63, 0xdd, 0x1c, 0xb2, 0x13, 0x24, 0x77, 0x90, 0x19, 0x60, 0xc3, 0x25, 0xf2, 0x1a, 0x57, 0xf7, + 0x02, 0x2d, 0xb5, 0x60, 0x04, 0x72, 0x22, 0x16, 0xd9, 0x98, 0x05, 0x57, 0x87, 0x16, 0xe5, 0x5f, + 0xe7, 0xa1, 0x10, 0xe9, 0xab, 0xd1, 0x45, 0x28, 0x3e, 0xc2, 0x4f, 0xb0, 0x12, 0xdc, 0x95, 0x78, + 0x26, 0x0a, 0x54, 0xd6, 0xf5, 0xef, 0x4b, 0x9f, 0xc2, 0x26, 0x83, 0x58, 0x13, 0x8f, 0x38, 0x8a, + 0x6a, 0x60, 0xd7, 0x65, 0x49, 0xcb, 0x31, 0x28, 0xa2, 0xba, 0x0e, 0x55, 0xd5, 0x02, 0x0d, 0xba, + 0x05, 0x1b, 0xcc, 0x62, 0x3c, 0x31, 0x3c, 0xdd, 0x36, 0x88, 0x42, 0x6f, 0x6f, 0x2e, 0x3b, 0x49, + 0xc2, 0xc8, 0xd6, 0x29, 0xa2, 0xe5, 0x03, 0x68, 0x44, 0x2e, 0xaa, 0xc3, 0x7b, 0xcc, 0x6c, 0x48, + 0x4c, 0xe2, 0x60, 0x8f, 0x28, 0xe4, 0xeb, 0x09, 0x36, 0x5c, 0x05, 0x9b, 0x9a, 0x32, 0xc2, 0xee, + 0x48, 0xdc, 0xa4, 0x0e, 0x0e, 0x93, 0x62, 0x42, 0x3e, 0x4f, 0x81, 0x47, 0x3e, 0x4e, 0x62, 0xb0, + 0xaa, 0xa9, 0x7d, 0x81, 0xdd, 0x11, 0x3a, 0x80, 0x2d, 0xe6, 0xc5, 0xf5, 0x1c, 0xdd, 0x1c, 0x2a, + 0xea, 0x88, 0xa8, 0x8f, 0x95, 0x89, 0x37, 0xb8, 0x23, 0xbe, 0x13, 0x9d, 0x9f, 0x45, 0xd8, 0x63, + 0x98, 0x1a, 0x85, 0x1c, 0x7b, 0x83, 0x3b, 0xa8, 0x07, 0x45, 0xfa, 0x31, 0xc6, 0xfa, 0x73, 0xa2, + 0x0c, 0x2c, 0x87, 0x1d, 0x8d, 0xa5, 0x05, 0xa5, 0x29, 0x92, 0xc1, 0x4a, 0xc7, 0x37, 0x68, 0x59, + 0x1a, 0x39, 0xc8, 0xf4, 0xba, 0x92, 0x54, 0x97, 0x0b, 0x81, 0x97, 0x7b, 0x96, 0x43, 0x09, 0x35, + 0xb4, 0xc2, 0x04, 0x17, 0x38, 0xa1, 0x86, 0x56, 0x90, 0xde, 0x5b, 0xb0, 0xa1, 0xaa, 0x7c, 0xcd, + 0xba, 0xaa, 0xf8, 0x77, 0x2c, 0x57, 0x14, 0x62, 0xc9, 0x52, 0xd5, 0x23, 0x0e, 0xf0, 0x39, 0xee, + 0xa2, 0xcf, 0xe0, 0xed, 
0x69, 0xb2, 0xa2, 0x86, 0xeb, 0x73, 0xab, 0x9c, 0x35, 0xbd, 0x05, 0x1b, + 0xf6, 0xe9, 0xbc, 0x21, 0x8a, 0xcd, 0x68, 0x9f, 0xce, 0x9a, 0xdd, 0x86, 0x4d, 0x7b, 0x64, 0xcf, + 0xdb, 0x5d, 0x8d, 0xda, 0x21, 0x7b, 0x64, 0xcf, 0x1a, 0x5e, 0x62, 0x17, 0x6e, 0x87, 0xa8, 0xd8, + 0x23, 0x9a, 0x78, 0x2e, 0x0a, 0x8f, 0x28, 0xd0, 0x75, 0x10, 0x54, 0x55, 0x21, 0x26, 0x3e, 0x31, + 0x88, 0x82, 0x1d, 0x62, 0x62, 0x57, 0xbc, 0x10, 0x05, 0x97, 0x54, 0x55, 0x62, 0xda, 0x2a, 0x53, + 0xa2, 0xab, 0xb0, 0x6e, 0x9d, 0x3c, 0x52, 0x39, 0x25, 0x15, 0xdb, 0x21, 0x03, 0xfd, 0x99, 0xf8, + 0x21, 0xcb, 0xef, 0x1a, 0x55, 0x30, 0x42, 0x76, 0x99, 0x18, 0x5d, 0x01, 0x41, 0x75, 0x47, 0xd8, + 0xb1, 0x59, 0x4d, 0x76, 0x6d, 0xac, 0x12, 0xf1, 0x12, 0x87, 0x72, 0x79, 0x3b, 0x10, 0xd3, 0x2d, + 0xe1, 0x3e, 0xd5, 0x07, 0x5e, 0xe0, 0xf1, 0x32, 0xdf, 0x12, 0x4c, 0xe6, 0x7b, 0xdb, 0x05, 0x81, + 0xa6, 0x22, 0x36, 0xf1, 0x2e, 0x83, 0x95, 0xec, 0x91, 0x1d, 0x9d, 0xf7, 0x03, 0x58, 0xa5, 0xc8, + 0xe9, 0xa4, 0x57, 0x78, 0x43, 0x66, 0x8f, 0x22, 0x33, 0xde, 0x84, 0x2d, 0x0a, 0x1a, 0x13, 0x0f, + 0x6b, 0xd8, 0xc3, 0x11, 0xf4, 0xc7, 0x0c, 0x4d, 0xf3, 0xde, 0xf2, 0x95, 0xb1, 0x38, 0x9d, 0xc9, + 0xc9, 0x69, 0xc8, 0xac, 0x4f, 0x78, 0x9c, 0x54, 0x16, 0x70, 0xeb, 0xb5, 0x35, 0xdd, 0xe5, 0x03, + 0x28, 0x46, 0x89, 0x8f, 0xf2, 0xc0, 0xa9, 0x2f, 0x24, 0x68, 0x17, 0x54, 0xeb, 0xd4, 0x69, 0xff, + 0xf2, 0x95, 0x24, 0x24, 0x69, 0x1f, 0xd5, 0x6c, 0xf4, 0x25, 0x45, 0x3e, 0x6e, 0xf7, 0x1b, 0x2d, + 0x49, 0x48, 0x45, 0x1b, 0xf6, 0xbf, 0x26, 0xa1, 0x14, 0xbf, 0x7b, 0xa1, 0x1f, 0xc0, 0xb9, 0xe0, + 0xa1, 0xc4, 0x25, 0x9e, 0xf2, 0x54, 0x77, 0xd8, 0x5e, 0x1c, 0x63, 0x7e, 0x2e, 0x86, 0x6c, 0xd8, + 0xf4, 0x51, 0x3d, 0xe2, 0x7d, 0xa9, 0x3b, 0x74, 0xa7, 0x8d, 0xb1, 0x87, 0x9a, 0x70, 0xc1, 0xb4, + 0x14, 0xd7, 0xc3, 0xa6, 0x86, 0x1d, 0x4d, 0x99, 0x3e, 0x51, 0x29, 0x58, 0x55, 0x89, 0xeb, 0x5a, + 0xfc, 0x0c, 0x0c, 0xbd, 0xbc, 0x6b, 0x5a, 0x3d, 0x1f, 0x3c, 0x3d, 0x1c, 0xaa, 0x3e, 0x74, 0x86, + 0xb9, 0xa9, 0x65, 0xcc, 0x7d, 0x07, 0xf2, 0x63, 0x6c, 0x2b, 0xc4, 0xf4, 0x9c, 0x53, 0xd6, 0x71, + 0xe7, 0xe4, 0xdc, 0x18, 0xdb, 0x12, 0x1d, 0xbf, 0x99, 0x8b, 0xcf, 0x3f, 0x52, 0x50, 0x8c, 0x76, + 0xdd, 0xf4, 0x12, 0xa3, 0xb2, 0x03, 0x2a, 0xc1, 0x4a, 0xd8, 0x07, 0x2f, 0xed, 0xd1, 0x2b, 0x35, + 0x7a, 0x72, 0x1d, 0x64, 0x79, 0x2f, 0x2c, 0x73, 0x4b, 0xda, 0x35, 0x50, 0x6a, 0x11, 0xde, 0x7b, + 0xe4, 0x64, 0x7f, 0x84, 0x8e, 0x20, 0xfb, 0xc8, 0x65, 0xbe, 0xb3, 0xcc, 0xf7, 0x87, 0x2f, 0xf7, + 0x7d, 0xbf, 0xc7, 0x9c, 0xe7, 0xef, 0xf7, 0x94, 0x76, 0x47, 0x6e, 0x55, 0x9b, 0xb2, 0x6f, 0x8e, + 0xce, 0x43, 0xda, 0xc0, 0xcf, 0x4f, 0xe3, 0x67, 0x1c, 0x13, 0x9d, 0x35, 0xf1, 0xe7, 0x21, 0xfd, + 0x94, 0xe0, 0xc7, 0xf1, 0x93, 0x85, 0x89, 0x5e, 0x23, 0xf5, 0xaf, 0x43, 0x86, 0xe5, 0x0b, 0x01, + 0xf8, 0x19, 0x13, 0xde, 0x42, 0x39, 0x48, 0xd7, 0x3a, 0x32, 0xa5, 0xbf, 0x00, 0x45, 0x2e, 0x55, + 0xba, 0x0d, 0xa9, 0x26, 0x09, 0xc9, 0xf2, 0x2d, 0xc8, 0xf2, 0x24, 0xd0, 0xad, 0x11, 0xa6, 0x41, + 0x78, 0xcb, 0x1f, 0xfa, 0x3e, 0x12, 0x81, 0xf6, 0xb8, 0x75, 0x28, 0xc9, 0x42, 0x32, 0xfa, 0x79, + 0x5d, 0x28, 0x46, 0x1b, 0xee, 0x37, 0xc3, 0xa9, 0xbf, 0x24, 0xa0, 0x10, 0x69, 0xa0, 0x69, 0xe7, + 0x83, 0x0d, 0xc3, 0x7a, 0xaa, 0x60, 0x43, 0xc7, 0xae, 0x4f, 0x0a, 0x60, 0xa2, 0x2a, 0x95, 0x9c, + 0xf5, 0xa3, 0xbd, 0x91, 0xe0, 0x7f, 0x9f, 0x00, 0x61, 0xb6, 0x77, 0x9d, 0x09, 0x30, 0xf1, 0xbd, + 0x06, 0xf8, 0xbb, 0x04, 0x94, 0xe2, 0x0d, 0xeb, 0x4c, 0x78, 0x17, 0xbf, 0xd7, 0xf0, 0xfe, 0x99, + 0x84, 0xd5, 0x58, 0x9b, 0x7a, 0xd6, 0xe8, 0xbe, 0x86, 0x75, 0x5d, 0x23, 0x63, 0xdb, 0xf2, 0x88, + 0xa9, 0x9e, 0x2a, 0x06, 0x79, 0x42, 0x0c, 0xb1, 
0xcc, 0x0a, 0xc5, 0xf5, 0x97, 0x37, 0xc2, 0x95, + 0xc6, 0xd4, 0xae, 0x49, 0xcd, 0x0e, 0x36, 0x1a, 0x75, 0xa9, 0xd5, 0xed, 0xf4, 0xa5, 0x76, 0xed, + 0xa1, 0x72, 0xdc, 0xfe, 0x71, 0xbb, 0xf3, 0x65, 0x5b, 0x16, 0xf4, 0x19, 0xd8, 0x6b, 0xdc, 0xea, + 0x5d, 0x10, 0x66, 0x83, 0x42, 0xe7, 0x60, 0x51, 0x58, 0xc2, 0x5b, 0x68, 0x03, 0xd6, 0xda, 0x1d, + 0xa5, 0xd7, 0xa8, 0x4b, 0x8a, 0x74, 0xef, 0x9e, 0x54, 0xeb, 0xf7, 0xf8, 0xd3, 0x46, 0x88, 0xee, + 0xc7, 0x37, 0xf5, 0x6f, 0x53, 0xb0, 0xb1, 0x20, 0x12, 0x54, 0xf5, 0x2f, 0x25, 0xfc, 0x9e, 0xf4, + 0xc9, 0x59, 0xa2, 0xaf, 0xd0, 0xae, 0xa0, 0x8b, 0x1d, 0xcf, 0xbf, 0xc3, 0x5c, 0x01, 0x9a, 0x25, + 0xd3, 0xd3, 0x07, 0x3a, 0x71, 0xfc, 0x97, 0x20, 0x7e, 0x53, 0x59, 0x9b, 0xca, 0xf9, 0x63, 0xd0, + 0xc7, 0x80, 0x6c, 0xcb, 0xd5, 0x3d, 0xfd, 0x09, 0x51, 0x74, 0x33, 0x78, 0x36, 0xa2, 0x37, 0x97, + 0xb4, 0x2c, 0x04, 0x9a, 0x86, 0xe9, 0x85, 0x68, 0x93, 0x0c, 0xf1, 0x0c, 0x9a, 0x16, 0xf0, 0x94, + 0x2c, 0x04, 0x9a, 0x10, 0x7d, 0x11, 0x8a, 0x9a, 0x35, 0xa1, 0xed, 0x1c, 0xc7, 0xd1, 0xf3, 0x22, + 0x21, 0x17, 0xb8, 0x2c, 0x84, 0xf8, 0x8d, 0xfa, 0xf4, 0xbd, 0xaa, 0x28, 0x17, 0xb8, 0x8c, 0x43, + 0x2e, 0xc3, 0x1a, 0x1e, 0x0e, 0x1d, 0xea, 0x3c, 0x70, 0xc4, 0xaf, 0x1e, 0xa5, 0x50, 0xcc, 0x80, + 0xdb, 0xf7, 0x21, 0x17, 0xe4, 0x81, 0x1e, 0xc9, 0x34, 0x13, 0x8a, 0xcd, 0xef, 0xd3, 0xc9, 0xdd, + 0xbc, 0x9c, 0x33, 0x03, 0xe5, 0x45, 0x28, 0xea, 0xae, 0x32, 0x7d, 0x7e, 0x4f, 0xee, 0x24, 0x77, + 0x73, 0x72, 0x41, 0x77, 0xc3, 0xa7, 0xcb, 0xf2, 0x37, 0x49, 0x28, 0xc5, 0x7f, 0x3e, 0x40, 0x75, + 0xc8, 0x19, 0x96, 0x8a, 0x19, 0xb5, 0xf8, 0x6f, 0x57, 0xbb, 0xaf, 0xf8, 0xc5, 0xa1, 0xd2, 0xf4, + 0xf1, 0x72, 0x68, 0xb9, 0xfd, 0xb7, 0x04, 0xe4, 0x02, 0x31, 0xda, 0x82, 0xb4, 0x8d, 0xbd, 0x11, + 0x73, 0x97, 0x39, 0x4c, 0x0a, 0x09, 0x99, 0x8d, 0xa9, 0xdc, 0xb5, 0xb1, 0xc9, 0x28, 0xe0, 0xcb, + 0xe9, 0x98, 0x7e, 0x57, 0x83, 0x60, 0x8d, 0xdd, 0x6b, 0xac, 0xf1, 0x98, 0x98, 0x9e, 0x1b, 0x7c, + 0x57, 0x5f, 0x5e, 0xf3, 0xc5, 0xe8, 0x1a, 0xac, 0x7b, 0x0e, 0xd6, 0x8d, 0x18, 0x36, 0xcd, 0xb0, + 0x42, 0xa0, 0x08, 0xc1, 0x07, 0x70, 0x3e, 0xf0, 0xab, 0x11, 0x0f, 0xab, 0x23, 0xa2, 0x4d, 0x8d, + 0xb2, 0xec, 0xfd, 0xe2, 0x9c, 0x0f, 0xa8, 0xfb, 0xfa, 0xc0, 0xb6, 0xfc, 0xf7, 0x04, 0xac, 0x07, + 0x37, 0x31, 0x2d, 0x4c, 0x56, 0x0b, 0x00, 0x9b, 0xa6, 0xe5, 0x45, 0xd3, 0x35, 0x4f, 0xe5, 0x39, + 0xbb, 0x4a, 0x35, 0x34, 0x92, 0x23, 0x0e, 0xb6, 0xc7, 0x00, 0x53, 0xcd, 0xd2, 0xb4, 0x5d, 0x80, + 0x82, 0xff, 0xdb, 0x10, 0xfb, 0x81, 0x91, 0xdf, 0xdd, 0x81, 0x8b, 0xe8, 0x95, 0x0d, 0x6d, 0x42, + 0xe6, 0x84, 0x0c, 0x75, 0xd3, 0x7f, 0xf1, 0xe5, 0x83, 0xe0, 0x85, 0x25, 0x1d, 0xbe, 0xb0, 0x1c, + 0xfe, 0x0c, 0x36, 0x54, 0x6b, 0x3c, 0x1b, 0xee, 0xa1, 0x30, 0xf3, 0x7e, 0xe0, 0x7e, 0x91, 0xf8, + 0x0a, 0xa6, 0x2d, 0xe6, 0xff, 0x12, 0x89, 0x3f, 0x24, 0x53, 0x47, 0xdd, 0xc3, 0x3f, 0x26, 0xb7, + 0x8f, 0xb8, 0x69, 0x37, 0x58, 0xa9, 0x4c, 0x06, 0x06, 0x51, 0x69, 0xf4, 0xff, 0x0f, 0x00, 0x00, + 0xff, 0xff, 0x88, 0x17, 0xc1, 0xbe, 0x38, 0x1d, 0x00, 0x00, +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/extender/code/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go new file mode 100644 index 000000000..165b2110d --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go @@ -0,0 +1,752 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" + proto "github.com/gogo/protobuf/proto" + math "math" + reflect "reflect" + sort "sort" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (this *FileDescriptorSet) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.FileDescriptorSet{") + if this.File != nil { + s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 16) + s = append(s, "&descriptor.FileDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Package != nil { + s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n") + } + if this.Dependency != nil { + s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n") + } + if this.PublicDependency != nil { + s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n") + } + if this.WeakDependency != nil { + s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n") + } + if this.MessageType != nil { + s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.Service != nil { + s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceCodeInfo != nil { + s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n") + } + if this.Syntax != nil { + s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.DescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Field != nil { + s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.NestedType != nil { + s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.ExtensionRange != nil { + s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n") + } + if this.OneofDecl != nil { + s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != 
nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ExtensionRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.DescriptorProto_ExtensionRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.DescriptorProto_ReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ExtensionRangeOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.ExtensionRangeOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.FieldDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Label != nil { + s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n") + } + if this.Type != nil { + s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n") + } + if this.TypeName != nil { + s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n") + } + if this.Extendee != nil { + s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n") + } + if this.DefaultValue != nil { + s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n") + } + if this.OneofIndex != nil { + s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n") + } + if this.JsonName != nil { + s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: 
"+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.OneofDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.EnumDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Value != nil { + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto_EnumReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumValueDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.ServiceDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Method != nil { + s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = 
append(s, "}") + return strings.Join(s, "") +} +func (this *MethodDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&descriptor.MethodDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.InputType != nil { + s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n") + } + if this.OutputType != nil { + s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ClientStreaming != nil { + s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n") + } + if this.ServerStreaming != nil { + s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 25) + s = append(s, "&descriptor.FileOptions{") + if this.JavaPackage != nil { + s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n") + } + if this.JavaOuterClassname != nil { + s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n") + } + if this.JavaMultipleFiles != nil { + s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n") + } + if this.JavaGenerateEqualsAndHash != nil { + s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n") + } + if this.JavaStringCheckUtf8 != nil { + s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n") + } + if this.OptimizeFor != nil { + s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n") + } + if this.GoPackage != nil { + s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n") + } + if this.CcGenericServices != nil { + s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n") + } + if this.JavaGenericServices != nil { + s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n") + } + if this.PyGenericServices != nil { + s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n") + } + if this.PhpGenericServices != nil { + s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.CcEnableArenas != nil { + s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n") + } + if this.ObjcClassPrefix != nil { + s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n") + } + if this.CsharpNamespace != nil { + s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n") + } + if this.SwiftPrefix != nil { + s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n") + } + if 
this.PhpClassPrefix != nil { + s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n") + } + if this.PhpNamespace != nil { + s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n") + } + if this.PhpMetadataNamespace != nil { + s = append(s, "PhpMetadataNamespace: "+valueToGoStringDescriptor(this.PhpMetadataNamespace, "string")+",\n") + } + if this.RubyPackage != nil { + s = append(s, "RubyPackage: "+valueToGoStringDescriptor(this.RubyPackage, "string")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MessageOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.MessageOptions{") + if this.MessageSetWireFormat != nil { + s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n") + } + if this.NoStandardDescriptorAccessor != nil { + s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.MapEntry != nil { + s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.FieldOptions{") + if this.Ctype != nil { + s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n") + } + if this.Packed != nil { + s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n") + } + if this.Jstype != nil { + s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n") + } + if this.Lazy != nil { + s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.Weak != nil { + s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.OneofOptions{") + if this.UninterpretedOption != nil { + s = append(s, 
"UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumOptions{") + if this.AllowAlias != nil { + s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumValueOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.ServiceOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MethodOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.MethodOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.IdempotencyLevel != nil { + s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.UninterpretedOption{") + if this.Name 
!= nil { + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + } + if this.IdentifierValue != nil { + s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n") + } + if this.PositiveIntValue != nil { + s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n") + } + if this.NegativeIntValue != nil { + s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n") + } + if this.DoubleValue != nil { + s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n") + } + if this.StringValue != nil { + s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n") + } + if this.AggregateValue != nil { + s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption_NamePart) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.UninterpretedOption_NamePart{") + if this.NamePart != nil { + s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n") + } + if this.IsExtension != nil { + s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.SourceCodeInfo{") + if this.Location != nil { + s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo_Location) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.SourceCodeInfo_Location{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.Span != nil { + s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n") + } + if this.LeadingComments != nil { + s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n") + } + if this.TrailingComments != nil { + s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n") + } + if this.LeadingDetachedComments != nil { + s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GeneratedCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.GeneratedCodeInfo{") + if this.Annotation != nil { + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func 
(this *GeneratedCodeInfo_Annotation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.SourceFile != nil { + s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n") + } + if this.Begin != nil { + s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDescriptor(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string { + e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m) + if e == nil { + return "nil" + } + s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{" + keys := make([]int, 0, len(e)) + for k := range e { + keys = append(keys, int(k)) + } + sort.Ints(keys) + ss := []string{} + for _, k := range keys { + ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString()) + } + s += strings.Join(ss, ",") + "})" + return s +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/extender/code/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go new file mode 100644 index 000000000..e0846a357 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go @@ -0,0 +1,390 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package descriptor + +import ( + "strings" +) + +func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) { + if !msg.GetOptions().GetMapEntry() { + return nil, nil + } + return msg.GetField()[0], msg.GetField()[1] +} + +func dotToUnderscore(r rune) rune { + if r == '.' { + return '_' + } + return r +} + +func (field *FieldDescriptorProto) WireType() (wire int) { + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE: + return 1 + case FieldDescriptorProto_TYPE_FLOAT: + return 5 + case FieldDescriptorProto_TYPE_INT64: + return 0 + case FieldDescriptorProto_TYPE_UINT64: + return 0 + case FieldDescriptorProto_TYPE_INT32: + return 0 + case FieldDescriptorProto_TYPE_UINT32: + return 0 + case FieldDescriptorProto_TYPE_FIXED64: + return 1 + case FieldDescriptorProto_TYPE_FIXED32: + return 5 + case FieldDescriptorProto_TYPE_BOOL: + return 0 + case FieldDescriptorProto_TYPE_STRING: + return 2 + case FieldDescriptorProto_TYPE_GROUP: + return 2 + case FieldDescriptorProto_TYPE_MESSAGE: + return 2 + case FieldDescriptorProto_TYPE_BYTES: + return 2 + case FieldDescriptorProto_TYPE_ENUM: + return 0 + case FieldDescriptorProto_TYPE_SFIXED32: + return 5 + case FieldDescriptorProto_TYPE_SFIXED64: + return 1 + case FieldDescriptorProto_TYPE_SINT32: + return 0 + case FieldDescriptorProto_TYPE_SINT64: + return 0 + } + panic("unreachable") +} + +func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) { + packed := field.IsPacked() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) { + packed := field.IsPacked3() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey() []byte { + x := field.GetKeyUint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (field *FieldDescriptorProto) GetKey3() []byte { + x := field.GetKey3Uint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto { + msg := desc.GetMessage(packageName, messageName) + if msg == nil { + return nil + } + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto { + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+".")) + if nes != nil { + return nes + } + } + return nil +} + +func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+".")) + if res != nil { + return res + } + } + return nil +} + +func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto { + for _, file := range 
desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + if msg.GetName()+"."+nes.GetName() == typeName { + return nes + } + } + } + } + return nil +} + +func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + if msg.GetName()+"."+nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + } + } + return false +} + +func (msg *DescriptorProto) IsExtendable() bool { + return len(msg.GetExtensionRange()) > 0 +} + +func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." + typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetName() == fieldName { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." 
+ typeName
+	for _, file := range desc.GetFile() {
+		for _, ext := range file.GetExtension() {
+			if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) {
+				if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) {
+					continue
+				}
+			} else {
+				if ext.GetExtendee() != extendee {
+					continue
+				}
+			}
+			if ext.GetNumber() == fieldNum {
+				return file.GetPackage(), ext
+			}
+		}
+	}
+	return "", nil
+}
+
+func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) {
+	parent := desc.GetMessage(packageName, typeName)
+	if parent == nil {
+		return "", ""
+	}
+	field := parent.GetFieldDescriptor(fieldName)
+	if field == nil {
+		var extPackageName string
+		extPackageName, field = desc.FindExtension(packageName, typeName, fieldName)
+		if field == nil {
+			return "", ""
+		}
+		packageName = extPackageName
+	}
+	typeNames := strings.Split(field.GetTypeName(), ".")
+	if len(typeNames) == 1 {
+		msg := desc.GetMessage(packageName, typeName)
+		if msg == nil {
+			return "", ""
+		}
+		return packageName, msg.GetName()
+	}
+	if len(typeNames) > 2 {
+		for i := 1; i < len(typeNames)-1; i++ {
+			packageName = strings.Join(typeNames[1:len(typeNames)-i], ".")
+			typeName = strings.Join(typeNames[len(typeNames)-i:], ".")
+			msg := desc.GetMessage(packageName, typeName)
+			if msg != nil {
+				typeNames := strings.Split(msg.GetName(), ".")
+				if len(typeNames) == 1 {
+					return packageName, msg.GetName()
+				}
+				return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1]
+			}
+		}
+	}
+	return "", ""
+}
+
+func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto {
+	for _, field := range msg.GetField() {
+		if field.GetName() == fieldName {
+			return field
+		}
+	}
+	return nil
+}
+
+func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto {
+	for _, file := range desc.GetFile() {
+		if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
+			continue
+		}
+		for _, enum := range file.GetEnumType() {
+			if enum.GetName() == typeName {
+				return enum
+			}
+		}
+	}
+	return nil
+}
+
+func (f *FieldDescriptorProto) IsEnum() bool {
+	return *f.Type == FieldDescriptorProto_TYPE_ENUM
+}
+
+func (f *FieldDescriptorProto) IsMessage() bool {
+	return *f.Type == FieldDescriptorProto_TYPE_MESSAGE
+}
+
+func (f *FieldDescriptorProto) IsBytes() bool {
+	return *f.Type == FieldDescriptorProto_TYPE_BYTES
+}
+
+func (f *FieldDescriptorProto) IsRepeated() bool {
+	return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED
+}
+
+func (f *FieldDescriptorProto) IsString() bool {
+	return *f.Type == FieldDescriptorProto_TYPE_STRING
+}
+
+func (f *FieldDescriptorProto) IsBool() bool {
+	return *f.Type == FieldDescriptorProto_TYPE_BOOL
+}
+
+func (f *FieldDescriptorProto) IsRequired() bool {
+	return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED
+}
+
+func (f *FieldDescriptorProto) IsPacked() bool {
+	return f.Options != nil && f.GetOptions().GetPacked()
+}
+
+func (f *FieldDescriptorProto) IsPacked3() bool {
+	if f.IsRepeated() && f.IsScalar() {
+		if f.Options == nil || f.GetOptions().Packed == nil {
+			return true
+		}
+		return f.Options != nil && f.GetOptions().GetPacked()
+	}
+	return false
+}
+
+func (m *DescriptorProto) HasExtension() bool {
+	return len(m.ExtensionRange) > 0
+}
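The GetKeyUint64/GetKey helpers earlier in this file implement the standard protobuf tag encoding: a field's key is fieldNumber<<3 | wireType, written as a little-endian base-128 varint. A minimal, self-contained sketch of the same encoding (a hypothetical `main` package, not part of the vendored code):

package main

import "fmt"

// encodeKey mirrors FieldDescriptorProto.GetKey above: shift the field number
// left by three bits, OR in the wire type, then varint-encode the result.
func encodeKey(fieldNumber int32, wireType int) []byte {
	x := uint64(uint32(fieldNumber)<<3 | uint32(wireType))
	var buf []byte
	for x > 127 {
		buf = append(buf, 0x80|uint8(x&0x7F))
		x >>= 7
	}
	return append(buf, uint8(x))
}

func main() {
	fmt.Printf("%x\n", encodeKey(1, 2)) // 0a: field 1, length-delimited
	fmt.Printf("%x\n", encodeKey(2, 2)) // 12: field 2, length-delimited
}

These two values, 0xa and 0x12, are exactly the tag bytes written for TypeUrl and Value in the generated Any marshal code later in this diff.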
diff --git a/extender/code/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go b/extender/code/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go
new file mode 100644
index 000000000..ceadde6a5
--- /dev/null
+++ b/extender/code/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go
@@ -0,0 +1,101 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package sortkeys
+
+import (
+	"sort"
+)
+
+func Strings(l []string) {
+	sort.Strings(l)
+}
+
+func Float64s(l []float64) {
+	sort.Float64s(l)
+}
+
+func Float32s(l []float32) {
+	sort.Sort(Float32Slice(l))
+}
+
+func Int64s(l []int64) {
+	sort.Sort(Int64Slice(l))
+}
+
+func Int32s(l []int32) {
+	sort.Sort(Int32Slice(l))
+}
+
+func Uint64s(l []uint64) {
+	sort.Sort(Uint64Slice(l))
+}
+
+func Uint32s(l []uint32) {
+	sort.Sort(Uint32Slice(l))
+}
+
+func Bools(l []bool) {
+	sort.Sort(BoolSlice(l))
+}
+
+type BoolSlice []bool
+
+func (p BoolSlice) Len() int { return len(p) }
+func (p BoolSlice) Less(i, j int) bool { return !p[i] && p[j] } // false sorts before true; upstream's `return p[j]` is not a valid strict ordering
+func (p BoolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+type Int64Slice []int64
+
+func (p Int64Slice) Len() int { return len(p) }
+func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+type Int32Slice []int32
+
+func (p Int32Slice) Len() int { return len(p) }
+func (p Int32Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Int32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+type Uint64Slice []uint64
+
+func (p Uint64Slice) Len() int { return len(p) }
+func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+type Uint32Slice []uint32
+
+func (p Uint32Slice) Len() int { return len(p) }
+func (p Uint32Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Uint32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+type Float32Slice []float32
+
+func (p Float32Slice) Len() int { return len(p) }
+func (p Float32Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Float32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
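The sortkeys package exists so that gogo-generated code can iterate protobuf map fields in a deterministic order when marshaling. A small usage sketch (hypothetical `main` package, using only the functions shown above):

package main

import (
	"fmt"

	"github.com/gogo/protobuf/sortkeys"
)

func main() {
	m := map[uint64]string{3: "c", 1: "a", 2: "b"}
	keys := make([]uint64, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sortkeys.Uint64s(keys) // sort in place for a stable iteration order
	for _, k := range keys {
		fmt.Println(k, m[k]) // always prints 1 a, 2 b, 3 c
	}
}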
diff --git a/extender/code/vendor/github.com/gogo/protobuf/types/any.go b/extender/code/vendor/github.com/gogo/protobuf/types/any.go
new file mode 100644
index 000000000..df4787de3
--- /dev/null
+++ b/extender/code/vendor/github.com/gogo/protobuf/types/any.go
@@ -0,0 +1,140 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package types
+
+// This file implements functions to marshal proto.Message to/from
+// google.protobuf.Any message.
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/gogo/protobuf/proto"
+)
+
+const googleApis = "type.googleapis.com/"
+
+// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
+//
+// Note that regular type assertions should be done using the Is
+// function. AnyMessageName is provided for less common use cases like filtering a
+// sequence of Any messages based on a set of allowed message type names.
+func AnyMessageName(any *Any) (string, error) {
+	if any == nil {
+		return "", fmt.Errorf("message is nil")
+	}
+	slash := strings.LastIndex(any.TypeUrl, "/")
+	if slash < 0 {
+		return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
+	}
+	return any.TypeUrl[slash+1:], nil
+}
+
+// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
+func MarshalAny(pb proto.Message) (*Any, error) {
+	value, err := proto.Marshal(pb)
+	if err != nil {
+		return nil, err
+	}
+	return &Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
+}
+
+// DynamicAny is a value that can be passed to UnmarshalAny to automatically
+// allocate a proto.Message for the type specified in a google.protobuf.Any
+// message. The allocated message is stored in the embedded proto.Message.
+//
+// Example:
+//
+//	var x ptypes.DynamicAny
+//	if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
+//	fmt.Printf("unmarshaled message: %v", x.Message)
+type DynamicAny struct {
+	proto.Message
+}
+
+// EmptyAny returns a new proto.Message of the type specified in a
+// google.protobuf.Any message. It returns an error if the corresponding message
+// type isn't linked in.
+func EmptyAny(any *Any) (proto.Message, error) {
+	aname, err := AnyMessageName(any)
+	if err != nil {
+		return nil, err
+	}
+
+	t := proto.MessageType(aname)
+	if t == nil {
+		return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
+	}
+	return reflect.New(t.Elem()).Interface().(proto.Message), nil
+}
+
+// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
+// message and places the decoded result in pb. It returns an error if the type of
+// the contents of the Any message does not match the type of pb.
+//
+// pb can be a proto.Message, or a *DynamicAny.
+func UnmarshalAny(any *Any, pb proto.Message) error {
+	if d, ok := pb.(*DynamicAny); ok {
+		if d.Message == nil {
+			var err error
+			d.Message, err = EmptyAny(any)
+			if err != nil {
+				return err
+			}
+		}
+		return UnmarshalAny(any, d.Message)
+	}
+
+	aname, err := AnyMessageName(any)
+	if err != nil {
+		return err
+	}
+
+	mname := proto.MessageName(pb)
+	if aname != mname {
+		return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
+	}
+	return proto.Unmarshal(any.Value, pb)
+}
+
+// Is returns true if any value contains a given message type.
+func Is(any *Any, pb proto.Message) bool {
+	// The following is equivalent to AnyMessageName(any) == proto.MessageName(pb),
+	// but it avoids scanning TypeUrl for the slash.
+	if any == nil {
+		return false
+	}
+	name := proto.MessageName(pb)
+	prefix := len(any.TypeUrl) - len(name)
+	return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name
+}
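A round-trip sketch of the helpers above (hypothetical `main` package; types.Mixin is simply a convenient registered message from this same vendored package, and the import assumes the vendored gogo/protobuf packages are resolvable):

package main

import (
	"fmt"

	"github.com/gogo/protobuf/types"
)

func main() {
	// Pack: TypeUrl becomes "type.googleapis.com/google.protobuf.Mixin".
	src := &types.Mixin{Name: "google.acl.v1.AccessControl", Root: "acls"}
	packed, err := types.MarshalAny(src)
	if err != nil {
		panic(err)
	}

	// Is checks the packed type without unmarshaling.
	fmt.Println(types.Is(packed, &types.Mixin{})) // true

	// Unpack into a known concrete type...
	dst := &types.Mixin{}
	if err := types.UnmarshalAny(packed, dst); err != nil {
		panic(err)
	}

	// ...or let DynamicAny allocate the right type from the proto registry.
	var dyn types.DynamicAny
	if err := types.UnmarshalAny(packed, &dyn); err != nil {
		panic(err)
	}
	fmt.Println(dyn.Message.(*types.Mixin).GetRoot()) // "acls"
}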
diff --git a/extender/code/vendor/github.com/gogo/protobuf/types/any.pb.go b/extender/code/vendor/github.com/gogo/protobuf/types/any.pb.go
new file mode 100644
index 000000000..98e269d54
--- /dev/null
+++ b/extender/code/vendor/github.com/gogo/protobuf/types/any.pb.go
@@ -0,0 +1,697 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: google/protobuf/any.proto
+
+package types
+
+import (
+	bytes "bytes"
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+	reflect "reflect"
+	strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// Protobuf library provides support to pack/unpack Any values in the form
+// of utility functions or additional generated methods of the Any type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+//     Foo foo = ...;
+//     Any any;
+//     any.PackFrom(foo);
+//     ...
+//     if (any.UnpackTo(&foo)) {
+//       ...
+//     }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+//     Foo foo = ...;
+//     Any any = Any.pack(foo);
+//     ...
+//     if (any.is(Foo.class)) {
+//       foo = any.unpack(Foo.class);
+//     }
+//
+// Example 3: Pack and unpack a message in Python.
+//
+//     foo = Foo(...)
+//     any = Any()
+//     any.Pack(foo)
+//     ...
+//     if any.Is(Foo.DESCRIPTOR):
+//       any.Unpack(foo)
+//       ...
+//
+// Example 4: Pack and unpack a message in Go
+//
+//     foo := &pb.Foo{...}
+//     any, err := ptypes.MarshalAny(foo)
+//     ...
+//     foo := &pb.Foo{}
+//     if err := ptypes.UnmarshalAny(any, foo); err != nil {
+//       ...
+//     }
+//
+// The pack methods provided by protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+//     package google.profile;
+//     message Person {
+//       string first_name = 1;
+//       string last_name = 2;
+//     }
+//
+//     {
+//       "@type": "type.googleapis.com/google.profile.Person",
+//       "firstName": <string>,
+//       "lastName": <string>
+//     }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+//     {
+//       "@type": "type.googleapis.com/google.protobuf.Duration",
+//       "value": "1.212s"
+//     }
+//
+type Any struct {
+	// A URL/resource name that uniquely identifies the type of the serialized
+	// protocol buffer message. This string must contain at least
+	// one "/" character. The last segment of the URL's path must represent
+	// the fully qualified name of the type (as in
+	// `path/google.protobuf.Duration`). The name should be in a canonical form
+	// (e.g., leading "." is not accepted).
+	//
+	// In practice, teams usually precompile into the binary all types that they
+	// expect it to use in the context of Any. However, for URLs which use the
+	// scheme `http`, `https`, or no scheme, one can optionally set up a type
+	// server that maps type URLs to message definitions as follows:
+	//
+	// * If no scheme is provided, `https` is assumed.
+	// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+	//   value in binary format, or produce an error.
+	// * Applications are allowed to cache lookup results based on the
+	//   URL, or have them precompiled into a binary to avoid any
+	//   lookup. Therefore, binary compatibility needs to be preserved
+	//   on changes to types. (Use versioned type names to manage
+	//   breaking changes.)
+	//
+	// Note: this functionality is not currently available in the official
+	// protobuf release, and it is not used for type URLs beginning with
+	// type.googleapis.com.
+	//
+	// Schemes other than `http`, `https` (or the empty scheme) might be
+	// used with implementation specific semantics.
+	//
+	TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
+	// Must be a valid serialized protocol buffer of the above specified type.
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Any) Reset() { *m = Any{} } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { + return fileDescriptor_b53526c13ae22eb4, []int{0} +} +func (*Any) XXX_WellKnownType() string { return "Any" } +func (m *Any) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Any.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(m, src) +} +func (m *Any) XXX_Size() int { + return m.Size() +} +func (m *Any) XXX_DiscardUnknown() { + xxx_messageInfo_Any.DiscardUnknown(m) +} + +var xxx_messageInfo_Any proto.InternalMessageInfo + +func (m *Any) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Any) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (*Any) XXX_MessageName() string { + return "google.protobuf.Any" +} +func init() { + proto.RegisterType((*Any)(nil), "google.protobuf.Any") +} + +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) } + +var fileDescriptor_b53526c13ae22eb4 = []byte{ + // 211 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, + 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, + 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, + 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, + 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xaa, 0xbf, 0xf1, 0x50, 0x8e, + 0xe1, 0xc3, 0x43, 0x39, 0xc6, 0x1f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9, 0x31, 0xae, 0x78, 0x24, + 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, + 0x24, 0xc7, 0xf0, 0x01, 0x24, 0xfe, 0x58, 0x8e, 0xf1, 0xc4, 0x63, 0x39, 0x46, 0x2e, 0xe1, 0xe4, + 0xfc, 0x5c, 0x3d, 0x34, 0xeb, 0x9d, 0x38, 0x1c, 0xf3, 0x2a, 0x03, 0x40, 0x9c, 0x00, 0xc6, 0x28, + 0x56, 0x90, 0x8d, 0xc5, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x94, + 0x06, 0x40, 0x95, 0xea, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80, 0x94, + 0x25, 0xb1, 0x81, 0xcd, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb7, 0x81, 0x82, 0xd3, 0xed, + 0x00, 0x00, 0x00, +} + +func (this *Any) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Any) + if !ok { + that2, ok := that.(Any) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.TypeUrl != that1.TypeUrl { + if this.TypeUrl < that1.TypeUrl { + return -1 + } + return 1 + } + if c := bytes.Compare(this.Value, that1.Value); c != 0 { + return c + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Any) Equal(that 
interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Any) + if !ok { + that2, ok := that.(Any) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.TypeUrl != that1.TypeUrl { + return false + } + if !bytes.Equal(this.Value, that1.Value) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Any) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Any{") + s = append(s, "TypeUrl: "+fmt.Sprintf("%#v", this.TypeUrl)+",\n") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringAny(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Any) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Any) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Any) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintAny(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = encodeVarintAny(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintAny(dAtA []byte, offset int, v uint64) int { + offset -= sovAny(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedAny(r randyAny, easy bool) *Any { + this := &Any{} + this.TypeUrl = string(randStringAny(r)) + v1 := r.Intn(100) + this.Value = make([]byte, v1) + for i := 0; i < v1; i++ { + this.Value[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedAny(r, 3) + } + return this +} + +type randyAny interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneAny(r randyAny) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringAny(r randyAny) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneAny(r) + } + return string(tmps) +} +func randUnrecognizedAny(r randyAny, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldAny(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldAny(dAtA []byte, r randyAny, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + 
switch wire { + case 0: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateAny(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateAny(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateAny(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Any) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + sovAny(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovAny(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovAny(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozAny(x uint64) (n int) { + return sovAny(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Any) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Any{`, + `TypeUrl:` + fmt.Sprintf("%v", this.TypeUrl) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringAny(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Any) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAny + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Any: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Any: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAny + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAny + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAny + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAny + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAny + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthAny + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAny(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAny + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthAny + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAny(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthAny + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupAny + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthAny + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthAny = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAny = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupAny = fmt.Errorf("proto: unexpected end of group") +) diff --git a/extender/code/vendor/github.com/gogo/protobuf/types/api.pb.go b/extender/code/vendor/github.com/gogo/protobuf/types/api.pb.go new file mode 100644 index 000000000..58bf4b53b --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/types/api.pb.go @@ -0,0 +1,2143 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/api.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Api is a light-weight descriptor for an API Interface. 
+//
+// Interfaces are also described as "protocol buffer services" in some contexts,
+// such as by the "service" keyword in a .proto file, but they are different
+// from API Services, which represent a concrete implementation of an interface
+// as opposed to simply a description of methods and bindings. They are also
+// sometimes simply referred to as "APIs" in other contexts, such as the name of
+// this message itself. See https://cloud.google.com/apis/design/glossary for
+// detailed terminology.
+type Api struct {
+	// The fully qualified name of this interface, including package name
+	// followed by the interface's simple name.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// The methods of this interface, in unspecified order.
+	Methods []*Method `protobuf:"bytes,2,rep,name=methods,proto3" json:"methods,omitempty"`
+	// Any metadata attached to the interface.
+	Options []*Option `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"`
+	// A version string for this interface. If specified, must have the form
+	// `major-version.minor-version`, as in `1.10`. If the minor version is
+	// omitted, it defaults to zero. If the entire version field is empty, the
+	// major version is derived from the package name, as outlined below. If the
+	// field is not empty, the version in the package name will be verified to be
+	// consistent with what is provided here.
+	//
+	// The versioning schema uses [semantic
+	// versioning](http://semver.org) where the major version number
+	// indicates a breaking change and the minor version an additive,
+	// non-breaking change. Both version numbers are signals to users
+	// what to expect from different versions, and should be carefully
+	// chosen based on the product plan.
+	//
+	// The major version is also reflected in the package name of the
+	// interface, which must end in `v<major-version>`, as in
+	// `google.feature.v1`. For major versions 0 and 1, the suffix can
+	// be omitted. Zero major versions must only be used for
+	// experimental, non-GA interfaces.
+	//
+	//
+	Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"`
+	// Source context for the protocol buffer service represented by this
+	// message.
+	SourceContext *SourceContext `protobuf:"bytes,5,opt,name=source_context,json=sourceContext,proto3" json:"source_context,omitempty"`
+	// Included interfaces. See [Mixin][].
+	Mixins []*Mixin `protobuf:"bytes,6,rep,name=mixins,proto3" json:"mixins,omitempty"`
+	// The source syntax of the service.
+ Syntax Syntax `protobuf:"varint,7,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Api) Reset() { *m = Api{} } +func (*Api) ProtoMessage() {} +func (*Api) Descriptor() ([]byte, []int) { + return fileDescriptor_a2ec32096296c143, []int{0} +} +func (m *Api) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Api) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Api.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Api) XXX_Merge(src proto.Message) { + xxx_messageInfo_Api.Merge(m, src) +} +func (m *Api) XXX_Size() int { + return m.Size() +} +func (m *Api) XXX_DiscardUnknown() { + xxx_messageInfo_Api.DiscardUnknown(m) +} + +var xxx_messageInfo_Api proto.InternalMessageInfo + +func (m *Api) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Api) GetMethods() []*Method { + if m != nil { + return m.Methods + } + return nil +} + +func (m *Api) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Api) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *Api) GetSourceContext() *SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +func (m *Api) GetMixins() []*Mixin { + if m != nil { + return m.Mixins + } + return nil +} + +func (m *Api) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (*Api) XXX_MessageName() string { + return "google.protobuf.Api" +} + +// Method represents a method of an API interface. +type Method struct { + // The simple name of this method. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A URL of the input message type. + RequestTypeUrl string `protobuf:"bytes,2,opt,name=request_type_url,json=requestTypeUrl,proto3" json:"request_type_url,omitempty"` + // If true, the request is streamed. + RequestStreaming bool `protobuf:"varint,3,opt,name=request_streaming,json=requestStreaming,proto3" json:"request_streaming,omitempty"` + // The URL of the output message type. + ResponseTypeUrl string `protobuf:"bytes,4,opt,name=response_type_url,json=responseTypeUrl,proto3" json:"response_type_url,omitempty"` + // If true, the response is streamed. + ResponseStreaming bool `protobuf:"varint,5,opt,name=response_streaming,json=responseStreaming,proto3" json:"response_streaming,omitempty"` + // Any metadata attached to the method. + Options []*Option `protobuf:"bytes,6,rep,name=options,proto3" json:"options,omitempty"` + // The source syntax of this method. 
+ Syntax Syntax `protobuf:"varint,7,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Method) Reset() { *m = Method{} } +func (*Method) ProtoMessage() {} +func (*Method) Descriptor() ([]byte, []int) { + return fileDescriptor_a2ec32096296c143, []int{1} +} +func (m *Method) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Method) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Method.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Method) XXX_Merge(src proto.Message) { + xxx_messageInfo_Method.Merge(m, src) +} +func (m *Method) XXX_Size() int { + return m.Size() +} +func (m *Method) XXX_DiscardUnknown() { + xxx_messageInfo_Method.DiscardUnknown(m) +} + +var xxx_messageInfo_Method proto.InternalMessageInfo + +func (m *Method) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Method) GetRequestTypeUrl() string { + if m != nil { + return m.RequestTypeUrl + } + return "" +} + +func (m *Method) GetRequestStreaming() bool { + if m != nil { + return m.RequestStreaming + } + return false +} + +func (m *Method) GetResponseTypeUrl() string { + if m != nil { + return m.ResponseTypeUrl + } + return "" +} + +func (m *Method) GetResponseStreaming() bool { + if m != nil { + return m.ResponseStreaming + } + return false +} + +func (m *Method) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Method) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (*Method) XXX_MessageName() string { + return "google.protobuf.Method" +} + +// Declares an API Interface to be included in this interface. The including +// interface must redeclare all the methods from the included interface, but +// documentation and options are inherited as follows: +// +// - If after comment and whitespace stripping, the documentation +// string of the redeclared method is empty, it will be inherited +// from the original method. +// +// - Each annotation belonging to the service config (http, +// visibility) which is not set in the redeclared method will be +// inherited. +// +// - If an http annotation is inherited, the path pattern will be +// modified as follows. Any version prefix will be replaced by the +// version of the including interface plus the [root][] path if +// specified. +// +// Example of a simple mixin: +// +// package google.acl.v1; +// service AccessControl { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v1/{resource=**}:getAcl"; +// } +// } +// +// package google.storage.v2; +// service Storage { +// rpc GetAcl(GetAclRequest) returns (Acl); +// +// // Get a data record. +// rpc GetData(GetDataRequest) returns (Data) { +// option (google.api.http).get = "/v2/{resource=**}"; +// } +// } +// +// Example of a mixin configuration: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// +// The mixin construct implies that all methods in `AccessControl` are +// also declared with same name and request/response types in +// `Storage`. 
A documentation generator or annotation processor will +// see the effective `Storage.GetAcl` method after inherting +// documentation and annotations as follows: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/{resource=**}:getAcl"; +// } +// ... +// } +// +// Note how the version in the path pattern changed from `v1` to `v2`. +// +// If the `root` field in the mixin is specified, it should be a +// relative path under which inherited HTTP paths are placed. Example: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// root: acls +// +// This implies the following inherited HTTP annotation: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; +// } +// ... +// } +type Mixin struct { + // The fully qualified name of the interface which is included. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // If non-empty specifies a path under which inherited HTTP paths + // are rooted. + Root string `protobuf:"bytes,2,opt,name=root,proto3" json:"root,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Mixin) Reset() { *m = Mixin{} } +func (*Mixin) ProtoMessage() {} +func (*Mixin) Descriptor() ([]byte, []int) { + return fileDescriptor_a2ec32096296c143, []int{2} +} +func (m *Mixin) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Mixin) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Mixin.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Mixin) XXX_Merge(src proto.Message) { + xxx_messageInfo_Mixin.Merge(m, src) +} +func (m *Mixin) XXX_Size() int { + return m.Size() +} +func (m *Mixin) XXX_DiscardUnknown() { + xxx_messageInfo_Mixin.DiscardUnknown(m) +} + +var xxx_messageInfo_Mixin proto.InternalMessageInfo + +func (m *Mixin) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Mixin) GetRoot() string { + if m != nil { + return m.Root + } + return "" +} + +func (*Mixin) XXX_MessageName() string { + return "google.protobuf.Mixin" +} +func init() { + proto.RegisterType((*Api)(nil), "google.protobuf.Api") + proto.RegisterType((*Method)(nil), "google.protobuf.Method") + proto.RegisterType((*Mixin)(nil), "google.protobuf.Mixin") +} + +func init() { proto.RegisterFile("google/protobuf/api.proto", fileDescriptor_a2ec32096296c143) } + +var fileDescriptor_a2ec32096296c143 = []byte{ + // 467 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x91, 0x31, 0x6f, 0x13, 0x31, + 0x14, 0xc7, 0xeb, 0xbb, 0xe4, 0x52, 0x5c, 0x91, 0x82, 0x91, 0xc0, 0x64, 0xb0, 0x4e, 0x15, 0xc3, + 0x09, 0xc4, 0x45, 0x94, 0x4f, 0xd0, 0x20, 0xd4, 0x01, 0x21, 0xa2, 0x0b, 0x08, 0x89, 0x25, 0x4a, + 0x83, 0x09, 0x96, 0xee, 0x6c, 0x63, 0x3b, 0x90, 0x4c, 0xf0, 0x59, 0x98, 0x10, 0x23, 0xdf, 0x80, + 0xad, 0x23, 0x23, 0x23, 0xb9, 0x2e, 0x8c, 0x1d, 0x19, 0x91, 0x7d, 0xe7, 0xa6, 0x5c, 0x83, 0x04, + 0x9b, 0xdf, 0xfb, 0xff, 0xfc, 0xf7, 0x7b, 0x7f, 0xc3, 0x9b, 0x33, 0x21, 0x66, 0x39, 0xed, 0x4b, + 0x25, 0x8c, 0x38, 0x9a, 0xbf, 0xea, 0x4f, 0x24, 0x4b, 0x5d, 0x81, 0x76, 0x2b, 0x29, 
0xf5, 0x52, + 0xef, 0x56, 0x93, 0xd5, 0x62, 0xae, 0xa6, 0x74, 0x3c, 0x15, 0xdc, 0xd0, 0x85, 0xa9, 0xc0, 0x5e, + 0xaf, 0x49, 0x99, 0xa5, 0xac, 0x4d, 0xf6, 0xbe, 0x06, 0x30, 0x3c, 0x90, 0x0c, 0x21, 0xd8, 0xe2, + 0x93, 0x82, 0x62, 0x10, 0x83, 0xe4, 0x52, 0xe6, 0xce, 0xe8, 0x1e, 0xec, 0x14, 0xd4, 0xbc, 0x16, + 0x2f, 0x35, 0x0e, 0xe2, 0x30, 0xd9, 0xd9, 0xbf, 0x91, 0x36, 0x06, 0x48, 0x1f, 0x3b, 0x3d, 0xf3, + 0x9c, 0xbd, 0x22, 0xa4, 0x61, 0x82, 0x6b, 0x1c, 0xfe, 0xe5, 0xca, 0x13, 0xa7, 0x67, 0x9e, 0x43, + 0x18, 0x76, 0xde, 0x52, 0xa5, 0x99, 0xe0, 0xb8, 0xe5, 0x1e, 0xf7, 0x25, 0x7a, 0x08, 0xbb, 0x7f, + 0xee, 0x83, 0xdb, 0x31, 0x48, 0x76, 0xf6, 0xc9, 0x05, 0xcf, 0x91, 0xc3, 0x1e, 0x54, 0x54, 0x76, + 0x59, 0x9f, 0x2f, 0x51, 0x0a, 0xa3, 0x82, 0x2d, 0x18, 0xd7, 0x38, 0x72, 0x23, 0x5d, 0xbf, 0xb8, + 0x85, 0x95, 0xb3, 0x9a, 0x42, 0x7d, 0x18, 0xe9, 0x25, 0x37, 0x93, 0x05, 0xee, 0xc4, 0x20, 0xe9, + 0x6e, 0x58, 0x61, 0xe4, 0xe4, 0xac, 0xc6, 0xf6, 0xbe, 0x04, 0x30, 0xaa, 0x82, 0xd8, 0x18, 0x63, + 0x02, 0xaf, 0x28, 0xfa, 0x66, 0x4e, 0xb5, 0x19, 0xdb, 0xe0, 0xc7, 0x73, 0x95, 0xe3, 0xc0, 0xe9, + 0xdd, 0xba, 0xff, 0x74, 0x29, 0xe9, 0x33, 0x95, 0xa3, 0x3b, 0xf0, 0xaa, 0x27, 0xb5, 0x51, 0x74, + 0x52, 0x30, 0x3e, 0xc3, 0x61, 0x0c, 0x92, 0xed, 0xcc, 0x5b, 0x8c, 0x7c, 0x1f, 0xdd, 0xb6, 0xb0, + 0x96, 0x82, 0x6b, 0xba, 0xf6, 0xad, 0x12, 0xdc, 0xf5, 0x82, 0x37, 0xbe, 0x0b, 0xd1, 0x19, 0xbb, + 0x76, 0x6e, 0x3b, 0xe7, 0x33, 0x97, 0xb5, 0xf5, 0xb9, 0x5f, 0x8c, 0xfe, 0xf1, 0x17, 0xff, 0x3b, + 0xb4, 0x3e, 0x6c, 0xbb, 0xd8, 0x37, 0x46, 0x86, 0x60, 0x4b, 0x09, 0x61, 0xea, 0x98, 0xdc, 0x79, + 0xf0, 0xfe, 0xfb, 0x8a, 0x6c, 0x9d, 0xae, 0x08, 0xf8, 0xb5, 0x22, 0xe0, 0x43, 0x49, 0xc0, 0xa7, + 0x92, 0x80, 0xe3, 0x92, 0x80, 0x6f, 0x25, 0x01, 0x3f, 0x4a, 0x02, 0x7e, 0x96, 0x64, 0xeb, 0xd4, + 0xf6, 0x4f, 0x08, 0x38, 0x3e, 0x21, 0x00, 0x5e, 0x9b, 0x8a, 0xa2, 0x39, 0xc6, 0x60, 0xfb, 0x40, + 0xb2, 0xa1, 0x2d, 0x86, 0xe0, 0x45, 0xdb, 0xe6, 0xa6, 0x3f, 0x06, 0xe1, 0xe1, 0x70, 0xf0, 0x39, + 0x20, 0x87, 0x15, 0x3a, 0xf4, 0x13, 0x3f, 0xa7, 0x79, 0xfe, 0x88, 0x8b, 0x77, 0xdc, 0xc6, 0xa8, + 0x8f, 0x22, 0xe7, 0x71, 0xff, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2b, 0x64, 0x40, 0x40, 0xa1, + 0x03, 0x00, 0x00, +} + +func (this *Api) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Api) + if !ok { + that2, ok := that.(Api) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if len(this.Methods) != len(that1.Methods) { + if len(this.Methods) < len(that1.Methods) { + return -1 + } + return 1 + } + for i := range this.Methods { + if c := this.Methods[i].Compare(that1.Methods[i]); c != 0 { + return c + } + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if this.Version != that1.Version { + if this.Version < that1.Version { + return -1 + } + return 1 + } + if c := this.SourceContext.Compare(that1.SourceContext); c != 0 { + return c + } + if len(this.Mixins) != len(that1.Mixins) { + if len(this.Mixins) < len(that1.Mixins) { + return -1 + } + return 1 + } + for i := range this.Mixins { + if c := this.Mixins[i].Compare(that1.Mixins[i]); c != 0 { + return c + } + } + if this.Syntax != 
that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Method) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Method) + if !ok { + that2, ok := that.(Method) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.RequestTypeUrl != that1.RequestTypeUrl { + if this.RequestTypeUrl < that1.RequestTypeUrl { + return -1 + } + return 1 + } + if this.RequestStreaming != that1.RequestStreaming { + if !this.RequestStreaming { + return -1 + } + return 1 + } + if this.ResponseTypeUrl != that1.ResponseTypeUrl { + if this.ResponseTypeUrl < that1.ResponseTypeUrl { + return -1 + } + return 1 + } + if this.ResponseStreaming != that1.ResponseStreaming { + if !this.ResponseStreaming { + return -1 + } + return 1 + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if this.Syntax != that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Mixin) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Mixin) + if !ok { + that2, ok := that.(Mixin) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.Root != that1.Root { + if this.Root < that1.Root { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Api) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Api) + if !ok { + that2, ok := that.(Api) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if len(this.Methods) != len(that1.Methods) { + return false + } + for i := range this.Methods { + if !this.Methods[i].Equal(that1.Methods[i]) { + return false + } + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if this.Version != that1.Version { + return false + } + if !this.SourceContext.Equal(that1.SourceContext) { + return false + } + if len(this.Mixins) != len(that1.Mixins) { + return false + } + for i := range this.Mixins { + if !this.Mixins[i].Equal(that1.Mixins[i]) { + return false + } + } + if this.Syntax != that1.Syntax { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Method) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Method) + if !ok { + that2, ok := that.(Method) + if 
ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.RequestTypeUrl != that1.RequestTypeUrl { + return false + } + if this.RequestStreaming != that1.RequestStreaming { + return false + } + if this.ResponseTypeUrl != that1.ResponseTypeUrl { + return false + } + if this.ResponseStreaming != that1.ResponseStreaming { + return false + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if this.Syntax != that1.Syntax { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Mixin) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Mixin) + if !ok { + that2, ok := that.(Mixin) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Root != that1.Root { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Api) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&types.Api{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Methods != nil { + s = append(s, "Methods: "+fmt.Sprintf("%#v", this.Methods)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + s = append(s, "Version: "+fmt.Sprintf("%#v", this.Version)+",\n") + if this.SourceContext != nil { + s = append(s, "SourceContext: "+fmt.Sprintf("%#v", this.SourceContext)+",\n") + } + if this.Mixins != nil { + s = append(s, "Mixins: "+fmt.Sprintf("%#v", this.Mixins)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Method) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&types.Method{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "RequestTypeUrl: "+fmt.Sprintf("%#v", this.RequestTypeUrl)+",\n") + s = append(s, "RequestStreaming: "+fmt.Sprintf("%#v", this.RequestStreaming)+",\n") + s = append(s, "ResponseTypeUrl: "+fmt.Sprintf("%#v", this.ResponseTypeUrl)+",\n") + s = append(s, "ResponseStreaming: "+fmt.Sprintf("%#v", this.ResponseStreaming)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Mixin) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Mixin{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Root: "+fmt.Sprintf("%#v", this.Root)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return 
strings.Join(s, "") +} +func valueToGoStringApi(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Api) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Api) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Api) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Syntax != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Syntax)) + i-- + dAtA[i] = 0x38 + } + if len(m.Mixins) > 0 { + for iNdEx := len(m.Mixins) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Mixins[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.SourceContext != nil { + { + size, err := m.SourceContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintApi(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x22 + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Methods) > 0 { + for iNdEx := len(m.Methods) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Methods[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Method) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Method) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Method) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Syntax != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Syntax)) + i-- + dAtA[i] = 0x38 + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.ResponseStreaming { + i-- + if m.ResponseStreaming { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.ResponseTypeUrl) > 0 { + i -= len(m.ResponseTypeUrl) + copy(dAtA[i:], m.ResponseTypeUrl) + i = encodeVarintApi(dAtA, i, uint64(len(m.ResponseTypeUrl))) + i-- + 
dAtA[i] = 0x22 + } + if m.RequestStreaming { + i-- + if m.RequestStreaming { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.RequestTypeUrl) > 0 { + i -= len(m.RequestTypeUrl) + copy(dAtA[i:], m.RequestTypeUrl) + i = encodeVarintApi(dAtA, i, uint64(len(m.RequestTypeUrl))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Mixin) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Mixin) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Mixin) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Root) > 0 { + i -= len(m.Root) + copy(dAtA[i:], m.Root) + i = encodeVarintApi(dAtA, i, uint64(len(m.Root))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintApi(dAtA []byte, offset int, v uint64) int { + offset -= sovApi(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedApi(r randyApi, easy bool) *Api { + this := &Api{} + this.Name = string(randStringApi(r)) + if r.Intn(5) != 0 { + v1 := r.Intn(5) + this.Methods = make([]*Method, v1) + for i := 0; i < v1; i++ { + this.Methods[i] = NewPopulatedMethod(r, easy) + } + } + if r.Intn(5) != 0 { + v2 := r.Intn(5) + this.Options = make([]*Option, v2) + for i := 0; i < v2; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + this.Version = string(randStringApi(r)) + if r.Intn(5) != 0 { + this.SourceContext = NewPopulatedSourceContext(r, easy) + } + if r.Intn(5) != 0 { + v3 := r.Intn(5) + this.Mixins = make([]*Mixin, v3) + for i := 0; i < v3; i++ { + this.Mixins[i] = NewPopulatedMixin(r, easy) + } + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedApi(r, 8) + } + return this +} + +func NewPopulatedMethod(r randyApi, easy bool) *Method { + this := &Method{} + this.Name = string(randStringApi(r)) + this.RequestTypeUrl = string(randStringApi(r)) + this.RequestStreaming = bool(bool(r.Intn(2) == 0)) + this.ResponseTypeUrl = string(randStringApi(r)) + this.ResponseStreaming = bool(bool(r.Intn(2) == 0)) + if r.Intn(5) != 0 { + v4 := r.Intn(5) + this.Options = make([]*Option, v4) + for i := 0; i < v4; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedApi(r, 8) + } + return this +} + +func NewPopulatedMixin(r randyApi, easy bool) *Mixin { + this := &Mixin{} + this.Name = string(randStringApi(r)) + this.Root = string(randStringApi(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedApi(r, 3) + } + return this +} + +type randyApi interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneApi(r 
randyApi) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringApi(r randyApi) string { + v5 := r.Intn(100) + tmps := make([]rune, v5) + for i := 0; i < v5; i++ { + tmps[i] = randUTF8RuneApi(r) + } + return string(tmps) +} +func randUnrecognizedApi(r randyApi, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldApi(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldApi(dAtA []byte, r randyApi, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + v6 := r.Int63() + if r.Intn(2) == 0 { + v6 *= -1 + } + dAtA = encodeVarintPopulateApi(dAtA, uint64(v6)) + case 1: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateApi(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateApi(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Api) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Methods) > 0 { + for _, e := range m.Methods { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.SourceContext != nil { + l = m.SourceContext.Size() + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Mixins) > 0 { + for _, e := range m.Mixins { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if m.Syntax != 0 { + n += 1 + sovApi(uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Method) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.RequestTypeUrl) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.RequestStreaming { + n += 2 + } + l = len(m.ResponseTypeUrl) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.ResponseStreaming { + n += 2 + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if m.Syntax != 0 { + n += 1 + sovApi(uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Mixin) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Root) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovApi(x uint64) (n int) { + return 
(math_bits.Len64(x|1) + 6) / 7 +} +func sozApi(x uint64) (n int) { + return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Api) String() string { + if this == nil { + return "nil" + } + repeatedStringForMethods := "[]*Method{" + for _, f := range this.Methods { + repeatedStringForMethods += strings.Replace(f.String(), "Method", "Method", 1) + "," + } + repeatedStringForMethods += "}" + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(fmt.Sprintf("%v", f), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + repeatedStringForMixins := "[]*Mixin{" + for _, f := range this.Mixins { + repeatedStringForMixins += strings.Replace(f.String(), "Mixin", "Mixin", 1) + "," + } + repeatedStringForMixins += "}" + s := strings.Join([]string{`&Api{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Methods:` + repeatedStringForMethods + `,`, + `Options:` + repeatedStringForOptions + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `SourceContext:` + strings.Replace(fmt.Sprintf("%v", this.SourceContext), "SourceContext", "SourceContext", 1) + `,`, + `Mixins:` + repeatedStringForMixins + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Method) String() string { + if this == nil { + return "nil" + } + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(fmt.Sprintf("%v", f), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&Method{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `RequestTypeUrl:` + fmt.Sprintf("%v", this.RequestTypeUrl) + `,`, + `RequestStreaming:` + fmt.Sprintf("%v", this.RequestStreaming) + `,`, + `ResponseTypeUrl:` + fmt.Sprintf("%v", this.ResponseTypeUrl) + `,`, + `ResponseStreaming:` + fmt.Sprintf("%v", this.ResponseStreaming) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Mixin) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Mixin{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Root:` + fmt.Sprintf("%v", this.Root) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringApi(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Api) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Api: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Api: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Methods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Methods = append(m.Methods, &Method{}) + if err := m.Methods[len(m.Methods)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceContext == nil { + m.SourceContext = &SourceContext{} + } + if err := m.SourceContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mixins", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mixins = append(m.Mixins, &Mixin{}) + if err := m.Mixins[len(m.Mixins)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= Syntax(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Method) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Method: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Method: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestTypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequestTypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestStreaming", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RequestStreaming = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseTypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResponseTypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseStreaming", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ResponseStreaming = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= Syntax(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mixin) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Mixin: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Mixin: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Root", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Root = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipApi(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthApi + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupApi + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthApi + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupApi = fmt.Errorf("proto: unexpected end of group") +) diff --git a/extender/code/vendor/github.com/gogo/protobuf/types/doc.go b/extender/code/vendor/github.com/gogo/protobuf/types/doc.go new file mode 100644 index 000000000..ff2810af1 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/types/doc.go @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package types contains code for interacting with well-known types. +*/ +package types diff --git a/extender/code/vendor/github.com/gogo/protobuf/types/duration.go b/extender/code/vendor/github.com/gogo/protobuf/types/duration.go new file mode 100644 index 000000000..979b8e78a --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/types/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Range of a Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid Duration +// may still be too large to fit into a time.Duration (the range of Duration +// is about 10,000 years, and the range of time.Duration is about 290 years).
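The hand-rolled codec for Api, Method, and Mixin is complete at this point: Equal, GoString, Marshal/MarshalToSizedBuffer, Size, String, Unmarshal, and skipApi are all emitted by protoc-gen-gogo and are not meant to be edited by hand. A minimal round-trip sketch of how the extender's vendored copy might be exercised — the import alias and all field values here are illustrative, not taken from this diff:

package main

import (
	"fmt"

	// Vendored in this diff under extender/code/vendor/.
	"github.com/gogo/protobuf/types"
)

func main() {
	in := &types.Api{
		Name:    "example.v1.Greeter", // hypothetical service name
		Version: "v1",
		Methods: []*types.Method{{
			Name:           "SayHello",
			RequestTypeUrl: "type.googleapis.com/example.v1.HelloRequest",
		}},
	}

	raw, err := in.Marshal() // delegates to MarshalToSizedBuffer, writing fields back-to-front
	if err != nil {
		panic(err)
	}

	out := &types.Api{}
	if err := out.Unmarshal(raw); err != nil {
		panic(err)
	}

	fmt.Println(out.Equal(in))         // true
	fmt.Println(len(raw) == in.Size()) // true: Size() reports the exact wire length
}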
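duration.go is the hand-written half of the vendored package: validateDuration enforces the proto invariants (seconds within ±maxSeconds, nanos strictly inside ±1e9, and matching signs), while DurationFromProto and DurationProto convert to and from time.Duration. A sketch of the round trip, under the same import assumption as above:

package main

import (
	"fmt"
	"time"

	"github.com/gogo/protobuf/types"
)

func main() {
	// 90.5s splits into whole seconds plus a nanosecond remainder.
	pb := types.DurationProto(90*time.Second + 500*time.Millisecond)
	fmt.Println(pb.Seconds, pb.Nanos) // 90 500000000

	// DurationFromProto validates the message before converting, so
	// out-of-range or mixed-sign values surface as errors here.
	d, err := types.DurationFromProto(pb)
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // 1m30.5s
}

The validateDuration function, whose body follows, implements exactly the checks described in its comment above.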
+func validateDuration(d *Duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %#v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %#v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) + } + return nil +} + +// DurationFromProto converts a Duration to a time.Duration. DurationFromProto +// returns an error if the Duration is invalid or is too large to be +// represented in a time.Duration. +func DurationFromProto(p *Duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) * time.Nanosecond + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a Duration. +func DurationProto(d time.Duration) *Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &Duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/types/duration.pb.go b/extender/code/vendor/github.com/gogo/protobuf/types/duration.pb.go new file mode 100644 index 000000000..3959f0669 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/types/duration.pb.go @@ -0,0 +1,520 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/duration.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. 
+// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +type Duration struct { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive.
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_23597b2ebd7ac6c5, []int{0} +} +func (*Duration) XXX_WellKnownType() string { return "Duration" } +func (m *Duration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Duration.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(m, src) +} +func (m *Duration) XXX_Size() int { + return m.Size() +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) +} + +var xxx_messageInfo_Duration proto.InternalMessageInfo + +func (m *Duration) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Duration) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func (*Duration) XXX_MessageName() string { + return "google.protobuf.Duration" +} +func init() { + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +} + +func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) } + +var fileDescriptor_23597b2ebd7ac6c5 = []byte{ + // 209 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, + 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, + 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, + 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, + 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0x7f, 0xe3, 0xa1, 0x1c, + 0xc3, 0x87, 0x87, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, + 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, 0xc9, 0x31, 0x7c, 0x78, 0x24, 0xc7, 0xb8, 0xe2, + 0xb1, 0x1c, 0xe3, 0x89, 0xc7, 0x72, 0x8c, 0x5c, 0xc2, 0xc9, 0xf9, 0xb9, 0x7a, 0x68, 0x56, 0x3b, + 0xf1, 0xc2, 0x2c, 0x0e, 0x00, 0x89, 0x04, 0x30, 0x46, 0xb1, 0x96, 0x54, 0x16, 0xa4, 0x16, 0xff, + 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0xa2, 0x25, 0x00, + 0xaa, 0x45, 0x2f, 0x3c, 0x35, 0x27, 0xc7, 0x3b, 0x2f, 0xbf, 0x3c, 0x2f, 0x04, 0xa4, 0x32, 0x89, + 0x0d, 0x6c, 0x96, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x8a, 0x1c, 0x64, 0x4e, 0xf6, 0x00, 0x00, + 0x00, +} + +func (this *Duration) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Duration) + if !ok { + that2, ok := that.(Duration) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Seconds != that1.Seconds { + if this.Seconds < that1.Seconds { + return -1 + } + return 1 + } + if this.Nanos != that1.Nanos { + if this.Nanos < that1.Nanos { + return -1 + } + return 1 + } + if c := 
bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Duration) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Duration) + if !ok { + that2, ok := that.(Duration) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Seconds != that1.Seconds { + return false + } + if this.Nanos != that1.Nanos { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Duration) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Duration{") + s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n") + s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDuration(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Duration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Duration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Nanos != 0 { + i = encodeVarintDuration(dAtA, i, uint64(m.Nanos)) + i-- + dAtA[i] = 0x10 + } + if m.Seconds != 0 { + i = encodeVarintDuration(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintDuration(dAtA []byte, offset int, v uint64) int { + offset -= sovDuration(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Duration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Seconds != 0 { + n += 1 + sovDuration(uint64(m.Seconds)) + } + if m.Nanos != 0 { + n += 1 + sovDuration(uint64(m.Nanos)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovDuration(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozDuration(x uint64) (n int) { + return sovDuration(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Duration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDuration + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Duration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDuration + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDuration + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDuration(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDuration + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthDuration + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDuration(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthDuration + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupDuration + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthDuration + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthDuration = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDuration = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupDuration = fmt.Errorf("proto: unexpected end of group") +) diff --git a/extender/code/vendor/github.com/gogo/protobuf/types/duration_gogo.go b/extender/code/vendor/github.com/gogo/protobuf/types/duration_gogo.go new file mode 100644 index 000000000..90e7670e2 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/types/duration_gogo.go @@ -0,0 +1,100 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +import ( + "fmt" + "time" +) + +func NewPopulatedDuration(r interface { + Int63() int64 +}, easy bool) *Duration { + this := &Duration{} + maxSecs := time.Hour.Nanoseconds() / 1e9 + max := 2 * maxSecs + s := int64(r.Int63()) % max + s -= maxSecs + neg := int64(1) + if s < 0 { + neg = -1 + } + this.Seconds = s + this.Nanos = int32(neg * (r.Int63() % 1e9)) + return this +} + +func (d *Duration) String() string { + td, err := DurationFromProto(d) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return td.String() +} + +func NewPopulatedStdDuration(r interface { + Int63() int64 +}, easy bool) *time.Duration { + dur := NewPopulatedDuration(r, easy) + d, err := DurationFromProto(dur) + if err != nil { + return nil + } + return &d +} + +func SizeOfStdDuration(d time.Duration) int { + dur := DurationProto(d) + return dur.Size() +} + +func StdDurationMarshal(d time.Duration) ([]byte, error) { + size := SizeOfStdDuration(d) + buf := make([]byte, size) + _, err := StdDurationMarshalTo(d, buf) + return buf, err +} + +func StdDurationMarshalTo(d time.Duration, data []byte) (int, error) { + dur := DurationProto(d) + return dur.MarshalTo(data) +} + +func StdDurationUnmarshal(d *time.Duration, data []byte) error { + dur := &Duration{} + if err := dur.Unmarshal(data); err != nil { + return err + } + dd, err := DurationFromProto(dur) + if err != nil { + return err + } + *d = dd + return nil +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/types/empty.pb.go b/extender/code/vendor/github.com/gogo/protobuf/types/empty.pb.go new file mode 100644 index 000000000..17e3aa558 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/types/empty.pb.go @@ -0,0 +1,465 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/empty.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. 
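The Std* helpers above close out duration_gogo.go; they let callers skip the intermediate *Duration message and marshal a plain time.Duration straight to and from the Duration wire format (gogoproto's stdduration option generates calls to these helpers). A usage sketch, again assuming the vendored package is importable as github.com/gogo/protobuf/types:

package main

import (
	"fmt"
	"time"

	"github.com/gogo/protobuf/types"
)

func main() {
	// Marshal a time.Duration directly to Duration wire bytes.
	buf, err := types.StdDurationMarshal(1500 * time.Millisecond)
	if err != nil {
		panic(err)
	}

	// And back again; the buffer length matches SizeOfStdDuration.
	var got time.Duration
	if err := types.StdDurationUnmarshal(&got, buf); err != nil {
		panic(err)
	}
	fmt.Println(got, len(buf) == types.SizeOfStdDuration(got)) // 1.5s true
}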
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_900544acb223d5b8, []int{0} +} +func (*Empty) XXX_WellKnownType() string { return "Empty" } +func (m *Empty) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(m, src) +} +func (m *Empty) XXX_Size() int { + return m.Size() +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) +} + +var xxx_messageInfo_Empty proto.InternalMessageInfo + +func (*Empty) XXX_MessageName() string { + return "google.protobuf.Empty" +} +func init() { + proto.RegisterType((*Empty)(nil), "google.protobuf.Empty") +} + +func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_900544acb223d5b8) } + +var fileDescriptor_900544acb223d5b8 = []byte{ + // 176 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28, + 0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57, + 0x90, 0xbc, 0x53, 0x0b, 0xe3, 0x8d, 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xfe, 0x78, 0x28, + 0xc7, 0xd8, 0xf0, 0x48, 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, + 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0xf1, 0xc5, 0x23, 0x39, 0x86, 0x0f, 0x20, 0xf1, 0xc7, 0x72, + 0x8c, 0x27, 0x1e, 0xcb, 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe8, 0xc4, 0x05, + 0x36, 0x2e, 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x62, 0x2d, 0xa9, 0x2c, 0x48, 0x2d, 0xfe, 0xc1, 0xc8, + 0xb8, 0x88, 0x89, 0xd9, 0x3d, 0xc0, 0x69, 0x15, 0x93, 0x9c, 0x3b, 0x44, 0x7d, 0x00, 0x54, 0xbd, + 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x65, 0x12, 0x1b, 0xd8, + 0x20, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x21, 0xbe, 0xb6, 0x31, 0xc6, 0x00, 0x00, 0x00, +} + +func (this *Empty) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Empty) + if !ok { + that2, ok := that.(Empty) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + 
} else if this == nil { + return -1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Empty) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Empty) + if !ok { + that2, ok := that.(Empty) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Empty) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&types.Empty{") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringEmpty(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Empty) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Empty) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Empty) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func encodeVarintEmpty(dAtA []byte, offset int, v uint64) int { + offset -= sovEmpty(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedEmpty(r randyEmpty, easy bool) *Empty { + this := &Empty{} + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedEmpty(r, 1) + } + return this +} + +type randyEmpty interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneEmpty(r randyEmpty) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringEmpty(r randyEmpty) string { + v1 := r.Intn(100) + tmps := make([]rune, v1) + for i := 0; i < v1; i++ { + tmps[i] = randUTF8RuneEmpty(r) + } + return string(tmps) +} +func randUnrecognizedEmpty(r randyEmpty, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldEmpty(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldEmpty(dAtA []byte, r randyEmpty, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + v2 := r.Int63() + if r.Intn(2) == 0 { + v2 *= -1 + } + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(v2)) + case 1: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + ll 
:= r.Intn(100) + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateEmpty(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Empty) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovEmpty(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEmpty(x uint64) (n int) { + return sovEmpty(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Empty) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Empty{`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringEmpty(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Empty) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEmpty + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Empty: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Empty: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipEmpty(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEmpty + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEmpty + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEmpty(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEmpty + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEmpty + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEmpty + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEmpty = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEmpty = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEmpty = fmt.Errorf("proto: unexpected end of group") +) diff --git a/extender/code/vendor/github.com/gogo/protobuf/types/field_mask.pb.go b/extender/code/vendor/github.com/gogo/protobuf/types/field_mask.pb.go new file mode 100644 index 000000000..7226b57f7 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/types/field_mask.pb.go @@ -0,0 +1,741 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/field_mask.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. +// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). +// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. 
For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x, y, and z +// (their values will be set to the default, and omitted in proto text +// output): +// +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// paths string. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. +// +// If a repeated field is specified for an update operation, new values will +// be appended to the existing repeated field in the target resource. Note that +// a repeated field is only allowed in the last position of a `paths` string. +// +// If a sub-message is specified in the last position of the field mask for an +// update operation, then the new value will be merged into the existing sub-message +// in the target resource. +// +// For example, given the target message: +// +// f { +// b { +// d: 1 +// x: 2 +// } +// c: [1] +// } +// +// And an update message: +// +// f { +// b { +// d: 10 +// } +// c: [2] +// } +// +// then if the field mask is: +// +// paths: ["f.b", "f.c"] +// +// then the result will be: +// +// f { +// b { +// d: 10 +// x: 2 +// } +// c: [1, 2] +// } +// +// An implementation may provide options to override this default behavior for +// repeated and message fields. +// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. +// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields had been specified). +// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. +// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. 
In any case, the effect of the field mask is +// required to be honored by the API. +// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). +// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Field names in each path are converted +// to/from lower-camel naming conventions. +// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto, a field mask for `Profile` may look as follows: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +// # Field Masks and Oneof Fields +// +// Field masks treat fields in oneofs just as regular fields. Consider the +// following message: +// +// message SampleMessage { +// oneof test_oneof { +// string name = 4; +// SubMessage sub_message = 9; +// } +// } +// +// The field mask can be: +// +// mask { +// paths: "name" +// } +// +// Or: +// +// mask { +// paths: "sub_message" +// } +// +// Note that oneof type names ("test_oneof" in this case) cannot be used in +// paths. +// +// ## Field Mask Verification +// +// The implementation of any API method which has a FieldMask type field in the +// request should verify the included field paths, and return an +// `INVALID_ARGUMENT` error if any path is duplicated or unmappable. +type FieldMask struct { + // The set of field mask paths. 
+ Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldMask) Reset() { *m = FieldMask{} } +func (*FieldMask) ProtoMessage() {} +func (*FieldMask) Descriptor() ([]byte, []int) { + return fileDescriptor_5158202634f0da48, []int{0} +} +func (m *FieldMask) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FieldMask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FieldMask.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FieldMask) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldMask.Merge(m, src) +} +func (m *FieldMask) XXX_Size() int { + return m.Size() +} +func (m *FieldMask) XXX_DiscardUnknown() { + xxx_messageInfo_FieldMask.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldMask proto.InternalMessageInfo + +func (m *FieldMask) GetPaths() []string { + if m != nil { + return m.Paths + } + return nil +} + +func (*FieldMask) XXX_MessageName() string { + return "google.protobuf.FieldMask" +} +func init() { + proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask") +} + +func init() { proto.RegisterFile("google/protobuf/field_mask.proto", fileDescriptor_5158202634f0da48) } + +var fileDescriptor_5158202634f0da48 = []byte{ + // 203 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd, + 0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x03, 0x8b, 0x09, 0xf1, 0x43, 0x54, 0xe8, 0xc1, 0x54, + 0x28, 0x29, 0x72, 0x71, 0xba, 0x81, 0x14, 0xf9, 0x26, 0x16, 0x67, 0x0b, 0x89, 0x70, 0xb1, 0x16, + 0x24, 0x96, 0x64, 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x06, 0x41, 0x38, 0x4e, 0x1d, 0x8c, + 0x37, 0x1e, 0xca, 0x31, 0x7c, 0x78, 0x28, 0xc7, 0xf8, 0xe3, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, + 0xc6, 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, + 0x39, 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x80, 0xc4, 0x1f, 0xcb, 0x31, 0x9e, 0x78, 0x2c, 0xc7, + 0xc8, 0x25, 0x9c, 0x9c, 0x9f, 0xab, 0x87, 0x66, 0x95, 0x13, 0x1f, 0xdc, 0xa2, 0x00, 0x90, 0x50, + 0x00, 0x63, 0x14, 0x6b, 0x49, 0x65, 0x41, 0x6a, 0xf1, 0x0f, 0x46, 0xc6, 0x45, 0x4c, 0xcc, 0xee, + 0x01, 0x4e, 0xab, 0x98, 0xe4, 0xdc, 0x21, 0x7a, 0x02, 0xa0, 0x7a, 0xf4, 0xc2, 0x53, 0x73, 0x72, + 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0x2a, 0x93, 0xd8, 0xc0, 0x86, 0x19, 0x03, 0x02, 0x00, + 0x00, 0xff, 0xff, 0x43, 0xa0, 0x83, 0xd0, 0xe9, 0x00, 0x00, 0x00, +} + +func (this *FieldMask) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*FieldMask) + if !ok { + that2, ok := that.(FieldMask) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if len(this.Paths) != len(that1.Paths) { + if len(this.Paths) < len(that1.Paths) { + return -1 + } + return 1 + } + for i := range this.Paths { + if this.Paths[i] != that1.Paths[i] { + if this.Paths[i] < that1.Paths[i] { + return -1 + } + return 1 + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this 
*FieldMask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*FieldMask) + if !ok { + that2, ok := that.(FieldMask) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Paths) != len(that1.Paths) { + return false + } + for i := range this.Paths { + if this.Paths[i] != that1.Paths[i] { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *FieldMask) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.FieldMask{") + s = append(s, "Paths: "+fmt.Sprintf("%#v", this.Paths)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringFieldMask(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *FieldMask) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FieldMask) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FieldMask) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Paths) > 0 { + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Paths[iNdEx]) + copy(dAtA[i:], m.Paths[iNdEx]) + i = encodeVarintFieldMask(dAtA, i, uint64(len(m.Paths[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintFieldMask(dAtA []byte, offset int, v uint64) int { + offset -= sovFieldMask(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedFieldMask(r randyFieldMask, easy bool) *FieldMask { + this := &FieldMask{} + v1 := r.Intn(10) + this.Paths = make([]string, v1) + for i := 0; i < v1; i++ { + this.Paths[i] = string(randStringFieldMask(r)) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedFieldMask(r, 2) + } + return this +} + +type randyFieldMask interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneFieldMask(r randyFieldMask) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringFieldMask(r randyFieldMask) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneFieldMask(r) + } + return string(tmps) +} +func randUnrecognizedFieldMask(r randyFieldMask, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldFieldMask(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldFieldMask(dAtA []byte, r randyFieldMask, fieldNumber int, wire int) []byte { + key := 
uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateFieldMask(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *FieldMask) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Paths) > 0 { + for _, s := range m.Paths { + l = len(s) + n += 1 + l + sovFieldMask(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovFieldMask(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozFieldMask(x uint64) (n int) { + return sovFieldMask(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *FieldMask) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FieldMask{`, + `Paths:` + fmt.Sprintf("%v", this.Paths) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringFieldMask(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *FieldMask) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFieldMask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FieldMask: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FieldMask: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFieldMask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFieldMask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFieldMask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFieldMask(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthFieldMask + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthFieldMask + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipFieldMask(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthFieldMask + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupFieldMask + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthFieldMask + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthFieldMask = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowFieldMask = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupFieldMask = fmt.Errorf("proto: unexpected end of group") +) diff --git a/extender/code/vendor/github.com/gogo/protobuf/types/protosize.go b/extender/code/vendor/github.com/gogo/protobuf/types/protosize.go new file mode 100644 index 000000000..3a2d1b7e1 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/types/protosize.go @@ -0,0 +1,34 @@ +package types + +func (m *Any) ProtoSize() (n int) { return m.Size() } +func (m *Api) ProtoSize() (n int) { return m.Size() } +func (m *Method) ProtoSize() (n int) { return m.Size() } +func (m *Mixin) ProtoSize() (n int) { return m.Size() } +func (m *Duration) ProtoSize() (n int) { return m.Size() } +func (m *Empty) ProtoSize() (n int) { return m.Size() } +func (m *FieldMask) ProtoSize() (n int) { return m.Size() } +func (m *SourceContext) ProtoSize() (n int) { return m.Size() } +func (m *Struct) ProtoSize() (n int) { return m.Size() } +func (m *Value) ProtoSize() (n int) { return m.Size() } +func (m *Value_NullValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_NumberValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_StringValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_BoolValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_StructValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_ListValue) ProtoSize() (n int) { return m.Size() } +func (m *ListValue) ProtoSize() (n int) { return m.Size() } +func (m *Timestamp) ProtoSize() (n int) { return m.Size() } +func (m *Type) ProtoSize() (n int) { return m.Size() } +func (m *Field) ProtoSize() (n int) { return m.Size() } +func (m *Enum) ProtoSize() (n int) { return m.Size() } 
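+// REVIEWER NOTE: the one-line shims in this file exist because code generated
+// with the gogoproto `protosizer` option calls ProtoSize() where standard
+// gogo-generated code calls Size(); each shim simply delegates to the
+// wire-accurate Size(). A minimal usage sketch (hypothetical, not part of the
+// vendored file; `mask` and its path are assumed for illustration):
+//
+//	mask := &types.FieldMask{Paths: []string{"user.display_name"}}
+//	buf := make([]byte, mask.ProtoSize()) // same byte count as mask.Size()
+//	if _, err := mask.MarshalToSizedBuffer(buf); err == nil {
+//		_ = buf // buf now holds the protobuf wire encoding of mask
+//	}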
+func (m *EnumValue) ProtoSize() (n int) { return m.Size() } +func (m *Option) ProtoSize() (n int) { return m.Size() } +func (m *DoubleValue) ProtoSize() (n int) { return m.Size() } +func (m *FloatValue) ProtoSize() (n int) { return m.Size() } +func (m *Int64Value) ProtoSize() (n int) { return m.Size() } +func (m *UInt64Value) ProtoSize() (n int) { return m.Size() } +func (m *Int32Value) ProtoSize() (n int) { return m.Size() } +func (m *UInt32Value) ProtoSize() (n int) { return m.Size() } +func (m *BoolValue) ProtoSize() (n int) { return m.Size() } +func (m *StringValue) ProtoSize() (n int) { return m.Size() } +func (m *BytesValue) ProtoSize() (n int) { return m.Size() } diff --git a/extender/code/vendor/github.com/gogo/protobuf/types/source_context.pb.go b/extender/code/vendor/github.com/gogo/protobuf/types/source_context.pb.go new file mode 100644 index 000000000..61045ce10 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/types/source_context.pb.go @@ -0,0 +1,527 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/source_context.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// `SourceContext` represents information about the source of a +// protobuf element, like the file in which it is defined. +type SourceContext struct { + // The path-qualified name of the .proto file that contained the associated + // protobuf element. For example: `"google/protobuf/source_context.proto"`. 
+ FileName string `protobuf:"bytes,1,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceContext) Reset() { *m = SourceContext{} } +func (*SourceContext) ProtoMessage() {} +func (*SourceContext) Descriptor() ([]byte, []int) { + return fileDescriptor_b686cdb126d509db, []int{0} +} +func (m *SourceContext) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SourceContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SourceContext.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SourceContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceContext.Merge(m, src) +} +func (m *SourceContext) XXX_Size() int { + return m.Size() +} +func (m *SourceContext) XXX_DiscardUnknown() { + xxx_messageInfo_SourceContext.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceContext proto.InternalMessageInfo + +func (m *SourceContext) GetFileName() string { + if m != nil { + return m.FileName + } + return "" +} + +func (*SourceContext) XXX_MessageName() string { + return "google.protobuf.SourceContext" +} +func init() { + proto.RegisterType((*SourceContext)(nil), "google.protobuf.SourceContext") +} + +func init() { + proto.RegisterFile("google/protobuf/source_context.proto", fileDescriptor_b686cdb126d509db) +} + +var fileDescriptor_b686cdb126d509db = []byte{ + // 212 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xce, 0x2f, 0x2d, + 0x4a, 0x4e, 0x8d, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0xd1, 0x03, 0x8b, 0x0b, 0xf1, 0x43, + 0x54, 0xe9, 0xc1, 0x54, 0x29, 0xe9, 0x70, 0xf1, 0x06, 0x83, 0x15, 0x3a, 0x43, 0xd4, 0x09, 0x49, + 0x73, 0x71, 0xa6, 0x65, 0xe6, 0xa4, 0xc6, 0xe7, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a, + 0x70, 0x06, 0x71, 0x80, 0x04, 0xfc, 0x12, 0x73, 0x53, 0x9d, 0x3a, 0x19, 0x6f, 0x3c, 0x94, 0x63, + 0xf8, 0xf0, 0x50, 0x8e, 0xf1, 0xc7, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, + 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, + 0xc9, 0x31, 0x7c, 0x00, 0x89, 0x3f, 0x96, 0x63, 0x3c, 0xf1, 0x58, 0x8e, 0x91, 0x4b, 0x38, 0x39, + 0x3f, 0x57, 0x0f, 0xcd, 0x56, 0x27, 0x21, 0x14, 0x3b, 0x03, 0x40, 0xc2, 0x01, 0x8c, 0x51, 0xac, + 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, + 0x34, 0x05, 0x40, 0x35, 0xe9, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80, + 0x94, 0x25, 0xb1, 0x81, 0x4d, 0x33, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x37, 0x2a, 0xa1, + 0xf9, 0x00, 0x00, 0x00, +} + +func (this *SourceContext) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*SourceContext) + if !ok { + that2, ok := that.(SourceContext) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.FileName != that1.FileName { + if this.FileName < that1.FileName { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + 
return 0 +} +func (this *SourceContext) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SourceContext) + if !ok { + that2, ok := that.(SourceContext) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.FileName != that1.FileName { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *SourceContext) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.SourceContext{") + s = append(s, "FileName: "+fmt.Sprintf("%#v", this.FileName)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringSourceContext(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *SourceContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceContext) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SourceContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.FileName) > 0 { + i -= len(m.FileName) + copy(dAtA[i:], m.FileName) + i = encodeVarintSourceContext(dAtA, i, uint64(len(m.FileName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintSourceContext(dAtA []byte, offset int, v uint64) int { + offset -= sovSourceContext(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedSourceContext(r randySourceContext, easy bool) *SourceContext { + this := &SourceContext{} + this.FileName = string(randStringSourceContext(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedSourceContext(r, 2) + } + return this +} + +type randySourceContext interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneSourceContext(r randySourceContext) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringSourceContext(r randySourceContext) string { + v1 := r.Intn(100) + tmps := make([]rune, v1) + for i := 0; i < v1; i++ { + tmps[i] = randUTF8RuneSourceContext(r) + } + return string(tmps) +} +func randUnrecognizedSourceContext(r randySourceContext, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldSourceContext(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldSourceContext(dAtA []byte, r randySourceContext, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = 
encodeVarintPopulateSourceContext(dAtA, uint64(key)) + v2 := r.Int63() + if r.Intn(2) == 0 { + v2 *= -1 + } + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(v2)) + case 1: + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateSourceContext(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *SourceContext) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FileName) + if l > 0 { + n += 1 + l + sovSourceContext(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovSourceContext(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozSourceContext(x uint64) (n int) { + return sovSourceContext(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *SourceContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SourceContext{`, + `FileName:` + fmt.Sprintf("%v", this.FileName) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringSourceContext(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *SourceContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSourceContext + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FileName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSourceContext + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSourceContext + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSourceContext + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FileName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSourceContext(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSourceContext + } + 
if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSourceContext + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSourceContext(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSourceContext + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSourceContext + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSourceContext + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthSourceContext + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupSourceContext + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthSourceContext + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthSourceContext = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSourceContext = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupSourceContext = fmt.Errorf("proto: unexpected end of group") +) diff --git a/extender/code/vendor/github.com/gogo/protobuf/types/struct.pb.go b/extender/code/vendor/github.com/gogo/protobuf/types/struct.pb.go new file mode 100644 index 000000000..cea553eef --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/types/struct.pb.go @@ -0,0 +1,2280 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/struct.proto + +package types + +import ( + bytes "bytes" + encoding_binary "encoding/binary" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. 
+ NullValue_NULL_VALUE NullValue = 0 +) + +var NullValue_name = map[int32]string{ + 0: "NULL_VALUE", +} + +var NullValue_value = map[string]int32{ + "NULL_VALUE": 0, +} + +func (NullValue) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{0} +} + +func (NullValue) XXX_WellKnownType() string { return "NullValue" } + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + // Unordered map of dynamically typed values. + Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Struct) Reset() { *m = Struct{} } +func (*Struct) ProtoMessage() {} +func (*Struct) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{0} +} +func (*Struct) XXX_WellKnownType() string { return "Struct" } +func (m *Struct) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Struct.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Struct) XXX_Merge(src proto.Message) { + xxx_messageInfo_Struct.Merge(m, src) +} +func (m *Struct) XXX_Size() int { + return m.Size() +} +func (m *Struct) XXX_DiscardUnknown() { + xxx_messageInfo_Struct.DiscardUnknown(m) +} + +var xxx_messageInfo_Struct proto.InternalMessageInfo + +func (m *Struct) GetFields() map[string]*Value { + if m != nil { + return m.Fields + } + return nil +} + +func (*Struct) XXX_MessageName() string { + return "google.protobuf.Struct" +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + // The kind of value. 
+ // + // Types that are valid to be assigned to Kind: + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Value) Reset() { *m = Value{} } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{1} +} +func (*Value) XXX_WellKnownType() string { return "Value" } +func (m *Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Value.Merge(m, src) +} +func (m *Value) XXX_Size() int { + return m.Size() +} +func (m *Value) XXX_DiscardUnknown() { + xxx_messageInfo_Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Value proto.InternalMessageInfo + +type isValue_Kind interface { + isValue_Kind() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int + Compare(interface{}) int +} + +type Value_NullValue struct { + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof" json:"null_value,omitempty"` +} +type Value_NumberValue struct { + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof" json:"number_value,omitempty"` +} +type Value_StringValue struct { + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof" json:"string_value,omitempty"` +} +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof" json:"bool_value,omitempty"` +} +type Value_StructValue struct { + StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof" json:"struct_value,omitempty"` +} +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof" json:"list_value,omitempty"` +} + +func (*Value_NullValue) isValue_Kind() {} +func (*Value_NumberValue) isValue_Kind() {} +func (*Value_StringValue) isValue_Kind() {} +func (*Value_BoolValue) isValue_Kind() {} +func (*Value_StructValue) isValue_Kind() {} +func (*Value_ListValue) isValue_Kind() {} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Value) GetNullValue() NullValue { + if x, ok := m.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NullValue_NULL_VALUE +} + +func (m *Value) GetNumberValue() float64 { + if x, ok := m.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBoolValue() bool { + if x, ok := m.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *Value) GetStructValue() *Struct { + if x, ok := m.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (m *Value) GetListValue() *ListValue { + if x, ok := m.GetKind().(*Value_ListValue); ok { + return 
x.ListValue + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Value) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } +} + +func (*Value) XXX_MessageName() string { + return "google.protobuf.Value" +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + // Repeated field of dynamically typed values. + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListValue) Reset() { *m = ListValue{} } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{2} +} +func (*ListValue) XXX_WellKnownType() string { return "ListValue" } +func (m *ListValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListValue.Merge(m, src) +} +func (m *ListValue) XXX_Size() int { + return m.Size() +} +func (m *ListValue) XXX_DiscardUnknown() { + xxx_messageInfo_ListValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ListValue proto.InternalMessageInfo + +func (m *ListValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +func (*ListValue) XXX_MessageName() string { + return "google.protobuf.ListValue" +} +func init() { + proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) + proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry") + proto.RegisterType((*Value)(nil), "google.protobuf.Value") + proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") +} + +func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402) } + +var fileDescriptor_df322afd6c9fb402 = []byte{ + // 443 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xb1, 0x6f, 0xd3, 0x40, + 0x14, 0xc6, 0xfd, 0x9c, 0xc6, 0x22, 0xcf, 0xa8, 0x54, 0x87, 0x04, 0x51, 0x41, 0x47, 0x94, 0x2e, + 0x11, 0x42, 0xae, 0x14, 0x16, 0x44, 0x58, 0x88, 0x54, 0x5a, 0x89, 0xa8, 0x32, 0x86, 0x16, 0x89, + 0x25, 0xc2, 0xae, 0x1b, 0x59, 0xbd, 0xde, 0x55, 0xf6, 0x1d, 0x28, 0x1b, 0x0b, 0xff, 0x03, 0x33, + 0x13, 0x62, 0xe4, 0xaf, 0xe8, 0xc8, 0xc8, 0x48, 0xdc, 0x85, 0xb1, 0x63, 0x47, 0x74, 0x77, 0xb6, + 0x41, 0x8d, 0xb2, 0xf9, 0x7d, 0xf7, 0x7b, 0xdf, 0x7b, 0xdf, 0x33, 0xde, 0x9f, 0x09, 0x31, 0x63, + 0xe9, 0xf6, 0x59, 0x2e, 0xa4, 0x88, 0xd5, 0xf1, 0x76, 0x21, 0x73, 0x95, 0xc8, 0xc0, 0xd4, 0xe4, + 0x96, 0x7d, 0x0d, 0xea, 0xd7, 0xfe, 0x17, 0x40, 0xef, 0xb5, 0x21, 0xc8, 0x08, 0xbd, 0xe3, 0x2c, + 0x65, 0x47, 0x45, 0x17, 0x7a, 0xad, 0x81, 0x3f, 0xdc, 0x0a, 0xae, 0xc1, 0x81, 0x05, 0x83, 0x17, + 0x86, 0xda, 0xe1, 0x32, 0x9f, 0x47, 0x55, 0xcb, 0xe6, 0x2b, 0xf4, 0xff, 0x93, 0xc9, 0x06, 0xb6, + 0x4e, 0xd2, 
0x79, 0x17, 0x7a, 0x30, 0xe8, 0x44, 0xfa, 0x93, 0x3c, 0xc2, 0xf6, 0x87, 0xf7, 0x4c, + 0xa5, 0x5d, 0xb7, 0x07, 0x03, 0x7f, 0x78, 0x67, 0xc9, 0xfc, 0x50, 0xbf, 0x46, 0x16, 0x7a, 0xea, + 0x3e, 0x81, 0xfe, 0x0f, 0x17, 0xdb, 0x46, 0x24, 0x23, 0x44, 0xae, 0x18, 0x9b, 0x5a, 0x03, 0x6d, + 0xba, 0x3e, 0xdc, 0x5c, 0x32, 0xd8, 0x57, 0x8c, 0x19, 0x7e, 0xcf, 0x89, 0x3a, 0xbc, 0x2e, 0xc8, + 0x16, 0xde, 0xe4, 0xea, 0x34, 0x4e, 0xf3, 0xe9, 0xbf, 0xf9, 0xb0, 0xe7, 0x44, 0xbe, 0x55, 0x1b, + 0xa8, 0x90, 0x79, 0xc6, 0x67, 0x15, 0xd4, 0xd2, 0x8b, 0x6b, 0xc8, 0xaa, 0x16, 0x7a, 0x80, 0x18, + 0x0b, 0x51, 0xaf, 0xb1, 0xd6, 0x83, 0xc1, 0x0d, 0x3d, 0x4a, 0x6b, 0x16, 0x78, 0x66, 0x5c, 0x54, + 0x22, 0x2b, 0xa4, 0x6d, 0xa2, 0xde, 0x5d, 0x71, 0xc7, 0xca, 0x5e, 0x25, 0xb2, 0x49, 0xc9, 0xb2, + 0xa2, 0xee, 0xf5, 0x4c, 0xef, 0x72, 0xca, 0x49, 0x56, 0xc8, 0x26, 0x25, 0xab, 0x8b, 0xb1, 0x87, + 0x6b, 0x27, 0x19, 0x3f, 0xea, 0x8f, 0xb0, 0xd3, 0x10, 0x24, 0x40, 0xcf, 0x98, 0xd5, 0x7f, 0x74, + 0xd5, 0xd1, 0x2b, 0xea, 0xe1, 0x3d, 0xec, 0x34, 0x47, 0x24, 0xeb, 0x88, 0xfb, 0x07, 0x93, 0xc9, + 0xf4, 0xf0, 0xf9, 0xe4, 0x60, 0x67, 0xc3, 0x19, 0x7f, 0x86, 0x5f, 0x0b, 0xea, 0x5c, 0x2e, 0x28, + 0x5c, 0x2d, 0x28, 0x7c, 0x2a, 0x29, 0x7c, 0x2b, 0x29, 0x9c, 0x97, 0x14, 0x7e, 0x96, 0x14, 0x7e, + 0x97, 0x14, 0xfe, 0x94, 0xd4, 0xb9, 0xd4, 0xfa, 0x05, 0x85, 0xf3, 0x0b, 0x0a, 0x78, 0x3b, 0x11, + 0xa7, 0xd7, 0x47, 0x8e, 0x7d, 0x9b, 0x3e, 0xd4, 0x75, 0x08, 0xef, 0xda, 0x72, 0x7e, 0x96, 0x16, + 0x57, 0x00, 0x5f, 0xdd, 0xd6, 0x6e, 0x38, 0xfe, 0xee, 0xd2, 0x5d, 0xdb, 0x10, 0xd6, 0x3b, 0xbe, + 0x4d, 0x19, 0x7b, 0xc9, 0xc5, 0x47, 0xfe, 0x46, 0x93, 0xb1, 0x67, 0x9c, 0x1e, 0xff, 0x0d, 0x00, + 0x00, 0xff, 0xff, 0x26, 0x30, 0xdb, 0xbe, 0xe9, 0x02, 0x00, 0x00, +} + +func (this *Struct) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Struct) + if !ok { + that2, ok := that.(Struct) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if len(this.Fields) != len(that1.Fields) { + if len(this.Fields) < len(that1.Fields) { + return -1 + } + return 1 + } + for i := range this.Fields { + if c := this.Fields[i].Compare(that1.Fields[i]); c != 0 { + return c + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value) + if !ok { + that2, ok := that.(Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if that1.Kind == nil { + if this.Kind != nil { + return 1 + } + } else if this.Kind == nil { + return -1 + } else { + thisType := -1 + switch this.Kind.(type) { + case *Value_NullValue: + thisType = 0 + case *Value_NumberValue: + thisType = 1 + case *Value_StringValue: + thisType = 2 + case *Value_BoolValue: + thisType = 3 + case *Value_StructValue: + thisType = 4 + case *Value_ListValue: + thisType = 5 + default: + panic(fmt.Sprintf("compare: unexpected type %T in oneof", this.Kind)) + } + that1Type := -1 + switch that1.Kind.(type) { + case *Value_NullValue: + that1Type = 0 + case *Value_NumberValue: + that1Type = 1 + case *Value_StringValue: + that1Type = 2 + case *Value_BoolValue: + that1Type = 3 + case *Value_StructValue: + that1Type = 4 + case *Value_ListValue: 
+ that1Type = 5 + default: + panic(fmt.Sprintf("compare: unexpected type %T in oneof", that1.Kind)) + } + if thisType == that1Type { + if c := this.Kind.Compare(that1.Kind); c != 0 { + return c + } + } else if thisType < that1Type { + return -1 + } else if thisType > that1Type { + return 1 + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Value_NullValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_NullValue) + if !ok { + that2, ok := that.(Value_NullValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.NullValue != that1.NullValue { + if this.NullValue < that1.NullValue { + return -1 + } + return 1 + } + return 0 +} +func (this *Value_NumberValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_NumberValue) + if !ok { + that2, ok := that.(Value_NumberValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.NumberValue != that1.NumberValue { + if this.NumberValue < that1.NumberValue { + return -1 + } + return 1 + } + return 0 +} +func (this *Value_StringValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_StringValue) + if !ok { + that2, ok := that.(Value_StringValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.StringValue != that1.StringValue { + if this.StringValue < that1.StringValue { + return -1 + } + return 1 + } + return 0 +} +func (this *Value_BoolValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_BoolValue) + if !ok { + that2, ok := that.(Value_BoolValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.BoolValue != that1.BoolValue { + if !this.BoolValue { + return -1 + } + return 1 + } + return 0 +} +func (this *Value_StructValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_StructValue) + if !ok { + that2, ok := that.(Value_StructValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := this.StructValue.Compare(that1.StructValue); c != 0 { + return c + } + return 0 +} +func (this *Value_ListValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_ListValue) + if !ok { + that2, ok := that.(Value_ListValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := this.ListValue.Compare(that1.ListValue); c != 0 { + return c + } + return 0 +} +func (this *ListValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok 
:= that.(*ListValue) + if !ok { + that2, ok := that.(ListValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if len(this.Values) != len(that1.Values) { + if len(this.Values) < len(that1.Values) { + return -1 + } + return 1 + } + for i := range this.Values { + if c := this.Values[i].Compare(that1.Values[i]); c != 0 { + return c + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (x NullValue) String() string { + s, ok := NullValue_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *Struct) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Struct) + if !ok { + that2, ok := that.(Struct) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Fields) != len(that1.Fields) { + return false + } + for i := range this.Fields { + if !this.Fields[i].Equal(that1.Fields[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value) + if !ok { + that2, ok := that.(Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if that1.Kind == nil { + if this.Kind != nil { + return false + } + } else if this.Kind == nil { + return false + } else if !this.Kind.Equal(that1.Kind) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Value_NullValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_NullValue) + if !ok { + that2, ok := that.(Value_NullValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.NullValue != that1.NullValue { + return false + } + return true +} +func (this *Value_NumberValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_NumberValue) + if !ok { + that2, ok := that.(Value_NumberValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.NumberValue != that1.NumberValue { + return false + } + return true +} +func (this *Value_StringValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_StringValue) + if !ok { + that2, ok := that.(Value_StringValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.StringValue != that1.StringValue { + return false + } + return true +} +func (this *Value_BoolValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_BoolValue) + if !ok { + that2, ok := that.(Value_BoolValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.BoolValue != that1.BoolValue { + return false + } + 
return true +} +func (this *Value_StructValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_StructValue) + if !ok { + that2, ok := that.(Value_StructValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.StructValue.Equal(that1.StructValue) { + return false + } + return true +} +func (this *Value_ListValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_ListValue) + if !ok { + that2, ok := that.(Value_ListValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ListValue.Equal(that1.ListValue) { + return false + } + return true +} +func (this *ListValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ListValue) + if !ok { + that2, ok := that.(ListValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Values) != len(that1.Values) { + return false + } + for i := range this.Values { + if !this.Values[i].Equal(that1.Values[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Struct) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.Struct{") + keysForFields := make([]string, 0, len(this.Fields)) + for k := range this.Fields { + keysForFields = append(keysForFields, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForFields) + mapStringForFields := "map[string]*Value{" + for _, k := range keysForFields { + mapStringForFields += fmt.Sprintf("%#v: %#v,", k, this.Fields[k]) + } + mapStringForFields += "}" + if this.Fields != nil { + s = append(s, "Fields: "+mapStringForFields+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&types.Value{") + if this.Kind != nil { + s = append(s, "Kind: "+fmt.Sprintf("%#v", this.Kind)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Value_NullValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_NullValue{` + + `NullValue:` + fmt.Sprintf("%#v", this.NullValue) + `}`}, ", ") + return s +} +func (this *Value_NumberValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_NumberValue{` + + `NumberValue:` + fmt.Sprintf("%#v", this.NumberValue) + `}`}, ", ") + return s +} +func (this *Value_StringValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_StringValue{` + + `StringValue:` + fmt.Sprintf("%#v", this.StringValue) + `}`}, ", ") + return s +} +func (this *Value_BoolValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_BoolValue{` + + `BoolValue:` + fmt.Sprintf("%#v", this.BoolValue) + `}`}, ", ") + 
return s +} +func (this *Value_StructValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_StructValue{` + + `StructValue:` + fmt.Sprintf("%#v", this.StructValue) + `}`}, ", ") + return s +} +func (this *Value_ListValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_ListValue{` + + `ListValue:` + fmt.Sprintf("%#v", this.ListValue) + `}`}, ", ") + return s +} +func (this *ListValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.ListValue{") + if this.Values != nil { + s = append(s, "Values: "+fmt.Sprintf("%#v", this.Values)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringStruct(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Struct) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Struct) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Struct) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Fields) > 0 { + for k := range m.Fields { + v := m.Fields[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStruct(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintStruct(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintStruct(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Kind != nil { + { + size := m.Kind.Size() + i -= size + if _, err := m.Kind.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Value_NullValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value_NullValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintStruct(dAtA, i, uint64(m.NullValue)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *Value_NumberValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value_NumberValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + 
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.NumberValue)))) + i-- + dAtA[i] = 0x11 + return len(dAtA) - i, nil +} +func (m *Value_StringValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value_StringValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.StringValue) + copy(dAtA[i:], m.StringValue) + i = encodeVarintStruct(dAtA, i, uint64(len(m.StringValue))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *Value_BoolValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value_BoolValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.BoolValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *Value_StructValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value_StructValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StructValue != nil { + { + size, err := m.StructValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStruct(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Value_ListValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value_ListValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ListValue != nil { + { + size, err := m.ListValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStruct(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *ListValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Values[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStruct(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintStruct(dAtA []byte, offset int, v uint64) int { + offset -= sovStruct(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedStruct(r randyStruct, easy bool) *Struct { + this := &Struct{} + if r.Intn(5) == 0 { + v1 := r.Intn(10) + this.Fields = make(map[string]*Value) + for i := 0; i < v1; i++ { + this.Fields[randStringStruct(r)] = NewPopulatedValue(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedStruct(r, 2) + } + return this +} + +func NewPopulatedValue(r randyStruct, easy bool) *Value { + this := &Value{} + oneofNumber_Kind := []int32{1, 2, 3, 4, 5, 6}[r.Intn(6)] + switch oneofNumber_Kind { + case 1: + this.Kind = 
NewPopulatedValue_NullValue(r, easy) + case 2: + this.Kind = NewPopulatedValue_NumberValue(r, easy) + case 3: + this.Kind = NewPopulatedValue_StringValue(r, easy) + case 4: + this.Kind = NewPopulatedValue_BoolValue(r, easy) + case 5: + this.Kind = NewPopulatedValue_StructValue(r, easy) + case 6: + this.Kind = NewPopulatedValue_ListValue(r, easy) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedStruct(r, 7) + } + return this +} + +func NewPopulatedValue_NullValue(r randyStruct, easy bool) *Value_NullValue { + this := &Value_NullValue{} + this.NullValue = NullValue([]int32{0}[r.Intn(1)]) + return this +} +func NewPopulatedValue_NumberValue(r randyStruct, easy bool) *Value_NumberValue { + this := &Value_NumberValue{} + this.NumberValue = float64(r.Float64()) + if r.Intn(2) == 0 { + this.NumberValue *= -1 + } + return this +} +func NewPopulatedValue_StringValue(r randyStruct, easy bool) *Value_StringValue { + this := &Value_StringValue{} + this.StringValue = string(randStringStruct(r)) + return this +} +func NewPopulatedValue_BoolValue(r randyStruct, easy bool) *Value_BoolValue { + this := &Value_BoolValue{} + this.BoolValue = bool(bool(r.Intn(2) == 0)) + return this +} +func NewPopulatedValue_StructValue(r randyStruct, easy bool) *Value_StructValue { + this := &Value_StructValue{} + this.StructValue = NewPopulatedStruct(r, easy) + return this +} +func NewPopulatedValue_ListValue(r randyStruct, easy bool) *Value_ListValue { + this := &Value_ListValue{} + this.ListValue = NewPopulatedListValue(r, easy) + return this +} +func NewPopulatedListValue(r randyStruct, easy bool) *ListValue { + this := &ListValue{} + if r.Intn(5) == 0 { + v2 := r.Intn(5) + this.Values = make([]*Value, v2) + for i := 0; i < v2; i++ { + this.Values[i] = NewPopulatedValue(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedStruct(r, 2) + } + return this +} + +type randyStruct interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneStruct(r randyStruct) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringStruct(r randyStruct) string { + v3 := r.Intn(100) + tmps := make([]rune, v3) + for i := 0; i < v3; i++ { + tmps[i] = randUTF8RuneStruct(r) + } + return string(tmps) +} +func randUnrecognizedStruct(r randyStruct, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldStruct(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldStruct(dAtA []byte, r randyStruct, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + v4 := r.Int63() + if r.Intn(2) == 0 { + v4 *= -1 + } + dAtA = encodeVarintPopulateStruct(dAtA, uint64(v4)) + case 1: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateStruct(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = 
encodeVarintPopulateStruct(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateStruct(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Struct) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Fields) > 0 { + for k, v := range m.Fields { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovStruct(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovStruct(uint64(len(k))) + l + n += mapEntrySize + 1 + sovStruct(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Kind != nil { + n += m.Kind.Size() + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Value_NullValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovStruct(uint64(m.NullValue)) + return n +} +func (m *Value_NumberValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *Value_StringValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.StringValue) + n += 1 + l + sovStruct(uint64(l)) + return n +} +func (m *Value_BoolValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *Value_StructValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StructValue != nil { + l = m.StructValue.Size() + n += 1 + l + sovStruct(uint64(l)) + } + return n +} +func (m *Value_ListValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ListValue != nil { + l = m.ListValue.Size() + n += 1 + l + sovStruct(uint64(l)) + } + return n +} +func (m *ListValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Values) > 0 { + for _, e := range m.Values { + l = e.Size() + n += 1 + l + sovStruct(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovStruct(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozStruct(x uint64) (n int) { + return sovStruct(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Struct) String() string { + if this == nil { + return "nil" + } + keysForFields := make([]string, 0, len(this.Fields)) + for k := range this.Fields { + keysForFields = append(keysForFields, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForFields) + mapStringForFields := "map[string]*Value{" + for _, k := range keysForFields { + mapStringForFields += fmt.Sprintf("%v: %v,", k, this.Fields[k]) + } + mapStringForFields += "}" + s := strings.Join([]string{`&Struct{`, + `Fields:` + mapStringForFields + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Value_NullValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_NullValue{`, + `NullValue:` + fmt.Sprintf("%v", this.NullValue) + `,`, + `}`, 
+ }, "") + return s +} +func (this *Value_NumberValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_NumberValue{`, + `NumberValue:` + fmt.Sprintf("%v", this.NumberValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_StringValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_StringValue{`, + `StringValue:` + fmt.Sprintf("%v", this.StringValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_BoolValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_BoolValue{`, + `BoolValue:` + fmt.Sprintf("%v", this.BoolValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_StructValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_StructValue{`, + `StructValue:` + strings.Replace(fmt.Sprintf("%v", this.StructValue), "Struct", "Struct", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Value_ListValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_ListValue{`, + `ListValue:` + strings.Replace(fmt.Sprintf("%v", this.ListValue), "ListValue", "ListValue", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListValue) String() string { + if this == nil { + return "nil" + } + repeatedStringForValues := "[]*Value{" + for _, f := range this.Values { + repeatedStringForValues += strings.Replace(f.String(), "Value", "Value", 1) + "," + } + repeatedStringForValues += "}" + s := strings.Join([]string{`&ListValue{`, + `Values:` + repeatedStringForValues + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringStruct(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Struct) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Struct: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Struct: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Fields == nil { + m.Fields = make(map[string]*Value) + } + var mapkey string + var mapvalue *Value + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum 
:= int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthStruct + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthStruct + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthStruct + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthStruct + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Value{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Fields[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NullValue", wireType) + } + var v NullValue + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= NullValue(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Kind = &Value_NullValue{v} + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberValue", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Kind = &Value_NumberValue{float64(math.Float64frombits(v))} + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = &Value_StringValue{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Kind = &Value_BoolValue{b} + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StructValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Struct{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &Value_StructValue{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ListValue{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &Value_ListValue{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, &Value{}) + if err := m.Values[len(m.Values)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipStruct(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthStruct + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupStruct + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthStruct + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthStruct = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowStruct = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupStruct = fmt.Errorf("proto: unexpected end of group") +) diff --git a/extender/code/vendor/github.com/gogo/protobuf/types/timestamp.go b/extender/code/vendor/github.com/gogo/protobuf/types/timestamp.go new file mode 100644 index 000000000..232ada57c --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/types/timestamp.go @@ -0,0 +1,130 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if TimestampFromProto returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func TimestampFromProto(ts *Timestamp) (time.Time, error) { + // Don't return the zero value on error, because the zero value corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampNow returns a google.protobuf.Timestamp for the current time. +func TimestampNow() *Timestamp { + ts, err := TimestampProto(time.Now()) + if err != nil { + panic("ptypes: time.Now() out of Timestamp range") + } + return ts +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func TimestampProto(t time.Time) (*Timestamp, error) { + ts := &Timestamp{ + Seconds: t.Unix(), + Nanos: int32(t.Nanosecond()), + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} + +// TimestampString returns the RFC 3339 string for valid Timestamps.
For invalid +// Timestamps, it returns an error message in parentheses. +func TimestampString(ts *Timestamp) string { + t, err := TimestampFromProto(ts) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return t.Format(time.RFC3339Nano) +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/types/timestamp.pb.go b/extender/code/vendor/github.com/gogo/protobuf/types/timestamp.pb.go new file mode 100644 index 000000000..b81875267 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/types/timestamp.pb.go @@ -0,0 +1,542 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/timestamp.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// A Timestamp represents a point in time independent of any time zone or local +// calendar, encoded as a count of seconds and fractions of seconds at +// nanosecond resolution. The count is relative to an epoch at UTC midnight on +// January 1, 1970, in the proleptic Gregorian calendar which extends the +// Gregorian calendar backwards to year one. +// +// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap +// second table is needed for interpretation, using a [24-hour linear +// smear](https://developers.google.com/time/smear). +// +// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By +// restricting to that range, we ensure that we can convert to and from [RFC +// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. 
+// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard +// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using +// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with +// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use +// the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D +// ) to obtain a formatter capable of generating timestamps in this format. +// +// +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. 
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_292007bbfe81227e, []int{0} +} +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(m, src) +} +func (m *Timestamp) XXX_Size() int { + return m.Size() +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) +} + +var xxx_messageInfo_Timestamp proto.InternalMessageInfo + +func (m *Timestamp) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Timestamp) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func (*Timestamp) XXX_MessageName() string { + return "google.protobuf.Timestamp" +} +func init() { + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +} + +func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) } + +var fileDescriptor_292007bbfe81227e = []byte{ + // 212 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, + 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, + 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, + 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, + 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x03, 0xe3, 0x8d, + 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xae, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, + 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7, 0xf0, 0xe1, 0x91, 0x1c, + 0xe3, 0x8a, 0xc7, 0x72, 0x8c, 0x27, 0x1e, 0xcb, 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, + 0x59, 0xee, 0xc4, 0x07, 0xb7, 0x3a, 0x00, 0x24, 0x14, 0xc0, 0x18, 0xc5, 0x5a, 0x52, 0x59, 0x90, + 0x5a, 0xfc, 0x83, 0x91, 0x71, 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, + 0x9e, 0x00, 0xa8, 0x1e, 0xbd, 0xf0, 0xd4, 0x9c, 0x1c, 0xef, 0xbc, 0xfc, 0xf2, 0xbc, 0x10, 0x90, + 0xca, 0x24, 0x36, 0xb0, 0x61, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0b, 0x23, 0x83, 0xdd, + 0xfa, 0x00, 0x00, 0x00, +} + +func (this *Timestamp) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Timestamp) + if !ok { + that2, ok := that.(Timestamp) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Seconds != that1.Seconds { + if this.Seconds < that1.Seconds { + return -1 + } + return 1 + } + if this.Nanos != that1.Nanos { + if this.Nanos < that1.Nanos { 
+ return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Timestamp) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Timestamp) + if !ok { + that2, ok := that.(Timestamp) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Seconds != that1.Seconds { + return false + } + if this.Nanos != that1.Nanos { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Timestamp) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Timestamp{") + s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n") + s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringTimestamp(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Timestamp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Timestamp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Nanos != 0 { + i = encodeVarintTimestamp(dAtA, i, uint64(m.Nanos)) + i-- + dAtA[i] = 0x10 + } + if m.Seconds != 0 { + i = encodeVarintTimestamp(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintTimestamp(dAtA []byte, offset int, v uint64) int { + offset -= sovTimestamp(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Timestamp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Seconds != 0 { + n += 1 + sovTimestamp(uint64(m.Seconds)) + } + if m.Nanos != 0 { + n += 1 + sovTimestamp(uint64(m.Nanos)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovTimestamp(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTimestamp(x uint64) (n int) { + return sovTimestamp(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Timestamp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTimestamp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Timestamp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type 
%d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTimestamp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTimestamp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTimestamp(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTimestamp + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTimestamp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTimestamp(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTimestamp + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTimestamp + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTimestamp + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTimestamp = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTimestamp = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTimestamp = fmt.Errorf("proto: unexpected end of group") +) diff --git a/extender/code/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go b/extender/code/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go new file mode 100644 index 000000000..e03fa1315 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go @@ -0,0 +1,94 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +import ( + "time" +) + +func NewPopulatedTimestamp(r interface { + Int63() int64 +}, easy bool) *Timestamp { + this := &Timestamp{} + ns := int64(r.Int63()) + this.Seconds = ns / 1e9 + this.Nanos = int32(ns % 1e9) + return this +} + +func (ts *Timestamp) String() string { + return TimestampString(ts) +} + +func NewPopulatedStdTime(r interface { + Int63() int64 +}, easy bool) *time.Time { + timestamp := NewPopulatedTimestamp(r, easy) + t, err := TimestampFromProto(timestamp) + if err != nil { + return nil + } + return &t +} + +func SizeOfStdTime(t time.Time) int { + ts, err := TimestampProto(t) + if err != nil { + return 0 + } + return ts.Size() +} + +func StdTimeMarshal(t time.Time) ([]byte, error) { + size := SizeOfStdTime(t) + buf := make([]byte, size) + _, err := StdTimeMarshalTo(t, buf) + return buf, err +} + +func StdTimeMarshalTo(t time.Time, data []byte) (int, error) { + ts, err := TimestampProto(t) + if err != nil { + return 0, err + } + return ts.MarshalTo(data) +} + +func StdTimeUnmarshal(t *time.Time, data []byte) error { + ts := &Timestamp{} + if err := ts.Unmarshal(data); err != nil { + return err + } + tt, err := TimestampFromProto(ts) + if err != nil { + return err + } + *t = tt + return nil +} diff --git a/extender/code/vendor/github.com/gogo/protobuf/types/type.pb.go b/extender/code/vendor/github.com/gogo/protobuf/types/type.pb.go new file mode 100644 index 000000000..13b7ec02f --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/types/type.pb.go @@ -0,0 +1,3370 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/type.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// The syntax in which a protocol buffer element is defined. +type Syntax int32 + +const ( + // Syntax `proto2`. + Syntax_SYNTAX_PROTO2 Syntax = 0 + // Syntax `proto3`. + Syntax_SYNTAX_PROTO3 Syntax = 1 +) + +var Syntax_name = map[int32]string{ + 0: "SYNTAX_PROTO2", + 1: "SYNTAX_PROTO3", +} + +var Syntax_value = map[string]int32{ + "SYNTAX_PROTO2": 0, + "SYNTAX_PROTO3": 1, +} + +func (Syntax) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{0} +} + +// Basic field types. +type Field_Kind int32 + +const ( + // Field type unknown. + Field_TYPE_UNKNOWN Field_Kind = 0 + // Field type double. + Field_TYPE_DOUBLE Field_Kind = 1 + // Field type float. + Field_TYPE_FLOAT Field_Kind = 2 + // Field type int64. + Field_TYPE_INT64 Field_Kind = 3 + // Field type uint64. + Field_TYPE_UINT64 Field_Kind = 4 + // Field type int32. + Field_TYPE_INT32 Field_Kind = 5 + // Field type fixed64. + Field_TYPE_FIXED64 Field_Kind = 6 + // Field type fixed32. + Field_TYPE_FIXED32 Field_Kind = 7 + // Field type bool. + Field_TYPE_BOOL Field_Kind = 8 + // Field type string. + Field_TYPE_STRING Field_Kind = 9 + // Field type group. Proto2 syntax only, and deprecated. + Field_TYPE_GROUP Field_Kind = 10 + // Field type message. + Field_TYPE_MESSAGE Field_Kind = 11 + // Field type bytes. + Field_TYPE_BYTES Field_Kind = 12 + // Field type uint32. + Field_TYPE_UINT32 Field_Kind = 13 + // Field type enum. + Field_TYPE_ENUM Field_Kind = 14 + // Field type sfixed32. + Field_TYPE_SFIXED32 Field_Kind = 15 + // Field type sfixed64. + Field_TYPE_SFIXED64 Field_Kind = 16 + // Field type sint32. + Field_TYPE_SINT32 Field_Kind = 17 + // Field type sint64. + Field_TYPE_SINT64 Field_Kind = 18 +) + +var Field_Kind_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} + +var Field_Kind_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (Field_Kind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{1, 0} +} + +// Whether a field is optional, required, or repeated. +type Field_Cardinality int32 + +const ( + // For fields with unknown cardinality. + Field_CARDINALITY_UNKNOWN Field_Cardinality = 0 + // For optional fields. + Field_CARDINALITY_OPTIONAL Field_Cardinality = 1 + // For required fields. Proto2 syntax only. + Field_CARDINALITY_REQUIRED Field_Cardinality = 2 + // For repeated fields. 
+ Field_CARDINALITY_REPEATED Field_Cardinality = 3 +) + +var Field_Cardinality_name = map[int32]string{ + 0: "CARDINALITY_UNKNOWN", + 1: "CARDINALITY_OPTIONAL", + 2: "CARDINALITY_REQUIRED", + 3: "CARDINALITY_REPEATED", +} + +var Field_Cardinality_value = map[string]int32{ + "CARDINALITY_UNKNOWN": 0, + "CARDINALITY_OPTIONAL": 1, + "CARDINALITY_REQUIRED": 2, + "CARDINALITY_REPEATED": 3, +} + +func (Field_Cardinality) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{1, 1} +} + +// A protocol buffer message type. +type Type struct { + // The fully qualified message name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The list of fields. + Fields []*Field `protobuf:"bytes,2,rep,name=fields,proto3" json:"fields,omitempty"` + // The list of types appearing in `oneof` definitions in this type. + Oneofs []string `protobuf:"bytes,3,rep,name=oneofs,proto3" json:"oneofs,omitempty"` + // The protocol buffer options. + Options []*Option `protobuf:"bytes,4,rep,name=options,proto3" json:"options,omitempty"` + // The source context. + SourceContext *SourceContext `protobuf:"bytes,5,opt,name=source_context,json=sourceContext,proto3" json:"source_context,omitempty"` + // The source syntax. + Syntax Syntax `protobuf:"varint,6,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Type) Reset() { *m = Type{} } +func (*Type) ProtoMessage() {} +func (*Type) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{0} +} +func (m *Type) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Type) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Type.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Type) XXX_Merge(src proto.Message) { + xxx_messageInfo_Type.Merge(m, src) +} +func (m *Type) XXX_Size() int { + return m.Size() +} +func (m *Type) XXX_DiscardUnknown() { + xxx_messageInfo_Type.DiscardUnknown(m) +} + +var xxx_messageInfo_Type proto.InternalMessageInfo + +func (m *Type) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Type) GetFields() []*Field { + if m != nil { + return m.Fields + } + return nil +} + +func (m *Type) GetOneofs() []string { + if m != nil { + return m.Oneofs + } + return nil +} + +func (m *Type) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Type) GetSourceContext() *SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +func (m *Type) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (*Type) XXX_MessageName() string { + return "google.protobuf.Type" +} + +// A single field of a message type. +type Field struct { + // The field type. + Kind Field_Kind `protobuf:"varint,1,opt,name=kind,proto3,enum=google.protobuf.Field_Kind" json:"kind,omitempty"` + // The field cardinality. + Cardinality Field_Cardinality `protobuf:"varint,2,opt,name=cardinality,proto3,enum=google.protobuf.Field_Cardinality" json:"cardinality,omitempty"` + // The field number. + Number int32 `protobuf:"varint,3,opt,name=number,proto3" json:"number,omitempty"` + // The field name. 
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // The field type URL, without the scheme, for message or enumeration + // types. Example: `"type.googleapis.com/google.protobuf.Timestamp"`. + TypeUrl string `protobuf:"bytes,6,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // The index of the field type in `Type.oneofs`, for message or enumeration + // types. The first type has index 1; zero means the type is not in the list. + OneofIndex int32 `protobuf:"varint,7,opt,name=oneof_index,json=oneofIndex,proto3" json:"oneof_index,omitempty"` + // Whether to use alternative packed wire representation. + Packed bool `protobuf:"varint,8,opt,name=packed,proto3" json:"packed,omitempty"` + // The protocol buffer options. + Options []*Option `protobuf:"bytes,9,rep,name=options,proto3" json:"options,omitempty"` + // The field JSON name. + JsonName string `protobuf:"bytes,10,opt,name=json_name,json=jsonName,proto3" json:"json_name,omitempty"` + // The string value of the default value of this field. Proto2 syntax only. + DefaultValue string `protobuf:"bytes,11,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Field) Reset() { *m = Field{} } +func (*Field) ProtoMessage() {} +func (*Field) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{1} +} +func (m *Field) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Field) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Field.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Field) XXX_Merge(src proto.Message) { + xxx_messageInfo_Field.Merge(m, src) +} +func (m *Field) XXX_Size() int { + return m.Size() +} +func (m *Field) XXX_DiscardUnknown() { + xxx_messageInfo_Field.DiscardUnknown(m) +} + +var xxx_messageInfo_Field proto.InternalMessageInfo + +func (m *Field) GetKind() Field_Kind { + if m != nil { + return m.Kind + } + return Field_TYPE_UNKNOWN +} + +func (m *Field) GetCardinality() Field_Cardinality { + if m != nil { + return m.Cardinality + } + return Field_CARDINALITY_UNKNOWN +} + +func (m *Field) GetNumber() int32 { + if m != nil { + return m.Number + } + return 0 +} + +func (m *Field) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Field) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Field) GetOneofIndex() int32 { + if m != nil { + return m.OneofIndex + } + return 0 +} + +func (m *Field) GetPacked() bool { + if m != nil { + return m.Packed + } + return false +} + +func (m *Field) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Field) GetJsonName() string { + if m != nil { + return m.JsonName + } + return "" +} + +func (m *Field) GetDefaultValue() string { + if m != nil { + return m.DefaultValue + } + return "" +} + +func (*Field) XXX_MessageName() string { + return "google.protobuf.Field" +} + +// Enum type definition. +type Enum struct { + // Enum type name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Enum value definitions. + Enumvalue []*EnumValue `protobuf:"bytes,2,rep,name=enumvalue,proto3" json:"enumvalue,omitempty"` + // Protocol buffer options. 
+ Options []*Option `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"` + // The source context. + SourceContext *SourceContext `protobuf:"bytes,4,opt,name=source_context,json=sourceContext,proto3" json:"source_context,omitempty"` + // The source syntax. + Syntax Syntax `protobuf:"varint,5,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Enum) Reset() { *m = Enum{} } +func (*Enum) ProtoMessage() {} +func (*Enum) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{2} +} +func (m *Enum) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Enum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Enum.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Enum) XXX_Merge(src proto.Message) { + xxx_messageInfo_Enum.Merge(m, src) +} +func (m *Enum) XXX_Size() int { + return m.Size() +} +func (m *Enum) XXX_DiscardUnknown() { + xxx_messageInfo_Enum.DiscardUnknown(m) +} + +var xxx_messageInfo_Enum proto.InternalMessageInfo + +func (m *Enum) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Enum) GetEnumvalue() []*EnumValue { + if m != nil { + return m.Enumvalue + } + return nil +} + +func (m *Enum) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Enum) GetSourceContext() *SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +func (m *Enum) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (*Enum) XXX_MessageName() string { + return "google.protobuf.Enum" +} + +// Enum value definition. +type EnumValue struct { + // Enum value name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Enum value number. + Number int32 `protobuf:"varint,2,opt,name=number,proto3" json:"number,omitempty"` + // Protocol buffer options. 
+ Options []*Option `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValue) Reset() { *m = EnumValue{} } +func (*EnumValue) ProtoMessage() {} +func (*EnumValue) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{3} +} +func (m *EnumValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnumValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EnumValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EnumValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValue.Merge(m, src) +} +func (m *EnumValue) XXX_Size() int { + return m.Size() +} +func (m *EnumValue) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValue.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValue proto.InternalMessageInfo + +func (m *EnumValue) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *EnumValue) GetNumber() int32 { + if m != nil { + return m.Number + } + return 0 +} + +func (m *EnumValue) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (*EnumValue) XXX_MessageName() string { + return "google.protobuf.EnumValue" +} + +// A protocol buffer option, which can be attached to a message, field, +// enumeration, etc. +type Option struct { + // The option's name. For protobuf built-in options (options defined in + // descriptor.proto), this is the short name. For example, `"map_entry"`. + // For custom options, it should be the fully-qualified name. For example, + // `"google.api.http"`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The option's value packed in an Any message. If the value is a primitive, + // the corresponding wrapper type defined in google/protobuf/wrappers.proto + // should be used. If the value is an enum, it should be stored as an int32 + // value using the google.protobuf.Int32Value type. 
+ Value *Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Option) Reset() { *m = Option{} } +func (*Option) ProtoMessage() {} +func (*Option) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{4} +} +func (m *Option) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Option) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Option.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Option) XXX_Merge(src proto.Message) { + xxx_messageInfo_Option.Merge(m, src) +} +func (m *Option) XXX_Size() int { + return m.Size() +} +func (m *Option) XXX_DiscardUnknown() { + xxx_messageInfo_Option.DiscardUnknown(m) +} + +var xxx_messageInfo_Option proto.InternalMessageInfo + +func (m *Option) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Option) GetValue() *Any { + if m != nil { + return m.Value + } + return nil +} + +func (*Option) XXX_MessageName() string { + return "google.protobuf.Option" +} +func init() { + proto.RegisterEnum("google.protobuf.Syntax", Syntax_name, Syntax_value) + proto.RegisterEnum("google.protobuf.Field_Kind", Field_Kind_name, Field_Kind_value) + proto.RegisterEnum("google.protobuf.Field_Cardinality", Field_Cardinality_name, Field_Cardinality_value) + proto.RegisterType((*Type)(nil), "google.protobuf.Type") + proto.RegisterType((*Field)(nil), "google.protobuf.Field") + proto.RegisterType((*Enum)(nil), "google.protobuf.Enum") + proto.RegisterType((*EnumValue)(nil), "google.protobuf.EnumValue") + proto.RegisterType((*Option)(nil), "google.protobuf.Option") +} + +func init() { proto.RegisterFile("google/protobuf/type.proto", fileDescriptor_dd271cc1e348c538) } + +var fileDescriptor_dd271cc1e348c538 = []byte{ + // 840 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcf, 0x73, 0xda, 0x46, + 0x14, 0xf6, 0x0a, 0x21, 0xa3, 0x87, 0xc1, 0x9b, 0x4d, 0x26, 0x51, 0x9c, 0x19, 0x95, 0xa1, 0x3d, + 0x30, 0x39, 0xe0, 0x29, 0x78, 0x3c, 0xbd, 0x82, 0x91, 0x29, 0x63, 0x22, 0xa9, 0x8b, 0x68, 0xe2, + 0x5e, 0x18, 0x0c, 0x72, 0x86, 0x44, 0xac, 0x18, 0x24, 0x5a, 0x73, 0xeb, 0x4c, 0xcf, 0xfd, 0x27, + 0x7a, 0xea, 0xf4, 0xdc, 0x3f, 0xc2, 0xc7, 0x1e, 0x7b, 0xac, 0xc9, 0xa5, 0xc7, 0x1c, 0x73, 0x6b, + 0x67, 0x57, 0x20, 0x8b, 0x1f, 0x9d, 0x49, 0xdb, 0x1b, 0xef, 0xfb, 0xbe, 0xf7, 0x73, 0x9f, 0x1e, + 0x70, 0xf4, 0xda, 0xf7, 0x5f, 0x7b, 0xee, 0xf1, 0x64, 0xea, 0x87, 0xfe, 0xd5, 0xec, 0xfa, 0x38, + 0x9c, 0x4f, 0xdc, 0xb2, 0xb0, 0xc8, 0x61, 0xc4, 0x95, 0x57, 0xdc, 0xd1, 0xd3, 0x4d, 0x71, 0x9f, + 0xcd, 0x23, 0xf6, 0xe8, 0xb3, 0x4d, 0x2a, 0xf0, 0x67, 0xd3, 0x81, 0xdb, 0x1b, 0xf8, 0x2c, 0x74, + 0x6f, 0xc2, 0x48, 0x55, 0xfc, 0x51, 0x02, 0xd9, 0x99, 0x4f, 0x5c, 0x42, 0x40, 0x66, 0xfd, 0xb1, + 0xab, 0xa1, 0x02, 0x2a, 0xa9, 0x54, 0xfc, 0x26, 0x65, 0x50, 0xae, 0x47, 0xae, 0x37, 0x0c, 0x34, + 0xa9, 0x90, 0x2a, 0x65, 0x2b, 0x8f, 0xcb, 0x1b, 0xf9, 0xcb, 0xe7, 0x9c, 0xa6, 0x4b, 0x15, 0x79, + 0x0c, 0x8a, 0xcf, 0x5c, 0xff, 0x3a, 0xd0, 0x52, 0x85, 0x54, 0x49, 0xa5, 0x4b, 0x8b, 0x7c, 0x0e, + 0xfb, 0xfe, 0x24, 0x1c, 0xf9, 0x2c, 0xd0, 0x64, 0x11, 0xe8, 0xc9, 0x56, 0x20, 0x4b, 0xf0, 0x74, + 0xa5, 0x23, 0x06, 0xe4, 0xd7, 0xeb, 0xd5, 0xd2, 0x05, 0x54, 0xca, 0x56, 0xf4, 0x2d, 
0xcf, 0x8e, + 0x90, 0x9d, 0x45, 0x2a, 0x9a, 0x0b, 0x92, 0x26, 0x39, 0x06, 0x25, 0x98, 0xb3, 0xb0, 0x7f, 0xa3, + 0x29, 0x05, 0x54, 0xca, 0xef, 0x48, 0xdc, 0x11, 0x34, 0x5d, 0xca, 0x8a, 0xbf, 0x2a, 0x90, 0x16, + 0x4d, 0x91, 0x63, 0x90, 0xdf, 0x8e, 0xd8, 0x50, 0x0c, 0x24, 0x5f, 0x79, 0xb6, 0xbb, 0xf5, 0xf2, + 0xc5, 0x88, 0x0d, 0xa9, 0x10, 0x92, 0x06, 0x64, 0x07, 0xfd, 0xe9, 0x70, 0xc4, 0xfa, 0xde, 0x28, + 0x9c, 0x6b, 0x92, 0xf0, 0x2b, 0xfe, 0x83, 0xdf, 0xd9, 0xbd, 0x92, 0x26, 0xdd, 0xf8, 0x0c, 0xd9, + 0x6c, 0x7c, 0xe5, 0x4e, 0xb5, 0x54, 0x01, 0x95, 0xd2, 0x74, 0x69, 0xc5, 0xef, 0x23, 0x27, 0xde, + 0xe7, 0x29, 0x64, 0xf8, 0x72, 0xf4, 0x66, 0x53, 0x4f, 0xf4, 0xa7, 0xd2, 0x7d, 0x6e, 0x77, 0xa7, + 0x1e, 0xf9, 0x04, 0xb2, 0x62, 0xf8, 0xbd, 0x11, 0x1b, 0xba, 0x37, 0xda, 0xbe, 0x88, 0x05, 0x02, + 0x6a, 0x71, 0x84, 0xe7, 0x99, 0xf4, 0x07, 0x6f, 0xdd, 0xa1, 0x96, 0x29, 0xa0, 0x52, 0x86, 0x2e, + 0xad, 0xe4, 0x5b, 0xa9, 0x1f, 0xf9, 0x56, 0xcf, 0x40, 0x7d, 0x13, 0xf8, 0xac, 0x27, 0xea, 0x03, + 0x51, 0x47, 0x86, 0x03, 0x26, 0xaf, 0xf1, 0x53, 0xc8, 0x0d, 0xdd, 0xeb, 0xfe, 0xcc, 0x0b, 0x7b, + 0xdf, 0xf6, 0xbd, 0x99, 0xab, 0x65, 0x85, 0xe0, 0x60, 0x09, 0x7e, 0xcd, 0xb1, 0xe2, 0xad, 0x04, + 0x32, 0x9f, 0x24, 0xc1, 0x70, 0xe0, 0x5c, 0xda, 0x46, 0xaf, 0x6b, 0x5e, 0x98, 0xd6, 0x4b, 0x13, + 0xef, 0x91, 0x43, 0xc8, 0x0a, 0xa4, 0x61, 0x75, 0xeb, 0x6d, 0x03, 0x23, 0x92, 0x07, 0x10, 0xc0, + 0x79, 0xdb, 0xaa, 0x39, 0x58, 0x8a, 0xed, 0x96, 0xe9, 0x9c, 0x9e, 0xe0, 0x54, 0xec, 0xd0, 0x8d, + 0x00, 0x39, 0x29, 0xa8, 0x56, 0x70, 0x3a, 0xce, 0x71, 0xde, 0x7a, 0x65, 0x34, 0x4e, 0x4f, 0xb0, + 0xb2, 0x8e, 0x54, 0x2b, 0x78, 0x9f, 0xe4, 0x40, 0x15, 0x48, 0xdd, 0xb2, 0xda, 0x38, 0x13, 0xc7, + 0xec, 0x38, 0xb4, 0x65, 0x36, 0xb1, 0x1a, 0xc7, 0x6c, 0x52, 0xab, 0x6b, 0x63, 0x88, 0x23, 0xbc, + 0x30, 0x3a, 0x9d, 0x5a, 0xd3, 0xc0, 0xd9, 0x58, 0x51, 0xbf, 0x74, 0x8c, 0x0e, 0x3e, 0x58, 0x2b, + 0xab, 0x5a, 0xc1, 0xb9, 0x38, 0x85, 0x61, 0x76, 0x5f, 0xe0, 0x3c, 0x79, 0x00, 0xb9, 0x28, 0xc5, + 0xaa, 0x88, 0xc3, 0x0d, 0xe8, 0xf4, 0x04, 0xe3, 0xfb, 0x42, 0xa2, 0x28, 0x0f, 0xd6, 0x80, 0xd3, + 0x13, 0x4c, 0x8a, 0x21, 0x64, 0x13, 0xbb, 0x45, 0x9e, 0xc0, 0xc3, 0xb3, 0x1a, 0x6d, 0xb4, 0xcc, + 0x5a, 0xbb, 0xe5, 0x5c, 0x26, 0xe6, 0xaa, 0xc1, 0xa3, 0x24, 0x61, 0xd9, 0x4e, 0xcb, 0x32, 0x6b, + 0x6d, 0x8c, 0x36, 0x19, 0x6a, 0x7c, 0xd5, 0x6d, 0x51, 0xa3, 0x81, 0xa5, 0x6d, 0xc6, 0x36, 0x6a, + 0x8e, 0xd1, 0xc0, 0xa9, 0xe2, 0x5f, 0x08, 0x64, 0x83, 0xcd, 0xc6, 0x3b, 0xcf, 0xc8, 0x17, 0xa0, + 0xba, 0x6c, 0x36, 0x8e, 0x9e, 0x3f, 0xba, 0x24, 0x47, 0x5b, 0x4b, 0xc5, 0xbd, 0xc5, 0x32, 0xd0, + 0x7b, 0x71, 0x72, 0x19, 0x53, 0xff, 0xf9, 0x70, 0xc8, 0xff, 0xef, 0x70, 0xa4, 0x3f, 0xee, 0x70, + 0xbc, 0x01, 0x35, 0x6e, 0x61, 0xe7, 0x14, 0xee, 0x3f, 0x6c, 0x69, 0xed, 0xc3, 0xfe, 0xf7, 0x3d, + 0x16, 0xbf, 0x04, 0x25, 0x82, 0x76, 0x26, 0x7a, 0x0e, 0xe9, 0xd5, 0xa8, 0x79, 0xe3, 0x8f, 0xb6, + 0xc2, 0xd5, 0xd8, 0x9c, 0x46, 0x92, 0xe7, 0x65, 0x50, 0xa2, 0x3e, 0xf8, 0xb2, 0x75, 0x2e, 0x4d, + 0xa7, 0xf6, 0xaa, 0x67, 0x53, 0xcb, 0xb1, 0x2a, 0x78, 0x6f, 0x13, 0xaa, 0x62, 0x54, 0xff, 0x01, + 0xfd, 0x7e, 0xa7, 0xef, 0xbd, 0xbf, 0xd3, 0xd1, 0x87, 0x3b, 0x1d, 0x7d, 0xbf, 0xd0, 0xd1, 0xcf, + 0x0b, 0x1d, 0xdd, 0x2e, 0x74, 0xf4, 0xdb, 0x42, 0x47, 0x7f, 0x2c, 0x74, 0xf4, 0xe7, 0x42, 0xdf, + 0x7b, 0xcf, 0xf1, 0x77, 0x3a, 0xba, 0x7d, 0xa7, 0x23, 0x78, 0x38, 0xf0, 0xc7, 0x9b, 0x25, 0xd4, + 0x55, 0xfe, 0x9f, 0x63, 0x73, 0xcb, 0x46, 0xdf, 0xa4, 0xf9, 0xd1, 0x0a, 0x3e, 0x20, 0xf4, 0x93, + 0x94, 0x6a, 0xda, 0xf5, 0x5f, 0x24, 0xbd, 0x19, 0xc9, 0xed, 0x55, 0xc5, 0x2f, 0x5d, 0xcf, 0xbb, + 0x60, 0xfe, 
0x77, 0x8c, 0xbb, 0x05, 0x57, 0x8a, 0x88, 0x53, 0xfd, 0x3b, 0x00, 0x00, 0xff, 0xff, + 0xbc, 0x2a, 0x5e, 0x82, 0x2b, 0x07, 0x00, 0x00, +} + +func (this *Type) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Type) + if !ok { + that2, ok := that.(Type) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if len(this.Fields) != len(that1.Fields) { + if len(this.Fields) < len(that1.Fields) { + return -1 + } + return 1 + } + for i := range this.Fields { + if c := this.Fields[i].Compare(that1.Fields[i]); c != 0 { + return c + } + } + if len(this.Oneofs) != len(that1.Oneofs) { + if len(this.Oneofs) < len(that1.Oneofs) { + return -1 + } + return 1 + } + for i := range this.Oneofs { + if this.Oneofs[i] != that1.Oneofs[i] { + if this.Oneofs[i] < that1.Oneofs[i] { + return -1 + } + return 1 + } + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if c := this.SourceContext.Compare(that1.SourceContext); c != 0 { + return c + } + if this.Syntax != that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Field) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Field) + if !ok { + that2, ok := that.(Field) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Kind != that1.Kind { + if this.Kind < that1.Kind { + return -1 + } + return 1 + } + if this.Cardinality != that1.Cardinality { + if this.Cardinality < that1.Cardinality { + return -1 + } + return 1 + } + if this.Number != that1.Number { + if this.Number < that1.Number { + return -1 + } + return 1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.TypeUrl != that1.TypeUrl { + if this.TypeUrl < that1.TypeUrl { + return -1 + } + return 1 + } + if this.OneofIndex != that1.OneofIndex { + if this.OneofIndex < that1.OneofIndex { + return -1 + } + return 1 + } + if this.Packed != that1.Packed { + if !this.Packed { + return -1 + } + return 1 + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if this.JsonName != that1.JsonName { + if this.JsonName < that1.JsonName { + return -1 + } + return 1 + } + if this.DefaultValue != that1.DefaultValue { + if this.DefaultValue < that1.DefaultValue { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Enum) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Enum) + if !ok { + that2, ok := that.(Enum) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + 
return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if len(this.Enumvalue) != len(that1.Enumvalue) { + if len(this.Enumvalue) < len(that1.Enumvalue) { + return -1 + } + return 1 + } + for i := range this.Enumvalue { + if c := this.Enumvalue[i].Compare(that1.Enumvalue[i]); c != 0 { + return c + } + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if c := this.SourceContext.Compare(that1.SourceContext); c != 0 { + return c + } + if this.Syntax != that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *EnumValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*EnumValue) + if !ok { + that2, ok := that.(EnumValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.Number != that1.Number { + if this.Number < that1.Number { + return -1 + } + return 1 + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Option) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Option) + if !ok { + that2, ok := that.(Option) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if c := this.Value.Compare(that1.Value); c != 0 { + return c + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (x Syntax) String() string { + s, ok := Syntax_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x Field_Kind) String() string { + s, ok := Field_Kind_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x Field_Cardinality) String() string { + s, ok := Field_Cardinality_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *Type) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Type) + if !ok { + that2, ok := that.(Type) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if len(this.Fields) != len(that1.Fields) { + return false + } + for i := range this.Fields { + if !this.Fields[i].Equal(that1.Fields[i]) { + return false + } + } + if len(this.Oneofs) != len(that1.Oneofs) { + return false + } + for i := range this.Oneofs { + if this.Oneofs[i] != that1.Oneofs[i] { + return false + } + } + if 
len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if !this.SourceContext.Equal(that1.SourceContext) { + return false + } + if this.Syntax != that1.Syntax { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Field) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Field) + if !ok { + that2, ok := that.(Field) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Kind != that1.Kind { + return false + } + if this.Cardinality != that1.Cardinality { + return false + } + if this.Number != that1.Number { + return false + } + if this.Name != that1.Name { + return false + } + if this.TypeUrl != that1.TypeUrl { + return false + } + if this.OneofIndex != that1.OneofIndex { + return false + } + if this.Packed != that1.Packed { + return false + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if this.JsonName != that1.JsonName { + return false + } + if this.DefaultValue != that1.DefaultValue { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Enum) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Enum) + if !ok { + that2, ok := that.(Enum) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if len(this.Enumvalue) != len(that1.Enumvalue) { + return false + } + for i := range this.Enumvalue { + if !this.Enumvalue[i].Equal(that1.Enumvalue[i]) { + return false + } + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if !this.SourceContext.Equal(that1.SourceContext) { + return false + } + if this.Syntax != that1.Syntax { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *EnumValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*EnumValue) + if !ok { + that2, ok := that.(EnumValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Number != that1.Number { + return false + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Option) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Option) + if !ok { + that2, ok := that.(Option) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if !this.Value.Equal(that1.Value) { + return false + } + 
if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Type) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&types.Type{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Fields != nil { + s = append(s, "Fields: "+fmt.Sprintf("%#v", this.Fields)+",\n") + } + s = append(s, "Oneofs: "+fmt.Sprintf("%#v", this.Oneofs)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceContext != nil { + s = append(s, "SourceContext: "+fmt.Sprintf("%#v", this.SourceContext)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Field) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&types.Field{") + s = append(s, "Kind: "+fmt.Sprintf("%#v", this.Kind)+",\n") + s = append(s, "Cardinality: "+fmt.Sprintf("%#v", this.Cardinality)+",\n") + s = append(s, "Number: "+fmt.Sprintf("%#v", this.Number)+",\n") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "TypeUrl: "+fmt.Sprintf("%#v", this.TypeUrl)+",\n") + s = append(s, "OneofIndex: "+fmt.Sprintf("%#v", this.OneofIndex)+",\n") + s = append(s, "Packed: "+fmt.Sprintf("%#v", this.Packed)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + s = append(s, "JsonName: "+fmt.Sprintf("%#v", this.JsonName)+",\n") + s = append(s, "DefaultValue: "+fmt.Sprintf("%#v", this.DefaultValue)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Enum) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&types.Enum{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Enumvalue != nil { + s = append(s, "Enumvalue: "+fmt.Sprintf("%#v", this.Enumvalue)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceContext != nil { + s = append(s, "SourceContext: "+fmt.Sprintf("%#v", this.SourceContext)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&types.EnumValue{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Number: "+fmt.Sprintf("%#v", this.Number)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Option) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Option{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Value != nil { + s = append(s, "Value: "+fmt.Sprintf("%#v", 
this.Value)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringType(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Type) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Type) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Type) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Syntax != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Syntax)) + i-- + dAtA[i] = 0x30 + } + if m.SourceContext != nil { + { + size, err := m.SourceContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Oneofs) > 0 { + for iNdEx := len(m.Oneofs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Oneofs[iNdEx]) + copy(dAtA[i:], m.Oneofs[iNdEx]) + i = encodeVarintType(dAtA, i, uint64(len(m.Oneofs[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Fields) > 0 { + for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Fields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Field) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Field) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Field) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.DefaultValue) > 0 { + i -= len(m.DefaultValue) + copy(dAtA[i:], m.DefaultValue) + i = encodeVarintType(dAtA, i, uint64(len(m.DefaultValue))) + i-- + dAtA[i] = 0x5a + } + if len(m.JsonName) > 0 { + i -= len(m.JsonName) + copy(dAtA[i:], m.JsonName) + i = encodeVarintType(dAtA, i, uint64(len(m.JsonName))) + i-- + dAtA[i] = 0x52 + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + if m.Packed { + i-- + if m.Packed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + 
} + i-- + dAtA[i] = 0x40 + } + if m.OneofIndex != 0 { + i = encodeVarintType(dAtA, i, uint64(m.OneofIndex)) + i-- + dAtA[i] = 0x38 + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = encodeVarintType(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0x32 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + } + if m.Number != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Number)) + i-- + dAtA[i] = 0x18 + } + if m.Cardinality != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Cardinality)) + i-- + dAtA[i] = 0x10 + } + if m.Kind != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Kind)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Enum) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Enum) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Enum) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Syntax != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Syntax)) + i-- + dAtA[i] = 0x28 + } + if m.SourceContext != nil { + { + size, err := m.SourceContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Enumvalue) > 0 { + for iNdEx := len(m.Enumvalue) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Enumvalue[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EnumValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnumValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnumValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Number != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Number)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Option) Marshal() 
(dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Option) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Option) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != nil { + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintType(dAtA []byte, offset int, v uint64) int { + offset -= sovType(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedType(r randyType, easy bool) *Type { + this := &Type{} + this.Name = string(randStringType(r)) + if r.Intn(5) != 0 { + v1 := r.Intn(5) + this.Fields = make([]*Field, v1) + for i := 0; i < v1; i++ { + this.Fields[i] = NewPopulatedField(r, easy) + } + } + v2 := r.Intn(10) + this.Oneofs = make([]string, v2) + for i := 0; i < v2; i++ { + this.Oneofs[i] = string(randStringType(r)) + } + if r.Intn(5) != 0 { + v3 := r.Intn(5) + this.Options = make([]*Option, v3) + for i := 0; i < v3; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + if r.Intn(5) != 0 { + this.SourceContext = NewPopulatedSourceContext(r, easy) + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 7) + } + return this +} + +func NewPopulatedField(r randyType, easy bool) *Field { + this := &Field{} + this.Kind = Field_Kind([]int32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}[r.Intn(19)]) + this.Cardinality = Field_Cardinality([]int32{0, 1, 2, 3}[r.Intn(4)]) + this.Number = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Number *= -1 + } + this.Name = string(randStringType(r)) + this.TypeUrl = string(randStringType(r)) + this.OneofIndex = int32(r.Int31()) + if r.Intn(2) == 0 { + this.OneofIndex *= -1 + } + this.Packed = bool(bool(r.Intn(2) == 0)) + if r.Intn(5) != 0 { + v4 := r.Intn(5) + this.Options = make([]*Option, v4) + for i := 0; i < v4; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + this.JsonName = string(randStringType(r)) + this.DefaultValue = string(randStringType(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 12) + } + return this +} + +func NewPopulatedEnum(r randyType, easy bool) *Enum { + this := &Enum{} + this.Name = string(randStringType(r)) + if r.Intn(5) != 0 { + v5 := r.Intn(5) + this.Enumvalue = make([]*EnumValue, v5) + for i := 0; i < v5; i++ { + this.Enumvalue[i] = NewPopulatedEnumValue(r, easy) + } + } + if r.Intn(5) != 0 { + v6 := r.Intn(5) + this.Options = make([]*Option, v6) + for i := 0; i < v6; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + if r.Intn(5) != 0 { + this.SourceContext = NewPopulatedSourceContext(r, easy) + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 6) + } + return this +} + +func 
NewPopulatedEnumValue(r randyType, easy bool) *EnumValue { + this := &EnumValue{} + this.Name = string(randStringType(r)) + this.Number = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Number *= -1 + } + if r.Intn(5) != 0 { + v7 := r.Intn(5) + this.Options = make([]*Option, v7) + for i := 0; i < v7; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 4) + } + return this +} + +func NewPopulatedOption(r randyType, easy bool) *Option { + this := &Option{} + this.Name = string(randStringType(r)) + if r.Intn(5) != 0 { + this.Value = NewPopulatedAny(r, easy) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 3) + } + return this +} + +type randyType interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneType(r randyType) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringType(r randyType) string { + v8 := r.Intn(100) + tmps := make([]rune, v8) + for i := 0; i < v8; i++ { + tmps[i] = randUTF8RuneType(r) + } + return string(tmps) +} +func randUnrecognizedType(r randyType, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldType(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldType(dAtA []byte, r randyType, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + v9 := r.Int63() + if r.Intn(2) == 0 { + v9 *= -1 + } + dAtA = encodeVarintPopulateType(dAtA, uint64(v9)) + case 1: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateType(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateType(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Type) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if len(m.Fields) > 0 { + for _, e := range m.Fields { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if len(m.Oneofs) > 0 { + for _, s := range m.Oneofs { + l = len(s) + n += 1 + l + sovType(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if m.SourceContext != nil { + l = m.SourceContext.Size() + n += 1 + l + sovType(uint64(l)) + } + if m.Syntax != 0 { + n += 1 + sovType(uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Field) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Kind != 0 { + n += 1 + 
sovType(uint64(m.Kind)) + } + if m.Cardinality != 0 { + n += 1 + sovType(uint64(m.Cardinality)) + } + if m.Number != 0 { + n += 1 + sovType(uint64(m.Number)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.OneofIndex != 0 { + n += 1 + sovType(uint64(m.OneofIndex)) + } + if m.Packed { + n += 2 + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + l = len(m.JsonName) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + l = len(m.DefaultValue) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Enum) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if len(m.Enumvalue) > 0 { + for _, e := range m.Enumvalue { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if m.SourceContext != nil { + l = m.SourceContext.Size() + n += 1 + l + sovType(uint64(l)) + } + if m.Syntax != 0 { + n += 1 + sovType(uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EnumValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.Number != 0 { + n += 1 + sovType(uint64(m.Number)) + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Option) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.Value != nil { + l = m.Value.Size() + n += 1 + l + sovType(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovType(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozType(x uint64) (n int) { + return sovType(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Type) String() string { + if this == nil { + return "nil" + } + repeatedStringForFields := "[]*Field{" + for _, f := range this.Fields { + repeatedStringForFields += strings.Replace(f.String(), "Field", "Field", 1) + "," + } + repeatedStringForFields += "}" + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(f.String(), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&Type{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Fields:` + repeatedStringForFields + `,`, + `Oneofs:` + fmt.Sprintf("%v", this.Oneofs) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `SourceContext:` + strings.Replace(fmt.Sprintf("%v", this.SourceContext), "SourceContext", "SourceContext", 1) + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Field) String() string { + if this == nil { + return "nil" + } + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(f.String(), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" 
+ s := strings.Join([]string{`&Field{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Cardinality:` + fmt.Sprintf("%v", this.Cardinality) + `,`, + `Number:` + fmt.Sprintf("%v", this.Number) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TypeUrl:` + fmt.Sprintf("%v", this.TypeUrl) + `,`, + `OneofIndex:` + fmt.Sprintf("%v", this.OneofIndex) + `,`, + `Packed:` + fmt.Sprintf("%v", this.Packed) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `JsonName:` + fmt.Sprintf("%v", this.JsonName) + `,`, + `DefaultValue:` + fmt.Sprintf("%v", this.DefaultValue) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Enum) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnumvalue := "[]*EnumValue{" + for _, f := range this.Enumvalue { + repeatedStringForEnumvalue += strings.Replace(f.String(), "EnumValue", "EnumValue", 1) + "," + } + repeatedStringForEnumvalue += "}" + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(f.String(), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&Enum{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Enumvalue:` + repeatedStringForEnumvalue + `,`, + `Options:` + repeatedStringForOptions + `,`, + `SourceContext:` + strings.Replace(fmt.Sprintf("%v", this.SourceContext), "SourceContext", "SourceContext", 1) + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *EnumValue) String() string { + if this == nil { + return "nil" + } + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(f.String(), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&EnumValue{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Number:` + fmt.Sprintf("%v", this.Number) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Option) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Option{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Any", "Any", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringType(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Type) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Type: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Type: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Fields = append(m.Fields, &Field{}) + if err := m.Fields[len(m.Fields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Oneofs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Oneofs = append(m.Oneofs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceContext == nil { + m.SourceContext = &SourceContext{} + } + if err := m.SourceContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= Syntax(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Field) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Field: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Field: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + m.Kind = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Kind |= Field_Kind(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Cardinality", wireType) + } + m.Cardinality = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Cardinality |= Field_Cardinality(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Number", wireType) + } + m.Number = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Number |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OneofIndex", wireType) + } + m.OneofIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OneofIndex |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Packed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Packed = bool(v != 0) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JsonName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JsonName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DefaultValue = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
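Every field case in these generated Unmarshal methods begins with the same inlined base-128 varint loop. A minimal standalone sketch of that decode step, with hypothetical names that are not part of the vendored file:

package main

import (
	"errors"
	"fmt"
	"io"
)

// decodeVarint mirrors the loop the generated code inlines per field:
// 7 payload bits per byte, the 0x80 bit marks continuation, and more
// than 64 bits of payload is an overflow error.
func decodeVarint(data []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows 64 bits")
		}
		if n >= len(data) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := data[n]
		n++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: final byte
			return v, n, nil
		}
	}
}

func main() {
	// 300 encodes as 0xAC 0x02: 0x2C | 0x02<<7 = 44 + 256.
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}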
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Enum) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Enum: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Enum: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Enumvalue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Enumvalue = append(m.Enumvalue, &EnumValue{}) + if err := m.Enumvalue[len(m.Enumvalue)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceContext == nil { + m.SourceContext = &SourceContext{} + } + if err := 
m.SourceContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= Syntax(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnumValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnumValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnumValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Number", wireType) + } + m.Number = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Number |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Option) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Option: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Option: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Value == nil { + m.Value = &Any{} + } + if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
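The skipType helper defined just below walks unknown fields by wire type; the tag it parses packs the field number and wire type into a single varint. A small illustrative sketch of that layout (not taken from the generated file):

package main

import "fmt"

// splitTag unpacks a protobuf tag: fieldNumber<<3 | wireType.
// Wire types: 0 varint, 1 fixed64, 2 length-delimited,
// 3 start group, 4 end group, 5 fixed32.
func splitTag(key uint64) (fieldNum int32, wireType int) {
	return int32(key >> 3), int(key & 0x7)
}

func main() {
	num, wt := splitTag(0x12) // field 2, length-delimited
	fmt.Println(num, wt)      // 2 2
}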
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipType(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowType + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowType + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowType + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthType + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupType + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthType + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthType = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowType = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupType = fmt.Errorf("proto: unexpected end of group") +) diff --git a/extender/code/vendor/github.com/gogo/protobuf/types/wrappers.pb.go b/extender/code/vendor/github.com/gogo/protobuf/types/wrappers.pb.go new file mode 100644 index 000000000..8f1edb57d --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/types/wrappers.pb.go @@ -0,0 +1,2730 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/wrappers.proto + +package types + +import ( + bytes "bytes" + encoding_binary "encoding/binary" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + // The double value. 
+ Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleValue) Reset() { *m = DoubleValue{} } +func (*DoubleValue) ProtoMessage() {} +func (*DoubleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{0} +} +func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } +func (m *DoubleValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DoubleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleValue.Merge(m, src) +} +func (m *DoubleValue) XXX_Size() int { + return m.Size() +} +func (m *DoubleValue) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleValue proto.InternalMessageInfo + +func (m *DoubleValue) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (*DoubleValue) XXX_MessageName() string { + return "google.protobuf.DoubleValue" +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + // The float value. + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FloatValue) Reset() { *m = FloatValue{} } +func (*FloatValue) ProtoMessage() {} +func (*FloatValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{1} +} +func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } +func (m *FloatValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FloatValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_FloatValue.Merge(m, src) +} +func (m *FloatValue) XXX_Size() int { + return m.Size() +} +func (m *FloatValue) XXX_DiscardUnknown() { + xxx_messageInfo_FloatValue.DiscardUnknown(m) +} + +var xxx_messageInfo_FloatValue proto.InternalMessageInfo + +func (m *FloatValue) GetValue() float32 { + if m != nil { + return m.Value + } + return 0 +} + +func (*FloatValue) XXX_MessageName() string { + return "google.protobuf.FloatValue" +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + // The int64 value. 
+ Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int64Value) Reset() { *m = Int64Value{} } +func (*Int64Value) ProtoMessage() {} +func (*Int64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{2} +} +func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } +func (m *Int64Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Int64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int64Value.Merge(m, src) +} +func (m *Int64Value) XXX_Size() int { + return m.Size() +} +func (m *Int64Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int64Value proto.InternalMessageInfo + +func (m *Int64Value) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +func (*Int64Value) XXX_MessageName() string { + return "google.protobuf.Int64Value" +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + // The uint64 value. + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt64Value) Reset() { *m = UInt64Value{} } +func (*UInt64Value) ProtoMessage() {} +func (*UInt64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{3} +} +func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } +func (m *UInt64Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UInt64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt64Value.Merge(m, src) +} +func (m *UInt64Value) XXX_Size() int { + return m.Size() +} +func (m *UInt64Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt64Value proto.InternalMessageInfo + +func (m *UInt64Value) GetValue() uint64 { + if m != nil { + return m.Value + } + return 0 +} + +func (*UInt64Value) XXX_MessageName() string { + return "google.protobuf.UInt64Value" +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +type Int32Value struct { + // The int32 value. 
+ Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int32Value) Reset() { *m = Int32Value{} } +func (*Int32Value) ProtoMessage() {} +func (*Int32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{4} +} +func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } +func (m *Int32Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Int32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int32Value.Merge(m, src) +} +func (m *Int32Value) XXX_Size() int { + return m.Size() +} +func (m *Int32Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int32Value proto.InternalMessageInfo + +func (m *Int32Value) GetValue() int32 { + if m != nil { + return m.Value + } + return 0 +} + +func (*Int32Value) XXX_MessageName() string { + return "google.protobuf.Int32Value" +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + // The uint32 value. + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt32Value) Reset() { *m = UInt32Value{} } +func (*UInt32Value) ProtoMessage() {} +func (*UInt32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{5} +} +func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } +func (m *UInt32Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UInt32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt32Value.Merge(m, src) +} +func (m *UInt32Value) XXX_Size() int { + return m.Size() +} +func (m *UInt32Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt32Value proto.InternalMessageInfo + +func (m *UInt32Value) GetValue() uint32 { + if m != nil { + return m.Value + } + return 0 +} + +func (*UInt32Value) XXX_MessageName() string { + return "google.protobuf.UInt32Value" +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + // The bool value. 
+ Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BoolValue) Reset() { *m = BoolValue{} } +func (*BoolValue) ProtoMessage() {} +func (*BoolValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{6} +} +func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } +func (m *BoolValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BoolValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoolValue.Merge(m, src) +} +func (m *BoolValue) XXX_Size() int { + return m.Size() +} +func (m *BoolValue) XXX_DiscardUnknown() { + xxx_messageInfo_BoolValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BoolValue proto.InternalMessageInfo + +func (m *BoolValue) GetValue() bool { + if m != nil { + return m.Value + } + return false +} + +func (*BoolValue) XXX_MessageName() string { + return "google.protobuf.BoolValue" +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + // The string value. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StringValue) Reset() { *m = StringValue{} } +func (*StringValue) ProtoMessage() {} +func (*StringValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{7} +} +func (*StringValue) XXX_WellKnownType() string { return "StringValue" } +func (m *StringValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StringValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StringValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringValue.Merge(m, src) +} +func (m *StringValue) XXX_Size() int { + return m.Size() +} +func (m *StringValue) XXX_DiscardUnknown() { + xxx_messageInfo_StringValue.DiscardUnknown(m) +} + +var xxx_messageInfo_StringValue proto.InternalMessageInfo + +func (m *StringValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (*StringValue) XXX_MessageName() string { + return "google.protobuf.StringValue" +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + // The bytes value. 
+ Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BytesValue) Reset() { *m = BytesValue{} } +func (*BytesValue) ProtoMessage() {} +func (*BytesValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{8} +} +func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } +func (m *BytesValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BytesValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BytesValue.Merge(m, src) +} +func (m *BytesValue) XXX_Size() int { + return m.Size() +} +func (m *BytesValue) XXX_DiscardUnknown() { + xxx_messageInfo_BytesValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BytesValue proto.InternalMessageInfo + +func (m *BytesValue) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (*BytesValue) XXX_MessageName() string { + return "google.protobuf.BytesValue" +} +func init() { + proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue") + proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue") + proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value") + proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value") + proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value") + proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value") + proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue") + proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue") + proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") +} + +func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_5377b62bda767935) } + +var fileDescriptor_5377b62bda767935 = []byte{ + // 285 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c, + 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca, + 0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c, + 0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5, + 0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13, + 0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8, + 0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca, + 0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a, + 0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x3b, + 0xe3, 0x8d, 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xfe, 0x78, 0x28, 0xc7, 0xd8, 0xf0, 0x48, + 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, + 0x48, 0x8e, 0xf1, 0xc5, 0x23, 0x39, 0x86, 0x0f, 0x20, 0xf1, 0xc7, 0x72, 0x8c, 0x27, 0x1e, 0xcb, + 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x45, 0x87, 0x13, 
0x6f, 0x38, 0x34, 0xbe, 0x02, + 0x40, 0x22, 0x01, 0x8c, 0x51, 0xac, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x3f, 0x18, 0x19, 0x17, 0x31, + 0x31, 0xbb, 0x07, 0x38, 0xad, 0x62, 0x92, 0x73, 0x87, 0x68, 0x09, 0x80, 0x6a, 0xd1, 0x0b, 0x4f, + 0xcd, 0xc9, 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4c, 0x62, 0x03, 0x9b, 0x65, 0x0c, + 0x08, 0x00, 0x00, 0xff, 0xff, 0x31, 0x55, 0x64, 0x90, 0x0a, 0x02, 0x00, 0x00, +} + +func (this *DoubleValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*DoubleValue) + if !ok { + that2, ok := that.(DoubleValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *FloatValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*FloatValue) + if !ok { + that2, ok := that.(FloatValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Int64Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Int64Value) + if !ok { + that2, ok := that.(Int64Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *UInt64Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*UInt64Value) + if !ok { + that2, ok := that.(UInt64Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Int32Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Int32Value) + if !ok { + that2, ok := that.(Int32Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *UInt32Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*UInt32Value) + if !ok { + that2, ok := that.(UInt32Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } 
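Each generated Compare (and, further down, Equal) first coerces its interface{} argument so that both *T and T are accepted. A condensed sketch of that coercion using a stand-in type, since the real ones are defined above:

package main

import "fmt"

// wrapper is a stand-in for the generated value types.
type wrapper struct{ Value int64 }

// coerce mirrors the type-assertion ladder at the top of every
// generated Compare/Equal: accept *wrapper or wrapper alike,
// normalising to a pointer.
func coerce(that interface{}) (*wrapper, bool) {
	if p, ok := that.(*wrapper); ok {
		return p, true
	}
	if v, ok := that.(wrapper); ok {
		return &v, true
	}
	return nil, false
}

func main() {
	a := wrapper{Value: 7}
	byValue, _ := coerce(a)
	byPointer, _ := coerce(&a)
	fmt.Println(byValue.Value, byPointer.Value) // 7 7
}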
+ if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *BoolValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*BoolValue) + if !ok { + that2, ok := that.(BoolValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if !this.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *StringValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*StringValue) + if !ok { + that2, ok := that.(StringValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *BytesValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*BytesValue) + if !ok { + that2, ok := that.(BytesValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := bytes.Compare(this.Value, that1.Value); c != 0 { + return c + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *DoubleValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DoubleValue) + if !ok { + that2, ok := that.(DoubleValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *FloatValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*FloatValue) + if !ok { + that2, ok := that.(FloatValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Int64Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Int64Value) + if !ok { + that2, ok := that.(Int64Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *UInt64Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + 
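Note that these Equal methods, like the Compare methods above, also byte-compare XXX_unrecognized, so two wrappers that decode to the same known value but carry different unknown fields are not equal. A sketch of that final check with a stand-in struct (illustrative only):

package main

import (
	"bytes"
	"fmt"
)

// msg is a stand-in; the generated types keep unknown fields as raw
// bytes in XXX_unrecognized and include them in equality.
type msg struct {
	Value            int64
	XXX_unrecognized []byte
}

func equal(a, b *msg) bool {
	return a.Value == b.Value &&
		bytes.Equal(a.XXX_unrecognized, b.XXX_unrecognized)
}

func main() {
	x := &msg{Value: 1}
	y := &msg{Value: 1, XXX_unrecognized: []byte{0x58, 0x01}} // field 11, varint 1
	fmt.Println(equal(x, y)) // false
}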
+ that1, ok := that.(*UInt64Value) + if !ok { + that2, ok := that.(UInt64Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Int32Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Int32Value) + if !ok { + that2, ok := that.(Int32Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *UInt32Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*UInt32Value) + if !ok { + that2, ok := that.(UInt32Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *BoolValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BoolValue) + if !ok { + that2, ok := that.(BoolValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *StringValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*StringValue) + if !ok { + that2, ok := that.(StringValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *BytesValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BytesValue) + if !ok { + that2, ok := that.(BytesValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Value, that1.Value) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *DoubleValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.DoubleValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FloatValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.FloatValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, 
"}") + return strings.Join(s, "") +} +func (this *Int64Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.Int64Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UInt64Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.UInt64Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Int32Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.Int32Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UInt32Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.UInt32Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *BoolValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.BoolValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *StringValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.StringValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *BytesValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.BytesValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringWrappers(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *DoubleValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DoubleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= 
len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *FloatValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FloatValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FloatValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i -= 4 + encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.Value)))) + i-- + dAtA[i] = 0xd + } + return len(dAtA) - i, nil +} + +func (m *Int64Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int64Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Int64Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *UInt64Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt64Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UInt64Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Int32Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int32Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Int32Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *UInt32Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt32Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UInt32Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BoolValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BoolValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BoolValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value { + i-- + if m.Value { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StringValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StringValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StringValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintWrappers(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BytesValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BytesValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BytesValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintWrappers(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintWrappers(dAtA []byte, offset int, v uint64) int { + offset -= sovWrappers(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedDoubleValue(r randyWrappers, easy bool) *DoubleValue { + this := &DoubleValue{} + this.Value = float64(r.Float64()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedFloatValue(r randyWrappers, easy bool) *FloatValue { + this := &FloatValue{} + this.Value = float32(r.Float32()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedInt64Value(r randyWrappers, easy bool) *Int64Value { + this := &Int64Value{} + this.Value = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = 
randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedUInt64Value(r randyWrappers, easy bool) *UInt64Value { + this := &UInt64Value{} + this.Value = uint64(uint64(r.Uint32())) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedInt32Value(r randyWrappers, easy bool) *Int32Value { + this := &Int32Value{} + this.Value = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedUInt32Value(r randyWrappers, easy bool) *UInt32Value { + this := &UInt32Value{} + this.Value = uint32(r.Uint32()) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedBoolValue(r randyWrappers, easy bool) *BoolValue { + this := &BoolValue{} + this.Value = bool(bool(r.Intn(2) == 0)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedStringValue(r randyWrappers, easy bool) *StringValue { + this := &StringValue{} + this.Value = string(randStringWrappers(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedBytesValue(r randyWrappers, easy bool) *BytesValue { + this := &BytesValue{} + v1 := r.Intn(100) + this.Value = make([]byte, v1) + for i := 0; i < v1; i++ { + this.Value[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +type randyWrappers interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneWrappers(r randyWrappers) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringWrappers(r randyWrappers) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneWrappers(r) + } + return string(tmps) +} +func randUnrecognizedWrappers(r randyWrappers, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldWrappers(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldWrappers(dAtA []byte, r randyWrappers, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateWrappers(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = 
append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *DoubleValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *FloatValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 5 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Int64Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UInt64Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Int32Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UInt32Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BoolValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StringValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Value) + if l > 0 { + n += 1 + l + sovWrappers(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BytesValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Value) + if l > 0 { + n += 1 + l + sovWrappers(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovWrappers(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozWrappers(x uint64) (n int) { + return sovWrappers(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *DoubleValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DoubleValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *FloatValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FloatValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Int64Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Int64Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *UInt64Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UInt64Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Int32Value) String() string { + if this == nil { + return "nil" + 
} + s := strings.Join([]string{`&Int32Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *UInt32Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UInt32Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *BoolValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BoolValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *StringValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StringValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *BytesValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BytesValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringWrappers(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *DoubleValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DoubleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DoubleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
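Every Unmarshal method in this vendored file opens with the same hand-rolled loop: read one varint, then split it into a field number (wire >> 3) and a wire type (wire & 0x7). A dependency-free sketch of that pattern, with a helper name of my own (readVarint is not part of the diff):

package main

import (
	"errors"
	"fmt"
)

// readVarint mirrors the shift-accumulate loop used by every
// Unmarshal method in this file.
func readVarint(b []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows 64 bits")
		}
		if n >= len(b) {
			return 0, 0, errors.New("unexpected EOF")
		}
		c := b[n]
		n++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 {
			return v, n, nil
		}
	}
}

func main() {
	data := []byte{0x08, 0x96, 0x01} // Int64Value{Value: 150} on the wire
	key, n, _ := readVarint(data)
	fmt.Println("field:", key>>3, "wire type:", key&0x7) // field: 1 wire type: 0
	val, _, _ := readVarint(data[n:])
	fmt.Println("value:", val) // value: 150
}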
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FloatValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FloatValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FloatValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.Value = float32(math.Float32frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int64Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Int64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
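One consequence of the plain-varint encoding used by Int64Value above: negative values are cast straight to uint64 (see sovWrappers(uint64(m.Value)) in the Size methods), so any negative Int64Value pays for a full 10-byte varint. A quick standalone check of that size formula:

package main

import (
	"fmt"
	"math/bits"
)

// sizeVarint mirrors sovWrappers from the diff.
func sizeVarint(v uint64) int { return (bits.Len64(v|1) + 6) / 7 }

func main() {
	fmt.Println(sizeVarint(uint64(int64(1))))  // 1: small positives stay short
	fmt.Println(sizeVarint(uint64(int64(-1)))) // 10: negatives use all 64 bits
}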
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt64Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int32Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Int32Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int32Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
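The MarshalToSizedBuffer methods near the top of this file fill the buffer from the end toward the start, which is why each one encodes the value first and only then steps back (i--) to write the tag byte in front of it. A minimal sketch of that back-to-front technique; putVarintBackward is my own stand-in for encodeVarintWrappers:

package main

import (
	"fmt"
	"math/bits"
)

func sizeVarint(v uint64) int { return (bits.Len64(v|1) + 6) / 7 }

// putVarintBackward writes v so that it ends at offset i and returns
// the new start offset, mirroring encodeVarintWrappers in the diff.
func putVarintBackward(buf []byte, i int, v uint64) int {
	i -= sizeVarint(v)
	base := i
	for v >= 1<<7 {
		buf[i] = uint8(v&0x7f | 0x80)
		v >>= 7
		i++
	}
	buf[i] = uint8(v)
	return base
}

func main() {
	// Field 1, wire type 0 (varint), value 150 -> 08 96 01.
	buf := make([]byte, 3)
	i := putVarintBackward(buf, len(buf), 150) // value first...
	i--
	buf[i] = 0x8 // ...then the tag, one byte to its left
	fmt.Printf("% x\n", buf[i:]) // 08 96 01
}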
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt32Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt32Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt32Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BoolValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BoolValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BoolValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
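When Unmarshal meets a field number it does not recognize (the default branches above), it calls skipWrappers to measure the field and stashes the raw bytes in XXX_unrecognized, so unknown fields survive a decode/re-encode round trip. The skipping itself is driven entirely by the wire type; a simplified sketch (skipField is my own helper, and it omits the group-depth tracking the real skipWrappers does):

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// skipField returns how many bytes the field content of the given
// wire type occupies at the front of b (tag already consumed).
func skipField(b []byte, wireType uint64) (int, error) {
	switch wireType {
	case 0: // varint: scan to the terminating byte
		for i, c := range b {
			if c < 0x80 {
				return i + 1, nil
			}
		}
		return 0, errors.New("truncated varint")
	case 1: // fixed64
		return 8, nil
	case 2: // length-delimited: length varint plus payload
		length, n := binary.Uvarint(b)
		if n <= 0 {
			return 0, errors.New("bad length")
		}
		return n + int(length), nil
	case 5: // fixed32
		return 4, nil
	default: // groups (3, 4) need depth tracking, as in skipWrappers
		return 0, fmt.Errorf("unhandled wire type %d", wireType)
	}
}

func main() {
	// Field 9 (unknown to the wrapper types), wire type 2, payload "hi".
	unknown := []byte{0x4a, 0x02, 'h', 'i'}
	n, _ := skipField(unknown[1:], uint64(unknown[0])&0x7)
	fmt.Println("skipped", 1+n, "bytes") // skipped 4 bytes
}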
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StringValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StringValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StringValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWrappers + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWrappers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BytesValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BytesValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BytesValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthWrappers + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthWrappers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
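StringValue and BytesValue are the only length-delimited (wire type 2) wrappers here: tag 0x0a = (1<<3)|2, then a varint length, then the payload, which is exactly what the intStringLen/postIndex bounds checks above are guarding. Hand-building the four wire bytes of StringValue{Value: "hi"} as a sanity check (a sketch, not the generated code):

package main

import "fmt"

func main() {
	payload := "hi"
	wire := append([]byte{0x0a, byte(len(payload))}, payload...)
	fmt.Printf("% x\n", wire) // 0a 02 68 69

	// Decoding: the length is wire[1] (a one-byte varint here), the
	// payload is the next `length` bytes, and postIndex = 2 + length
	// must not exceed len(wire).
	length := int(wire[1])
	fmt.Println(string(wire[2 : 2+length])) // hi
}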
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipWrappers(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWrappers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWrappers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWrappers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthWrappers + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupWrappers + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthWrappers + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthWrappers = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowWrappers = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupWrappers = fmt.Errorf("proto: unexpected end of group") +) diff --git a/extender/code/vendor/github.com/gogo/protobuf/types/wrappers_gogo.go b/extender/code/vendor/github.com/gogo/protobuf/types/wrappers_gogo.go new file mode 100644 index 000000000..d905df360 --- /dev/null +++ b/extender/code/vendor/github.com/gogo/protobuf/types/wrappers_gogo.go @@ -0,0 +1,300 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +func NewPopulatedStdDouble(r randyWrappers, easy bool) *float64 { + v := NewPopulatedDoubleValue(r, easy) + return &v.Value +} + +func SizeOfStdDouble(v float64) int { + pv := &DoubleValue{Value: v} + return pv.Size() +} + +func StdDoubleMarshal(v float64) ([]byte, error) { + size := SizeOfStdDouble(v) + buf := make([]byte, size) + _, err := StdDoubleMarshalTo(v, buf) + return buf, err +} + +func StdDoubleMarshalTo(v float64, data []byte) (int, error) { + pv := &DoubleValue{Value: v} + return pv.MarshalTo(data) +} + +func StdDoubleUnmarshal(v *float64, data []byte) error { + pv := &DoubleValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdFloat(r randyWrappers, easy bool) *float32 { + v := NewPopulatedFloatValue(r, easy) + return &v.Value +} + +func SizeOfStdFloat(v float32) int { + pv := &FloatValue{Value: v} + return pv.Size() +} + +func StdFloatMarshal(v float32) ([]byte, error) { + size := SizeOfStdFloat(v) + buf := make([]byte, size) + _, err := StdFloatMarshalTo(v, buf) + return buf, err +} + +func StdFloatMarshalTo(v float32, data []byte) (int, error) { + pv := &FloatValue{Value: v} + return pv.MarshalTo(data) +} + +func StdFloatUnmarshal(v *float32, data []byte) error { + pv := &FloatValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdInt64(r randyWrappers, easy bool) *int64 { + v := NewPopulatedInt64Value(r, easy) + return &v.Value +} + +func SizeOfStdInt64(v int64) int { + pv := &Int64Value{Value: v} + return pv.Size() +} + +func StdInt64Marshal(v int64) ([]byte, error) { + size := SizeOfStdInt64(v) + buf := make([]byte, size) + _, err := StdInt64MarshalTo(v, buf) + return buf, err +} + +func StdInt64MarshalTo(v int64, data []byte) (int, error) { + pv := &Int64Value{Value: v} + return pv.MarshalTo(data) +} + +func StdInt64Unmarshal(v *int64, data []byte) error { + pv := &Int64Value{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdUInt64(r randyWrappers, easy bool) *uint64 { + v := NewPopulatedUInt64Value(r, easy) + return &v.Value +} + +func SizeOfStdUInt64(v uint64) int { + pv := &UInt64Value{Value: v} + return pv.Size() +} + +func StdUInt64Marshal(v uint64) ([]byte, error) { + size := SizeOfStdUInt64(v) + buf := make([]byte, size) + _, err := StdUInt64MarshalTo(v, buf) + return buf, err +} + +func StdUInt64MarshalTo(v uint64, data []byte) (int, error) { + pv := &UInt64Value{Value: v} + return pv.MarshalTo(data) +} + +func StdUInt64Unmarshal(v *uint64, data []byte) error { + pv := &UInt64Value{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdInt32(r randyWrappers, easy bool) *int32 { + v := NewPopulatedInt32Value(r, easy) + return &v.Value +} + +func SizeOfStdInt32(v int32) int { + pv := &Int32Value{Value: v} + return pv.Size() +} + +func 
StdInt32Marshal(v int32) ([]byte, error) { + size := SizeOfStdInt32(v) + buf := make([]byte, size) + _, err := StdInt32MarshalTo(v, buf) + return buf, err +} + +func StdInt32MarshalTo(v int32, data []byte) (int, error) { + pv := &Int32Value{Value: v} + return pv.MarshalTo(data) +} + +func StdInt32Unmarshal(v *int32, data []byte) error { + pv := &Int32Value{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdUInt32(r randyWrappers, easy bool) *uint32 { + v := NewPopulatedUInt32Value(r, easy) + return &v.Value +} + +func SizeOfStdUInt32(v uint32) int { + pv := &UInt32Value{Value: v} + return pv.Size() +} + +func StdUInt32Marshal(v uint32) ([]byte, error) { + size := SizeOfStdUInt32(v) + buf := make([]byte, size) + _, err := StdUInt32MarshalTo(v, buf) + return buf, err +} + +func StdUInt32MarshalTo(v uint32, data []byte) (int, error) { + pv := &UInt32Value{Value: v} + return pv.MarshalTo(data) +} + +func StdUInt32Unmarshal(v *uint32, data []byte) error { + pv := &UInt32Value{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdBool(r randyWrappers, easy bool) *bool { + v := NewPopulatedBoolValue(r, easy) + return &v.Value +} + +func SizeOfStdBool(v bool) int { + pv := &BoolValue{Value: v} + return pv.Size() +} + +func StdBoolMarshal(v bool) ([]byte, error) { + size := SizeOfStdBool(v) + buf := make([]byte, size) + _, err := StdBoolMarshalTo(v, buf) + return buf, err +} + +func StdBoolMarshalTo(v bool, data []byte) (int, error) { + pv := &BoolValue{Value: v} + return pv.MarshalTo(data) +} + +func StdBoolUnmarshal(v *bool, data []byte) error { + pv := &BoolValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdString(r randyWrappers, easy bool) *string { + v := NewPopulatedStringValue(r, easy) + return &v.Value +} + +func SizeOfStdString(v string) int { + pv := &StringValue{Value: v} + return pv.Size() +} + +func StdStringMarshal(v string) ([]byte, error) { + size := SizeOfStdString(v) + buf := make([]byte, size) + _, err := StdStringMarshalTo(v, buf) + return buf, err +} + +func StdStringMarshalTo(v string, data []byte) (int, error) { + pv := &StringValue{Value: v} + return pv.MarshalTo(data) +} + +func StdStringUnmarshal(v *string, data []byte) error { + pv := &StringValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdBytes(r randyWrappers, easy bool) *[]byte { + v := NewPopulatedBytesValue(r, easy) + return &v.Value +} + +func SizeOfStdBytes(v []byte) int { + pv := &BytesValue{Value: v} + return pv.Size() +} + +func StdBytesMarshal(v []byte) ([]byte, error) { + size := SizeOfStdBytes(v) + buf := make([]byte, size) + _, err := StdBytesMarshalTo(v, buf) + return buf, err +} + +func StdBytesMarshalTo(v []byte, data []byte) (int, error) { + pv := &BytesValue{Value: v} + return pv.MarshalTo(data) +} + +func StdBytesUnmarshal(v *[]byte, data []byte) error { + pv := &BytesValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} diff --git a/extender/code/vendor/github.com/golang/protobuf/AUTHORS b/extender/code/vendor/github.com/golang/protobuf/AUTHORS new file mode 100644 index 000000000..15167cd74 --- /dev/null +++ b/extender/code/vendor/github.com/golang/protobuf/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. 
+# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/extender/code/vendor/github.com/golang/protobuf/CONTRIBUTORS b/extender/code/vendor/github.com/golang/protobuf/CONTRIBUTORS new file mode 100644 index 000000000..1c4577e96 --- /dev/null +++ b/extender/code/vendor/github.com/golang/protobuf/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/extender/code/vendor/github.com/golang/protobuf/LICENSE b/extender/code/vendor/github.com/golang/protobuf/LICENSE new file mode 100644 index 000000000..0f646931a --- /dev/null +++ b/extender/code/vendor/github.com/golang/protobuf/LICENSE @@ -0,0 +1,28 @@ +Copyright 2010 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/extender/code/vendor/github.com/golang/protobuf/proto/buffer.go b/extender/code/vendor/github.com/golang/protobuf/proto/buffer.go new file mode 100644 index 000000000..e810e6fea --- /dev/null +++ b/extender/code/vendor/github.com/golang/protobuf/proto/buffer.go @@ -0,0 +1,324 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "errors" + "fmt" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + WireVarint = 0 + WireFixed32 = 5 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 +) + +// EncodeVarint returns the varint encoded bytes of v. +func EncodeVarint(v uint64) []byte { + return protowire.AppendVarint(nil, v) +} + +// SizeVarint returns the length of the varint encoded bytes of v. +// This is equal to len(EncodeVarint(v)). +func SizeVarint(v uint64) int { + return protowire.SizeVarint(v) +} + +// DecodeVarint parses a varint encoded integer from b, +// returning the integer value and the length of the varint. 
+// It returns (0, 0) if there is a parse error. +func DecodeVarint(b []byte) (uint64, int) { + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, 0 + } + return v, n +} + +// Buffer is a buffer for encoding and decoding the protobuf wire format. +// It may be reused between invocations to reduce memory usage. +type Buffer struct { + buf []byte + idx int + deterministic bool +} + +// NewBuffer allocates a new Buffer initialized with buf, +// where the contents of buf are considered the unread portion of the buffer. +func NewBuffer(buf []byte) *Buffer { + return &Buffer{buf: buf} +} + +// SetDeterministic specifies whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. +func (b *Buffer) SetDeterministic(deterministic bool) { + b.deterministic = deterministic +} + +// SetBuf sets buf as the internal buffer, +// where the contents of buf are considered the unread portion of the buffer. +func (b *Buffer) SetBuf(buf []byte) { + b.buf = buf + b.idx = 0 +} + +// Reset clears the internal buffer of all written and unread data. +func (b *Buffer) Reset() { + b.buf = b.buf[:0] + b.idx = 0 +} + +// Bytes returns the internal buffer. +func (b *Buffer) Bytes() []byte { + return b.buf +} + +// Unread returns the unread portion of the buffer. +func (b *Buffer) Unread() []byte { + return b.buf[b.idx:] +} + +// Marshal appends the wire-format encoding of m to the buffer. +func (b *Buffer) Marshal(m Message) error { + var err error + b.buf, err = marshalAppend(b.buf, m, b.deterministic) + return err +} + +// Unmarshal parses the wire-format message in the buffer and +// places the decoded results in m. +// It does not reset m before unmarshaling. +func (b *Buffer) Unmarshal(m Message) error { + err := UnmarshalMerge(b.Unread(), m) + b.idx = len(b.buf) + return err +} + +type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields } + +func (m *unknownFields) String() string { panic("not implemented") } +func (m *unknownFields) Reset() { panic("not implemented") } +func (m *unknownFields) ProtoMessage() { panic("not implemented") } + +// DebugPrint dumps the encoded bytes of b with a header and footer including s +// to stdout. This is only intended for debugging. +func (*Buffer) DebugPrint(s string, b []byte) { + m := MessageReflect(new(unknownFields)) + m.SetUnknown(b) + b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface()) + fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s) +} + +// EncodeVarint appends an unsigned varint encoding to the buffer. 
+func (b *Buffer) EncodeVarint(v uint64) error { + b.buf = protowire.AppendVarint(b.buf, v) + return nil +} + +// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer. +func (b *Buffer) EncodeZigzag32(v uint64) error { + return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) +} + +// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer. +func (b *Buffer) EncodeZigzag64(v uint64) error { + return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63)))) +} + +// EncodeFixed32 appends a 32-bit little-endian integer to the buffer. +func (b *Buffer) EncodeFixed32(v uint64) error { + b.buf = protowire.AppendFixed32(b.buf, uint32(v)) + return nil +} + +// EncodeFixed64 appends a 64-bit little-endian integer to the buffer. +func (b *Buffer) EncodeFixed64(v uint64) error { + b.buf = protowire.AppendFixed64(b.buf, uint64(v)) + return nil +} + +// EncodeRawBytes appends a length-prefixed raw bytes to the buffer. +func (b *Buffer) EncodeRawBytes(v []byte) error { + b.buf = protowire.AppendBytes(b.buf, v) + return nil +} + +// EncodeStringBytes appends a length-prefixed raw bytes to the buffer. +// It does not validate whether v contains valid UTF-8. +func (b *Buffer) EncodeStringBytes(v string) error { + b.buf = protowire.AppendString(b.buf, v) + return nil +} + +// EncodeMessage appends a length-prefixed encoded message to the buffer. +func (b *Buffer) EncodeMessage(m Message) error { + var err error + b.buf = protowire.AppendVarint(b.buf, uint64(Size(m))) + b.buf, err = marshalAppend(b.buf, m, b.deterministic) + return err +} + +// DecodeVarint consumes an encoded unsigned varint from the buffer. +func (b *Buffer) DecodeVarint() (uint64, error) { + v, n := protowire.ConsumeVarint(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer. +func (b *Buffer) DecodeZigzag32() (uint64, error) { + v, err := b.DecodeVarint() + if err != nil { + return 0, err + } + return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil +} + +// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer. +func (b *Buffer) DecodeZigzag64() (uint64, error) { + v, err := b.DecodeVarint() + if err != nil { + return 0, err + } + return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil +} + +// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer. +func (b *Buffer) DecodeFixed32() (uint64, error) { + v, n := protowire.ConsumeFixed32(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer. +func (b *Buffer) DecodeFixed64() (uint64, error) { + v, n := protowire.ConsumeFixed64(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer. +// If alloc is specified, it returns a copy the raw bytes +// rather than a sub-slice of the buffer. +func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) { + v, n := protowire.ConsumeBytes(b.buf[b.idx:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + b.idx += n + if alloc { + v = append([]byte(nil), v...) + } + return v, nil +} + +// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer. +// It does not validate whether the raw bytes contain valid UTF-8. 
+func (b *Buffer) DecodeStringBytes() (string, error) { + v, n := protowire.ConsumeString(b.buf[b.idx:]) + if n < 0 { + return "", protowire.ParseError(n) + } + b.idx += n + return v, nil +} + +// DecodeMessage consumes a length-prefixed message from the buffer. +// It does not reset m before unmarshaling. +func (b *Buffer) DecodeMessage(m Message) error { + v, err := b.DecodeRawBytes(false) + if err != nil { + return err + } + return UnmarshalMerge(v, m) +} + +// DecodeGroup consumes a message group from the buffer. +// It assumes that the start group marker has already been consumed and +// consumes all bytes until (and including the end group marker). +// It does not reset m before unmarshaling. +func (b *Buffer) DecodeGroup(m Message) error { + v, n, err := consumeGroup(b.buf[b.idx:]) + if err != nil { + return err + } + b.idx += n + return UnmarshalMerge(v, m) +} + +// consumeGroup parses b until it finds an end group marker, returning +// the raw bytes of the message (excluding the end group marker) and the +// the total length of the message (including the end group marker). +func consumeGroup(b []byte) ([]byte, int, error) { + b0 := b + depth := 1 // assume this follows a start group marker + for { + _, wtyp, tagLen := protowire.ConsumeTag(b) + if tagLen < 0 { + return nil, 0, protowire.ParseError(tagLen) + } + b = b[tagLen:] + + var valLen int + switch wtyp { + case protowire.VarintType: + _, valLen = protowire.ConsumeVarint(b) + case protowire.Fixed32Type: + _, valLen = protowire.ConsumeFixed32(b) + case protowire.Fixed64Type: + _, valLen = protowire.ConsumeFixed64(b) + case protowire.BytesType: + _, valLen = protowire.ConsumeBytes(b) + case protowire.StartGroupType: + depth++ + case protowire.EndGroupType: + depth-- + default: + return nil, 0, errors.New("proto: cannot parse reserved wire type") + } + if valLen < 0 { + return nil, 0, protowire.ParseError(valLen) + } + b = b[valLen:] + + if depth == 0 { + return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil + } + } +} diff --git a/extender/code/vendor/github.com/golang/protobuf/proto/defaults.go b/extender/code/vendor/github.com/golang/protobuf/proto/defaults.go new file mode 100644 index 000000000..d399bf069 --- /dev/null +++ b/extender/code/vendor/github.com/golang/protobuf/proto/defaults.go @@ -0,0 +1,63 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" +) + +// SetDefaults sets unpopulated scalar fields to their default values. +// Fields within a oneof are not set even if they have a default value. +// SetDefaults is recursively called upon any populated message fields. +func SetDefaults(m Message) { + if m != nil { + setDefaults(MessageReflect(m)) + } +} + +func setDefaults(m protoreflect.Message) { + fds := m.Descriptor().Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if !m.Has(fd) { + if fd.HasDefault() && fd.ContainingOneof() == nil { + v := fd.Default() + if fd.Kind() == protoreflect.BytesKind { + v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes + } + m.Set(fd, v) + } + continue + } + } + + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + switch { + // Handle singular message. + case fd.Cardinality() != protoreflect.Repeated: + if fd.Message() != nil { + setDefaults(m.Get(fd).Message()) + } + // Handle list of messages. 
+ case fd.IsList(): + if fd.Message() != nil { + ls := m.Get(fd).List() + for i := 0; i < ls.Len(); i++ { + setDefaults(ls.Get(i).Message()) + } + } + // Handle map of messages. + case fd.IsMap(): + if fd.MapValue().Message() != nil { + ms := m.Get(fd).Map() + ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { + setDefaults(v.Message()) + return true + }) + } + } + return true + }) +} diff --git a/extender/code/vendor/github.com/golang/protobuf/proto/deprecated.go b/extender/code/vendor/github.com/golang/protobuf/proto/deprecated.go new file mode 100644 index 000000000..e8db57e09 --- /dev/null +++ b/extender/code/vendor/github.com/golang/protobuf/proto/deprecated.go @@ -0,0 +1,113 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + + protoV2 "google.golang.org/protobuf/proto" +) + +var ( + // Deprecated: No longer returned. + ErrNil = errors.New("proto: Marshal called with nil") + + // Deprecated: No longer returned. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") + + // Deprecated: No longer returned. + ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") +) + +// Deprecated: Do not use. +type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } + +// Deprecated: Do not use. +func GetStats() Stats { return Stats{} } + +// Deprecated: Do not use. +func MarshalMessageSet(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: Do not use. +func UnmarshalMessageSet([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: Do not use. +func MarshalMessageSetJSON(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: Do not use. +func UnmarshalMessageSetJSON([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: Do not use. +func RegisterMessageSetType(Message, int32, string) {} + +// Deprecated: Do not use. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// Deprecated: Do not use. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// Deprecated: Do not use; this type existed for intenal-use only. +type InternalMessageInfo struct{} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) DiscardUnknown(m Message) { + DiscardUnknown(m) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) { + return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. 
+func (*InternalMessageInfo) Merge(dst, src Message) { + protoV2.Merge(MessageV2(dst), MessageV2(src)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Size(m Message) int { + return protoV2.Size(MessageV2(m)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error { + return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m)) +} diff --git a/extender/code/vendor/github.com/golang/protobuf/proto/discard.go b/extender/code/vendor/github.com/golang/protobuf/proto/discard.go new file mode 100644 index 000000000..2187e877f --- /dev/null +++ b/extender/code/vendor/github.com/golang/protobuf/proto/discard.go @@ -0,0 +1,58 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" +) + +// DiscardUnknown recursively discards all unknown fields from this message +// and all embedded messages. +// +// When unmarshaling a message with unrecognized fields, the tags and values +// of such fields are preserved in the Message. This allows a later call to +// marshal to be able to produce a message that continues to have those +// unrecognized fields. To avoid this, DiscardUnknown is used to +// explicitly clear the unknown fields after unmarshaling. +func DiscardUnknown(m Message) { + if m != nil { + discardUnknown(MessageReflect(m)) + } +} + +func discardUnknown(m protoreflect.Message) { + m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool { + switch { + // Handle singular message. + case fd.Cardinality() != protoreflect.Repeated: + if fd.Message() != nil { + discardUnknown(m.Get(fd).Message()) + } + // Handle list of messages. + case fd.IsList(): + if fd.Message() != nil { + ls := m.Get(fd).List() + for i := 0; i < ls.Len(); i++ { + discardUnknown(ls.Get(i).Message()) + } + } + // Handle map of messages. + case fd.IsMap(): + if fd.MapValue().Message() != nil { + ms := m.Get(fd).Map() + ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { + discardUnknown(v.Message()) + return true + }) + } + } + return true + }) + + // Discard unknown fields. + if len(m.GetUnknown()) > 0 { + m.SetUnknown(nil) + } +} diff --git a/extender/code/vendor/github.com/golang/protobuf/proto/extensions.go b/extender/code/vendor/github.com/golang/protobuf/proto/extensions.go new file mode 100644 index 000000000..42fc120c9 --- /dev/null +++ b/extender/code/vendor/github.com/golang/protobuf/proto/extensions.go @@ -0,0 +1,356 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "errors" + "fmt" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) + +type ( + // ExtensionDesc represents an extension descriptor and + // is used to interact with an extension field in a message. + // + // Variables of this type are generated in code by protoc-gen-go. + ExtensionDesc = protoimpl.ExtensionInfo + + // ExtensionRange represents a range of message extensions. 
+ // Used in code generated by protoc-gen-go. + ExtensionRange = protoiface.ExtensionRangeV1 + + // Deprecated: Do not use; this is an internal type. + Extension = protoimpl.ExtensionFieldV1 + + // Deprecated: Do not use; this is an internal type. + XXX_InternalExtensions = protoimpl.ExtensionFields +) + +// ErrMissingExtension reports whether the extension was not present. +var ErrMissingExtension = errors.New("proto: missing extension") + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +// HasExtension reports whether the extension field is present in m +// either as an explicitly populated field or as an unknown field. +func HasExtension(m Message, xt *ExtensionDesc) (has bool) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return false + } + + // Check whether any populated known field matches the field number. + xtd := xt.TypeDescriptor() + if isValidExtension(mr.Descriptor(), xtd) { + has = mr.Has(xtd) + } else { + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + has = int32(fd.Number()) == xt.Field + return !has + }) + } + + // Check whether any unknown field matches the field number. + for b := mr.GetUnknown(); !has && len(b) > 0; { + num, _, n := protowire.ConsumeField(b) + has = int32(num) == xt.Field + b = b[n:] + } + return has +} + +// ClearExtension removes the extension field from m +// either as an explicitly populated field or as an unknown field. +func ClearExtension(m Message, xt *ExtensionDesc) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return + } + + xtd := xt.TypeDescriptor() + if isValidExtension(mr.Descriptor(), xtd) { + mr.Clear(xtd) + } else { + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + if int32(fd.Number()) == xt.Field { + mr.Clear(fd) + return false + } + return true + }) + } + clearUnknown(mr, fieldNum(xt.Field)) +} + +// ClearAllExtensions clears all extensions from m. +// This includes populated fields and unknown fields in the extension range. +func ClearAllExtensions(m Message) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return + } + + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + if fd.IsExtension() { + mr.Clear(fd) + } + return true + }) + clearUnknown(mr, mr.Descriptor().ExtensionRanges()) +} + +// GetExtension retrieves a proto2 extended field from m. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes for the extension field. +func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return nil, errNotExtendable + } + + // Retrieve the unknown fields for this extension field. + var bo protoreflect.RawFields + for bi := mr.GetUnknown(); len(bi) > 0; { + num, _, n := protowire.ConsumeField(bi) + if int32(num) == xt.Field { + bo = append(bo, bi[:n]...) + } + bi = bi[n:] + } + + // For type incomplete descriptors, only retrieve the unknown fields. 
+ if xt.ExtensionType == nil { + return []byte(bo), nil + } + + // If the extension field only exists as unknown fields, unmarshal it. + // This is rarely done since proto.Unmarshal eagerly unmarshals extensions. + xtd := xt.TypeDescriptor() + if !isValidExtension(mr.Descriptor(), xtd) { + return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) + } + if !mr.Has(xtd) && len(bo) > 0 { + m2 := mr.New() + if err := (proto.UnmarshalOptions{ + Resolver: extensionResolver{xt}, + }.Unmarshal(bo, m2.Interface())); err != nil { + return nil, err + } + if m2.Has(xtd) { + mr.Set(xtd, m2.Get(xtd)) + clearUnknown(mr, fieldNum(xt.Field)) + } + } + + // Check whether the message has the extension field set or a default. + var pv protoreflect.Value + switch { + case mr.Has(xtd): + pv = mr.Get(xtd) + case xtd.HasDefault(): + pv = xtd.Default() + default: + return nil, ErrMissingExtension + } + + v := xt.InterfaceOf(pv) + rv := reflect.ValueOf(v) + if isScalarKind(rv.Kind()) { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() + } + return v, nil +} + +// extensionResolver is a custom extension resolver that stores a single +// extension type that takes precedence over the global registry. +type extensionResolver struct{ xt protoreflect.ExtensionType } + +func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field { + return r.xt, nil + } + return protoregistry.GlobalTypes.FindExtensionByName(field) +} + +func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field { + return r.xt, nil + } + return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) +} + +// GetExtensions returns a list of the extensions values present in m, +// corresponding with the provided list of extension descriptors, xts. +// If an extension is missing in m, the corresponding value is nil. +func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return nil, errNotExtendable + } + + vs := make([]interface{}, len(xts)) + for i, xt := range xts { + v, err := GetExtension(m, xt) + if err != nil { + if err == ErrMissingExtension { + continue + } + return vs, err + } + vs[i] = v + } + return vs, nil +} + +// SetExtension sets an extension field in m to the provided value. +func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return errNotExtendable + } + + rv := reflect.ValueOf(v) + if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) { + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType) + } + if rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", v) + } + if isScalarKind(rv.Elem().Kind()) { + v = rv.Elem().Interface() + } + } + + xtd := xt.TypeDescriptor() + if !isValidExtension(mr.Descriptor(), xtd) { + return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) + } + mr.Set(xtd, xt.ValueOf(v)) + clearUnknown(mr, fieldNum(xt.Field)) + return nil +} + +// SetRawExtension inserts b into the unknown fields of m. 
+// +// Deprecated: Use Message.ProtoReflect.SetUnknown instead. +func SetRawExtension(m Message, fnum int32, b []byte) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return + } + + // Verify that the raw field is valid. + for b0 := b; len(b0) > 0; { + num, _, n := protowire.ConsumeField(b0) + if int32(num) != fnum { + panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum)) + } + b0 = b0[n:] + } + + ClearExtension(m, &ExtensionDesc{Field: fnum}) + mr.SetUnknown(append(mr.GetUnknown(), b...)) +} + +// ExtensionDescs returns a list of extension descriptors found in m, +// containing descriptors for both populated extension fields in m and +// also unknown fields of m that are in the extension range. +// For the later case, an type incomplete descriptor is provided where only +// the ExtensionDesc.Field field is populated. +// The order of the extension descriptors is undefined. +func ExtensionDescs(m Message) ([]*ExtensionDesc, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return nil, errNotExtendable + } + + // Collect a set of known extension descriptors. + extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc) + mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + xt := fd.(protoreflect.ExtensionTypeDescriptor) + if xd, ok := xt.Type().(*ExtensionDesc); ok { + extDescs[fd.Number()] = xd + } + } + return true + }) + + // Collect a set of unknown extension descriptors. + extRanges := mr.Descriptor().ExtensionRanges() + for b := mr.GetUnknown(); len(b) > 0; { + num, _, n := protowire.ConsumeField(b) + if extRanges.Has(num) && extDescs[num] == nil { + extDescs[num] = nil + } + b = b[n:] + } + + // Transpose the set of descriptors into a list. + var xts []*ExtensionDesc + for num, xt := range extDescs { + if xt == nil { + xt = &ExtensionDesc{Field: int32(num)} + } + xts = append(xts, xt) + } + return xts, nil +} + +// isValidExtension reports whether xtd is a valid extension descriptor for md. +func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool { + return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number()) +} + +// isScalarKind reports whether k is a protobuf scalar kind (except bytes). +// This function exists for historical reasons since the representation of +// scalars differs between v1 and v2, where v1 uses *T and v2 uses T. +func isScalarKind(k reflect.Kind) bool { + switch k { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + return true + default: + return false + } +} + +// clearUnknown removes unknown fields from m where remover.Has reports true. +func clearUnknown(m protoreflect.Message, remover interface { + Has(protoreflect.FieldNumber) bool +}) { + var bo protoreflect.RawFields + for bi := m.GetUnknown(); len(bi) > 0; { + num, _, n := protowire.ConsumeField(bi) + if !remover.Has(num) { + bo = append(bo, bi[:n]...) 
+ } + bi = bi[n:] + } + if bi := m.GetUnknown(); len(bi) != len(bo) { + m.SetUnknown(bo) + } +} + +type fieldNum protoreflect.FieldNumber + +func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool { + return protoreflect.FieldNumber(n1) == n2 +} diff --git a/extender/code/vendor/github.com/golang/protobuf/proto/properties.go b/extender/code/vendor/github.com/golang/protobuf/proto/properties.go new file mode 100644 index 000000000..dcdc2202f --- /dev/null +++ b/extender/code/vendor/github.com/golang/protobuf/proto/properties.go @@ -0,0 +1,306 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "sync" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// StructProperties represents protocol buffer type information for a +// generated protobuf message in the open-struct API. +// +// Deprecated: Do not use. +type StructProperties struct { + // Prop are the properties for each field. + // + // Fields belonging to a oneof are stored in OneofTypes instead, with a + // single Properties representing the parent oneof held here. + // + // The order of Prop matches the order of fields in the Go struct. + // Struct fields that are not related to protobufs have a "XXX_" prefix + // in the Properties.Name and must be ignored by the user. + Prop []*Properties + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the protobuf field name. + OneofTypes map[string]*OneofProperties +} + +// Properties represents the type information for a protobuf message field. +// +// Deprecated: Do not use. +type Properties struct { + // Name is a placeholder name with little meaningful semantic value. + // If the name has an "XXX_" prefix, the entire Properties must be ignored. + Name string + // OrigName is the protobuf field name or oneof name. + OrigName string + // JSONName is the JSON name for the protobuf field. + JSONName string + // Enum is a placeholder name for enums. + // For historical reasons, this is neither the Go name for the enum, + // nor the protobuf name for the enum. + Enum string // Deprecated: Do not use. + // Weak contains the full name of the weakly referenced message. + Weak string + // Wire is a string representation of the wire type. + Wire string + // WireType is the protobuf wire type for the field. + WireType int + // Tag is the protobuf field number. + Tag int + // Required reports whether this is a required field. + Required bool + // Optional reports whether this is a optional field. + Optional bool + // Repeated reports whether this is a repeated field. + Repeated bool + // Packed reports whether this is a packed repeated field of scalars. + Packed bool + // Proto3 reports whether this field operates under the proto3 syntax. + Proto3 bool + // Oneof reports whether this field belongs within a oneof. + Oneof bool + + // Default is the default value in string form. + Default string + // HasDefault reports whether the field has a default value. + HasDefault bool + + // MapKeyProp is the properties for the key field for a map field. + MapKeyProp *Properties + // MapValProp is the properties for the value field for a map field. + MapValProp *Properties +} + +// OneofProperties represents the type information for a protobuf oneof. +// +// Deprecated: Do not use. 
+type OneofProperties struct { + // Type is a pointer to the generated wrapper type for the field value. + // This is nil for messages that are not in the open-struct API. + Type reflect.Type + // Field is the index into StructProperties.Prop for the containing oneof. + Field int + // Prop is the properties for the field. + Prop *Properties +} + +// String formats the properties in the protobuf struct field tag style. +func (p *Properties) String() string { + s := p.Wire + s += "," + strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != "" { + s += ",json=" + p.JSONName + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if len(p.Weak) > 0 { + s += ",weak=" + p.Weak + } + if p.Proto3 { + s += ",proto3" + } + if p.Oneof { + s += ",oneof" + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(tag string) { + // For example: "bytes,49,opt,name=foo,def=hello!" + for len(tag) > 0 { + i := strings.IndexByte(tag, ',') + if i < 0 { + i = len(tag) + } + switch s := tag[:i]; { + case strings.HasPrefix(s, "name="): + p.OrigName = s[len("name="):] + case strings.HasPrefix(s, "json="): + p.JSONName = s[len("json="):] + case strings.HasPrefix(s, "enum="): + p.Enum = s[len("enum="):] + case strings.HasPrefix(s, "weak="): + p.Weak = s[len("weak="):] + case strings.Trim(s, "0123456789") == "": + n, _ := strconv.ParseUint(s, 10, 32) + p.Tag = int(n) + case s == "opt": + p.Optional = true + case s == "req": + p.Required = true + case s == "rep": + p.Repeated = true + case s == "varint" || s == "zigzag32" || s == "zigzag64": + p.Wire = s + p.WireType = WireVarint + case s == "fixed32": + p.Wire = s + p.WireType = WireFixed32 + case s == "fixed64": + p.Wire = s + p.WireType = WireFixed64 + case s == "bytes": + p.Wire = s + p.WireType = WireBytes + case s == "group": + p.Wire = s + p.WireType = WireStartGroup + case s == "packed": + p.Packed = true + case s == "proto3": + p.Proto3 = true + case s == "oneof": + p.Oneof = true + case strings.HasPrefix(s, "def="): + // The default tag is special in that everything afterwards is the + // default regardless of the presence of commas. + p.HasDefault = true + p.Default, i = tag[len("def="):], len(tag) + } + tag = strings.TrimPrefix(tag[i:], ",") + } +} + +// Init populates the properties from a protocol buffer struct tag. +// +// Deprecated: Do not use. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.Name = name + p.OrigName = name + if tag == "" { + return + } + p.Parse(tag) + + if typ != nil && typ.Kind() == reflect.Map { + p.MapKeyProp = new(Properties) + p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil) + p.MapValProp = new(Properties) + p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil) + } +} + +var propertiesCache sync.Map // map[reflect.Type]*StructProperties + +// GetProperties returns the list of properties for the type represented by t, +// which must be a generated protocol buffer message in the open-struct API, +// where protobuf message fields are represented by exported Go struct fields. +// +// Deprecated: Use protobuf reflection instead. 
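+// A minimal usage sketch for GetProperties below (pb.MyMessage is a
+// hypothetical generated open-struct message; note that t must be the
+// struct type itself, not a pointer to it):
+//
+//	sprops := proto.GetProperties(reflect.TypeOf(pb.MyMessage{}))
+//	for _, p := range sprops.Prop {
+//		fmt.Println(p.OrigName, p.Wire, p.Tag)
+//	}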
+func GetProperties(t reflect.Type) *StructProperties { + if p, ok := propertiesCache.Load(t); ok { + return p.(*StructProperties) + } + p, _ := propertiesCache.LoadOrStore(t, newProperties(t)) + return p.(*StructProperties) +} + +func newProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) + } + + var hasOneof bool + prop := new(StructProperties) + + // Construct a list of properties for each field in the struct. + for i := 0; i < t.NumField(); i++ { + p := new(Properties) + f := t.Field(i) + tagField := f.Tag.Get("protobuf") + p.Init(f.Type, f.Name, tagField, &f) + + tagOneof := f.Tag.Get("protobuf_oneof") + if tagOneof != "" { + hasOneof = true + p.OrigName = tagOneof + } + + // Rename unrelated struct fields with the "XXX_" prefix since so much + // user code simply checks for this to exclude special fields. + if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") { + p.Name = "XXX_" + p.Name + p.OrigName = "XXX_" + p.OrigName + } else if p.Weak != "" { + p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field + } + + prop.Prop = append(prop.Prop, p) + } + + // Construct a mapping of oneof field names to properties. + if hasOneof { + var oneofWrappers []interface{} + if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { + oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{}) + } + if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { + oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{}) + } + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok { + if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok { + oneofWrappers = m.ProtoMessageInfo().OneofWrappers + } + } + + prop.OneofTypes = make(map[string]*OneofProperties) + for _, wrapper := range oneofWrappers { + p := &OneofProperties{ + Type: reflect.ValueOf(wrapper).Type(), // *T + Prop: new(Properties), + } + f := p.Type.Elem().Field(0) + p.Prop.Name = f.Name + p.Prop.Parse(f.Tag.Get("protobuf")) + + // Determine the struct field that contains this oneof. + // Each wrapper is assignable to exactly one parent field. + var foundOneof bool + for i := 0; i < t.NumField() && !foundOneof; i++ { + if p.Type.AssignableTo(t.Field(i).Type) { + p.Field = i + foundOneof = true + } + } + if !foundOneof { + panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) + } + prop.OneofTypes[p.Prop.OrigName] = p + } + } + + return prop +} + +func (sp *StructProperties) Len() int { return len(sp.Prop) } +func (sp *StructProperties) Less(i, j int) bool { return false } +func (sp *StructProperties) Swap(i, j int) { return } diff --git a/extender/code/vendor/github.com/golang/protobuf/proto/proto.go b/extender/code/vendor/github.com/golang/protobuf/proto/proto.go new file mode 100644 index 000000000..5aee89c32 --- /dev/null +++ b/extender/code/vendor/github.com/golang/protobuf/proto/proto.go @@ -0,0 +1,167 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proto provides functionality for handling protocol buffer messages. +// In particular, it provides marshaling and unmarshaling between a protobuf +// message and the binary wire format. 
+// +// See https://developers.google.com/protocol-buffers/docs/gotutorial for +// more information. +// +// Deprecated: Use the "google.golang.org/protobuf/proto" package instead. +package proto + +import ( + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + ProtoPackageIsVersion1 = true + ProtoPackageIsVersion2 = true + ProtoPackageIsVersion3 = true + ProtoPackageIsVersion4 = true +) + +// GeneratedEnum is any enum type generated by protoc-gen-go +// which is a named int32 kind. +// This type exists for documentation purposes. +type GeneratedEnum interface{} + +// GeneratedMessage is any message type generated by protoc-gen-go +// which is a pointer to a named struct kind. +// This type exists for documentation purposes. +type GeneratedMessage interface{} + +// Message is a protocol buffer message. +// +// This is the v1 version of the message interface and is marginally better +// than an empty interface as it lacks any method to programatically interact +// with the contents of the message. +// +// A v2 message is declared in "google.golang.org/protobuf/proto".Message and +// exposes protobuf reflection as a first-class feature of the interface. +// +// To convert a v1 message to a v2 message, use the MessageV2 function. +// To convert a v2 message to a v1 message, use the MessageV1 function. +type Message = protoiface.MessageV1 + +// MessageV1 converts either a v1 or v2 message to a v1 message. +// It returns nil if m is nil. +func MessageV1(m GeneratedMessage) protoiface.MessageV1 { + return protoimpl.X.ProtoMessageV1Of(m) +} + +// MessageV2 converts either a v1 or v2 message to a v2 message. +// It returns nil if m is nil. +func MessageV2(m GeneratedMessage) protoV2.Message { + return protoimpl.X.ProtoMessageV2Of(m) +} + +// MessageReflect returns a reflective view for a message. +// It returns nil if m is nil. +func MessageReflect(m Message) protoreflect.Message { + return protoimpl.X.MessageOf(m) +} + +// Marshaler is implemented by messages that can marshal themselves. +// This interface is used by the following functions: Size, Marshal, +// Buffer.Marshal, and Buffer.EncodeMessage. +// +// Deprecated: Do not implement. +type Marshaler interface { + // Marshal formats the encoded bytes of the message. + // It should be deterministic and emit valid protobuf wire data. + // The caller takes ownership of the returned buffer. + Marshal() ([]byte, error) +} + +// Unmarshaler is implemented by messages that can unmarshal themselves. +// This interface is used by the following functions: Unmarshal, UnmarshalMerge, +// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup. +// +// Deprecated: Do not implement. +type Unmarshaler interface { + // Unmarshal parses the encoded bytes of the protobuf wire input. + // The provided buffer is only valid for during method call. + // It should not reset the receiver message. + Unmarshal([]byte) error +} + +// Merger is implemented by messages that can merge themselves. +// This interface is used by the following functions: Clone and Merge. +// +// Deprecated: Do not implement. +type Merger interface { + // Merge merges the contents of src into the receiver message. + // It clones all data structures in src such that it aliases no mutable + // memory referenced by src. 
+ Merge(src Message) +} + +// RequiredNotSetError is an error type returned when +// marshaling or unmarshaling a message with missing required fields. +type RequiredNotSetError struct { + err error +} + +func (e *RequiredNotSetError) Error() string { + if e.err != nil { + return e.err.Error() + } + return "proto: required field not set" +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +func checkRequiredNotSet(m protoV2.Message) error { + if err := protoV2.CheckInitialized(m); err != nil { + return &RequiredNotSetError{err: err} + } + return nil +} + +// Clone returns a deep copy of src. +func Clone(src Message) Message { + return MessageV1(protoV2.Clone(MessageV2(src))) +} + +// Merge merges src into dst, which must be messages of the same type. +// +// Populated scalar fields in src are copied to dst, while populated +// singular messages in src are merged into dst by recursively calling Merge. +// The elements of every list field in src is appended to the corresponded +// list fields in dst. The entries of every map field in src is copied into +// the corresponding map field in dst, possibly replacing existing entries. +// The unknown fields of src are appended to the unknown fields of dst. +func Merge(dst, src Message) { + protoV2.Merge(MessageV2(dst), MessageV2(src)) +} + +// Equal reports whether two messages are equal. +// If two messages marshal to the same bytes under deterministic serialization, +// then Equal is guaranteed to report true. +// +// Two messages are equal if they are the same protobuf message type, +// have the same set of populated known and extension field values, +// and the same set of unknown fields values. +// +// Scalar values are compared with the equivalent of the == operator in Go, +// except bytes values which are compared using bytes.Equal and +// floating point values which specially treat NaNs as equal. +// Message values are compared by recursively calling Equal. +// Lists are equal if each element value is also equal. +// Maps are equal if they have the same set of keys, where the pair of values +// for each key is also equal. +func Equal(x, y Message) bool { + return protoV2.Equal(MessageV2(x), MessageV2(y)) +} + +func isMessageSet(md protoreflect.MessageDescriptor) bool { + ms, ok := md.(interface{ IsMessageSet() bool }) + return ok && ms.IsMessageSet() +} diff --git a/extender/code/vendor/github.com/golang/protobuf/proto/registry.go b/extender/code/vendor/github.com/golang/protobuf/proto/registry.go new file mode 100644 index 000000000..1e7ff6420 --- /dev/null +++ b/extender/code/vendor/github.com/golang/protobuf/proto/registry.go @@ -0,0 +1,323 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + "reflect" + "strings" + "sync" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// filePath is the path to the proto source file. +type filePath = string // e.g., "google/protobuf/descriptor.proto" + +// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto. +type fileDescGZIP = []byte + +var fileCache sync.Map // map[filePath]fileDescGZIP + +// RegisterFile is called from generated code to register the compressed +// FileDescriptorProto with the file path for a proto source file. 
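+// Putting the Clone, Merge, Equal, and MessageV2 helpers above together
+// (a sketch; pb.MyMessage is a hypothetical generated type):
+//
+//	dst := proto.Clone(src).(*pb.MyMessage) // deep copy of src
+//	proto.Merge(dst, extra)                 // merge extra into dst
+//	same := proto.Equal(dst, src)
+//	m2 := proto.MessageV2(dst)              // v2 view for the new proto package
+//	_, _ = same, m2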
+// +// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead. +func RegisterFile(s filePath, d fileDescGZIP) { + // Decompress the descriptor. + zr, err := gzip.NewReader(bytes.NewReader(d)) + if err != nil { + panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) + } + b, err := ioutil.ReadAll(zr) + if err != nil { + panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) + } + + // Construct a protoreflect.FileDescriptor from the raw descriptor. + // Note that DescBuilder.Build automatically registers the constructed + // file descriptor with the v2 registry. + protoimpl.DescBuilder{RawDescriptor: b}.Build() + + // Locally cache the raw descriptor form for the file. + fileCache.Store(s, d) +} + +// FileDescriptor returns the compressed FileDescriptorProto given the file path +// for a proto source file. It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead. +func FileDescriptor(s filePath) fileDescGZIP { + if v, ok := fileCache.Load(s); ok { + return v.(fileDescGZIP) + } + + // Find the descriptor in the v2 registry. + var b []byte + if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil { + if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok { + b = fd.ProtoLegacyRawDesc() + } else { + // TODO: Use protodesc.ToFileDescriptorProto to construct + // a descriptorpb.FileDescriptorProto and marshal it. + // However, doing so causes the proto package to have a dependency + // on descriptorpb, leading to cyclic dependency issues. + } + } + + // Locally cache the raw descriptor form for the file. + if len(b) > 0 { + v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b)) + return v.(fileDescGZIP) + } + return nil +} + +// enumName is the name of an enum. For historical reasons, the enum name is +// neither the full Go name nor the full protobuf name of the enum. +// The name is the dot-separated combination of just the proto package that the +// enum is declared within followed by the Go type name of the generated enum. +type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum" + +// enumsByName maps enum values by name to their numeric counterpart. +type enumsByName = map[string]int32 + +// enumsByNumber maps enum values by number to their name counterpart. +type enumsByNumber = map[int32]string + +var enumCache sync.Map // map[enumName]enumsByName +var numFilesCache sync.Map // map[protoreflect.FullName]int + +// RegisterEnum is called from the generated code to register the mapping of +// enum value names to enum numbers for the enum identified by s. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead. +func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) { + if _, ok := enumCache.Load(s); ok { + panic("proto: duplicate enum registered: " + s) + } + enumCache.Store(s, m) + + // This does not forward registration to the v2 registry since this API + // lacks sufficient information to construct a complete v2 enum descriptor. +} + +// EnumValueMap returns the mapping from enum value names to enum numbers for +// the enum of the given name. It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead. +func EnumValueMap(s enumName) enumsByName { + if v, ok := enumCache.Load(s); ok { + return v.(enumsByName) + } + + // Check whether the cache is stale. 
If the number of files in the current + // package differs, then it means that some enums may have been recently + // registered upstream that we do not know about. + var protoPkg protoreflect.FullName + if i := strings.LastIndexByte(s, '.'); i >= 0 { + protoPkg = protoreflect.FullName(s[:i]) + } + v, _ := numFilesCache.Load(protoPkg) + numFiles, _ := v.(int) + if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles { + return nil // cache is up-to-date; was not found earlier + } + + // Update the enum cache for all enums declared in the given proto package. + numFiles = 0 + protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool { + walkEnums(fd, func(ed protoreflect.EnumDescriptor) { + name := protoimpl.X.LegacyEnumName(ed) + if _, ok := enumCache.Load(name); !ok { + m := make(enumsByName) + evs := ed.Values() + for i := evs.Len() - 1; i >= 0; i-- { + ev := evs.Get(i) + m[string(ev.Name())] = int32(ev.Number()) + } + enumCache.LoadOrStore(name, m) + } + }) + numFiles++ + return true + }) + numFilesCache.Store(protoPkg, numFiles) + + // Check cache again for enum map. + if v, ok := enumCache.Load(s); ok { + return v.(enumsByName) + } + return nil +} + +// walkEnums recursively walks all enums declared in d. +func walkEnums(d interface { + Enums() protoreflect.EnumDescriptors + Messages() protoreflect.MessageDescriptors +}, f func(protoreflect.EnumDescriptor)) { + eds := d.Enums() + for i := eds.Len() - 1; i >= 0; i-- { + f(eds.Get(i)) + } + mds := d.Messages() + for i := mds.Len() - 1; i >= 0; i-- { + walkEnums(mds.Get(i), f) + } +} + +// messageName is the full name of protobuf message. +type messageName = string + +var messageTypeCache sync.Map // map[messageName]reflect.Type + +// RegisterType is called from generated code to register the message Go type +// for a message of the given name. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead. +func RegisterType(m Message, s messageName) { + mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s)) + if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil { + panic(err) + } + messageTypeCache.Store(s, reflect.TypeOf(m)) +} + +// RegisterMapType is called from generated code to register the Go map type +// for a protobuf message representing a map entry. +// +// Deprecated: Do not use. +func RegisterMapType(m interface{}, s messageName) { + t := reflect.TypeOf(m) + if t.Kind() != reflect.Map { + panic(fmt.Sprintf("invalid map kind: %v", t)) + } + if _, ok := messageTypeCache.Load(s); ok { + panic(fmt.Errorf("proto: duplicate proto message registered: %s", s)) + } + messageTypeCache.Store(s, t) +} + +// MessageType returns the message type for a named message. +// It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead. +func MessageType(s messageName) reflect.Type { + if v, ok := messageTypeCache.Load(s); ok { + return v.(reflect.Type) + } + + // Derive the message type from the v2 registry. + var t reflect.Type + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil { + t = messageGoType(mt) + } + + // If we could not get a concrete type, it is possible that it is a + // pseudo-message for a map entry. 
+ if t == nil { + d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s)) + if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() { + kt := goTypeForField(md.Fields().ByNumber(1)) + vt := goTypeForField(md.Fields().ByNumber(2)) + t = reflect.MapOf(kt, vt) + } + } + + // Locally cache the message type for the given name. + if t != nil { + v, _ := messageTypeCache.LoadOrStore(s, t) + return v.(reflect.Type) + } + return nil +} + +func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type { + switch k := fd.Kind(); k { + case protoreflect.EnumKind: + if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil { + return enumGoType(et) + } + return reflect.TypeOf(protoreflect.EnumNumber(0)) + case protoreflect.MessageKind, protoreflect.GroupKind: + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil { + return messageGoType(mt) + } + return reflect.TypeOf((*protoreflect.Message)(nil)).Elem() + default: + return reflect.TypeOf(fd.Default().Interface()) + } +} + +func enumGoType(et protoreflect.EnumType) reflect.Type { + return reflect.TypeOf(et.New(0)) +} + +func messageGoType(mt protoreflect.MessageType) reflect.Type { + return reflect.TypeOf(MessageV1(mt.Zero().Interface())) +} + +// MessageName returns the full protobuf name for the given message type. +// +// Deprecated: Use protoreflect.MessageDescriptor.FullName instead. +func MessageName(m Message) messageName { + if m == nil { + return "" + } + if m, ok := m.(interface{ XXX_MessageName() messageName }); ok { + return m.XXX_MessageName() + } + return messageName(protoimpl.X.MessageDescriptorOf(m).FullName()) +} + +// RegisterExtension is called from the generated code to register +// the extension descriptor. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead. +func RegisterExtension(d *ExtensionDesc) { + if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil { + panic(err) + } +} + +type extensionsByNumber = map[int32]*ExtensionDesc + +var extensionCache sync.Map // map[messageName]extensionsByNumber + +// RegisteredExtensions returns a map of the registered extensions for the +// provided protobuf message, indexed by the extension field number. +// +// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead. +func RegisteredExtensions(m Message) extensionsByNumber { + // Check whether the cache is stale. If the number of extensions for + // the given message differs, then it means that some extensions were + // recently registered upstream that we do not know about. + s := MessageName(m) + v, _ := extensionCache.Load(s) + xs, _ := v.(extensionsByNumber) + if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) { + return xs // cache is up-to-date + } + + // Cache is stale, re-compute the extensions map. + xs = make(extensionsByNumber) + protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool { + if xd, ok := xt.(*ExtensionDesc); ok { + xs[int32(xt.TypeDescriptor().Number())] = xd + } else { + // TODO: This implies that the protoreflect.ExtensionType is a + // custom type not generated by protoc-gen-go. We could try and + // convert the type to an ExtensionDesc. 
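+// Lookup sketch for the registry helpers above (the names resolve only if
+// the corresponding generated package has been imported and registered):
+//
+//	t := proto.MessageType("google.protobuf.Duration") // reflect.Type of the Go message, or nil
+//	name := proto.MessageName(m)                       // full protobuf name of m
+//	exts := proto.RegisteredExtensions(m)              // field number -> *ExtensionDesc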
+ } + return true + }) + extensionCache.Store(s, xs) + return xs +} diff --git a/extender/code/vendor/github.com/golang/protobuf/proto/text_decode.go b/extender/code/vendor/github.com/golang/protobuf/proto/text_decode.go new file mode 100644 index 000000000..47eb3e445 --- /dev/null +++ b/extender/code/vendor/github.com/golang/protobuf/proto/text_decode.go @@ -0,0 +1,801 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/prototext" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapTextUnmarshalV2 = false + +// ParseError is returned by UnmarshalText. +type ParseError struct { + Message string + + // Deprecated: Do not use. + Line, Offset int +} + +func (e *ParseError) Error() string { + if wrapTextUnmarshalV2 { + return e.Message + } + if e.Line == 1 { + return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message) + } + return fmt.Sprintf("line %d: %v", e.Line, e.Message) +} + +// UnmarshalText parses a proto text formatted string into m. +func UnmarshalText(s string, m Message) error { + if u, ok := m.(encoding.TextUnmarshaler); ok { + return u.UnmarshalText([]byte(s)) + } + + m.Reset() + mi := MessageV2(m) + + if wrapTextUnmarshalV2 { + err := prototext.UnmarshalOptions{ + AllowPartial: true, + }.Unmarshal([]byte(s), mi) + if err != nil { + return &ParseError{Message: err.Error()} + } + return checkRequiredNotSet(mi) + } else { + if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil { + return err + } + return checkRequiredNotSet(mi) + } +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) { + md := m.Descriptor() + fds := md.Fields() + + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + seen := make(map[protoreflect.FieldNumber]bool) + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + if err := p.unmarshalExtensionOrAny(m, seen); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. 
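+// Example input accepted by UnmarshalText above (pb.MyMessage and its
+// fields are hypothetical):
+//
+//	var m pb.MyMessage
+//	if err := proto.UnmarshalText(`name: "foo" id: 42`, &m); err != nil {
+//		log.Fatal(err)
+//	}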
+ name := protoreflect.Name(tok.value) + fd := fds.ByName(name) + switch { + case fd == nil: + gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name)))) + if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name { + fd = gd + } + case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name: + fd = nil + case fd.IsWeak() && fd.Message().IsPlaceholder(): + fd = nil + } + if fd == nil { + typeName := string(md.FullName()) + if m, ok := m.Interface().(Message); ok { + t := reflect.TypeOf(m) + if t.Kind() == reflect.Ptr { + typeName = t.Elem().String() + } + } + return p.errorf("unknown field name %q in %v", name, typeName) + } + if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name()) + } + if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] { + return p.errorf("non-repeated field %q was repeated", fd.Name()) + } + seen[fd.Number()] = true + + // Consume any colon. + if err := p.checkForColon(fd); err != nil { + return err + } + + // Parse into the field. + v := m.Get(fd) + if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { + v = m.Mutable(fd) + } + if v, err = p.unmarshalValue(v, fd); err != nil { + return err + } + m.Set(fd, v) + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + } + return nil +} + +func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error { + name, err := p.consumeExtensionOrAnyName() + if err != nil { + return err + } + + // If it contains a slash, it's an Any type URL. + if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 { + tok := p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + + mt, err := protoregistry.GlobalTypes.FindMessageByURL(name) + if err != nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):]) + } + m2 := mt.New() + if err := p.unmarshalMessage(m2, terminator); err != nil { + return err + } + b, err := protoV2.Marshal(m2.Interface()) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err) + } + + urlFD := m.Descriptor().Fields().ByName("type_url") + valFD := m.Descriptor().Fields().ByName("value") + if seen[urlFD.Number()] { + return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name()) + } + if seen[valFD.Number()] { + return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name()) + } + m.Set(urlFD, protoreflect.ValueOfString(name)) + m.Set(valFD, protoreflect.ValueOfBytes(b)) + seen[urlFD.Number()] = true + seen[valFD.Number()] = true + return nil + } + + xname := protoreflect.FullName(name) + xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) + if xt == nil && isMessageSet(m.Descriptor()) { + xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) + } + if xt == nil { + return p.errorf("unrecognized extension %q", name) + } + fd := xt.TypeDescriptor() + if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { + return p.errorf("extension field %q does not extend message %q", 
name, m.Descriptor().FullName()) + } + + if err := p.checkForColon(fd); err != nil { + return err + } + + v := m.Get(fd) + if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { + v = m.Mutable(fd) + } + v, err = p.unmarshalValue(v, fd) + if err != nil { + return err + } + m.Set(fd, v) + return p.consumeOptionalSeparator() +} + +func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "" { + return v, p.errorf("unexpected EOF") + } + + switch { + case fd.IsList(): + lv := v.List() + var err error + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + vv := lv.NewElement() + vv, err = p.unmarshalSingularValue(vv, fd) + if err != nil { + return v, err + } + lv.Append(vv) + + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "]" { + break + } + if tok.value != "," { + return v, p.errorf("Expected ']' or ',' found %q", tok.value) + } + } + return v, nil + } + + // One value of the repeated field. + p.back() + vv := lv.NewElement() + vv, err = p.unmarshalSingularValue(vv, fd) + if err != nil { + return v, err + } + lv.Append(vv) + return v, nil + case fd.IsMap(): + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return v, p.errorf("expected '{' or '<', found %q", tok.value) + } + + keyFD := fd.MapKey() + valFD := fd.MapValue() + + mv := v.Map() + kv := keyFD.Default() + vv := mv.NewValue() + for { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == terminator { + break + } + var err error + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return v, err + } + if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil { + return v, err + } + if err := p.consumeOptionalSeparator(); err != nil { + return v, err + } + case "value": + if err := p.checkForColon(valFD); err != nil { + return v, err + } + if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil { + return v, err + } + if err := p.consumeOptionalSeparator(); err != nil { + return v, err + } + default: + p.back() + return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + mv.Set(kv.MapKey(), vv) + return v, nil + default: + p.back() + return p.unmarshalSingularValue(v, fd) + } +} + +func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "" { + return v, p.errorf("unexpected EOF") + } + + switch fd.Kind() { + case protoreflect.BoolKind: + switch tok.value { + case "true", "1", "t", "True": + return protoreflect.ValueOfBool(true), nil + case "false", "0", "f", "False": + return protoreflect.ValueOfBool(false), nil + } + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfInt32(int32(x)), nil + } + + // The C++ parser accepts large positive hex numbers that uses + // two's complement arithmetic to represent negative numbers. 
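+ // For example, for an int32 field the token "0xffffffff" is accepted by the + // fallback below and decodes to int32(-1).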
+ // This feature is here for backwards compatibility with C++. + if strings.HasPrefix(tok.value, "0x") { + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil + } + } + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfInt64(int64(x)), nil + } + + // The C++ parser accepts large positive hex numbers that uses + // two's complement arithmetic to represent negative numbers. + // This feature is here for backwards compatibility with C++. + if strings.HasPrefix(tok.value, "0x") { + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil + } + } + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfUint32(uint32(x)), nil + } + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfUint64(uint64(x)), nil + } + case protoreflect.FloatKind: + // Ignore 'f' for compatibility with output generated by C++, + // but don't remove 'f' when the value is "-inf" or "inf". + v := tok.value + if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { + v = v[:len(v)-len("f")] + } + if x, err := strconv.ParseFloat(v, 32); err == nil { + return protoreflect.ValueOfFloat32(float32(x)), nil + } + case protoreflect.DoubleKind: + // Ignore 'f' for compatibility with output generated by C++, + // but don't remove 'f' when the value is "-inf" or "inf". + v := tok.value + if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { + v = v[:len(v)-len("f")] + } + if x, err := strconv.ParseFloat(v, 64); err == nil { + return protoreflect.ValueOfFloat64(float64(x)), nil + } + case protoreflect.StringKind: + if isQuote(tok.value[0]) { + return protoreflect.ValueOfString(tok.unquoted), nil + } + case protoreflect.BytesKind: + if isQuote(tok.value[0]) { + return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil + } + case protoreflect.EnumKind: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil + } + vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value)) + if vd != nil { + return protoreflect.ValueOfEnum(vd.Number()), nil + } + case protoreflect.MessageKind, protoreflect.GroupKind: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return v, p.errorf("expected '{' or '<', found %q", tok.value) + } + err := p.unmarshalMessage(v.Message(), terminator) + return v, err + default: + panic(fmt.Sprintf("invalid kind %v", fd.Kind())) + } + return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value) +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + if fd.Message() == nil { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +// consumeExtensionOrAnyName consumes an extension name or an Any type URL and +// the following ']'. It returns the name or URL consumed. 
+func (p *textParser) consumeExtensionOrAnyName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in unmarshalMessage to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. 
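+ // For example, `name: "foo" "bar"` is parsed the same as `name: "foobar"`.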
+ cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +var errBadUTF8 = errors.New("proto: bad UTF-8") + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(rune(i)), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} diff --git a/extender/code/vendor/github.com/golang/protobuf/proto/text_encode.go 
b/extender/code/vendor/github.com/golang/protobuf/proto/text_encode.go new file mode 100644 index 000000000..a31134eeb --- /dev/null +++ b/extender/code/vendor/github.com/golang/protobuf/proto/text_encode.go @@ -0,0 +1,560 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "bytes" + "encoding" + "fmt" + "io" + "math" + "sort" + "strings" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapTextMarshalV2 = false + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line) + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes the proto text format of m to w. +func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error { + b, err := tm.marshal(m) + if len(b) > 0 { + if _, err := w.Write(b); err != nil { + return err + } + } + return err +} + +// Text returns a proto text formatted string of m. +func (tm *TextMarshaler) Text(m Message) string { + b, _ := tm.marshal(m) + return string(b) +} + +func (tm *TextMarshaler) marshal(m Message) ([]byte, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return []byte(""), nil + } + + if wrapTextMarshalV2 { + if m, ok := m.(encoding.TextMarshaler); ok { + return m.MarshalText() + } + + opts := prototext.MarshalOptions{ + AllowPartial: true, + EmitUnknown: true, + } + if !tm.Compact { + opts.Indent = " " + } + if !tm.ExpandAny { + opts.Resolver = (*protoregistry.Types)(nil) + } + return opts.Marshal(mr.Interface()) + } else { + w := &textWriter{ + compact: tm.Compact, + expandAny: tm.ExpandAny, + complete: true, + } + + if m, ok := m.(encoding.TextMarshaler); ok { + b, err := m.MarshalText() + if err != nil { + return nil, err + } + w.Write(b) + return w.buf, nil + } + + err := w.writeMessage(mr) + return w.buf, err + } +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// MarshalText writes the proto text format of m to w. +func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) } + +// MarshalTextString returns a proto text formatted string of m. +func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) } + +// CompactText writes the compact proto text format of m to w. +func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) } + +// CompactTextString returns a compact proto text formatted string of m. +func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) } + +var ( + newline = []byte("\n") + endBraceNewline = []byte("}\n") + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +// textWriter is an io.Writer that tracks its indentation level. 
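+// Usage sketch for the text marshalers above (m is any generated message;
+// the os package is assumed to be imported):
+//
+//	s := proto.MarshalTextString(m)  // indented, multi-line form
+//	c := proto.CompactTextString(m)  // single-line form
+//	tm := proto.TextMarshaler{Compact: true, ExpandAny: true}
+//	err := tm.Marshal(os.Stdout, m)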
+type textWriter struct { + compact bool // same as TextMarshaler.Compact + expandAny bool // same as TextMarshaler.ExpandAny + complete bool // whether the current position is a complete line + indent int // indentation level; never negative + buf []byte +} + +func (w *textWriter) Write(p []byte) (n int, _ error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + w.buf = append(w.buf, p...) + w.complete = false + return len(p), nil + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + w.buf = append(w.buf, ' ') + n++ + } + w.buf = append(w.buf, frag...) + n += len(frag) + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + w.buf = append(w.buf, frag...) + n += len(frag) + if i+1 < len(frags) { + w.buf = append(w.buf, '\n') + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + w.buf = append(w.buf, c) + w.complete = c == '\n' + return nil +} + +func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + + if fd.Kind() != protoreflect.GroupKind { + w.buf = append(w.buf, fd.Name()...) + w.WriteByte(':') + } else { + // Use message type name for group field name. + w.buf = append(w.buf, fd.Message().Name()...) + } + + if !w.compact { + w.WriteByte(' ') + } +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
+func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) { + md := m.Descriptor() + fdURL := md.Fields().ByName("type_url") + fdVal := md.Fields().ByName("value") + + url := m.Get(fdURL).String() + mt, err := protoregistry.GlobalTypes.FindMessageByURL(url) + if err != nil { + return false, nil + } + + b := m.Get(fdVal).Bytes() + m2 := mt.New() + if err := proto.Unmarshal(b, m2.Interface()); err != nil { + return false, nil + } + w.Write([]byte("[")) + if requiresQuotes(url) { + w.writeQuotedString(url) + } else { + w.Write([]byte(url)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.indent++ + } + if err := w.writeMessage(m2); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.indent-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (w *textWriter) writeMessage(m protoreflect.Message) error { + md := m.Descriptor() + if w.expandAny && md.FullName() == "google.protobuf.Any" { + if canExpand, err := w.writeProto3Any(m); canExpand { + return err + } + } + + fds := md.Fields() + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + } else { + i++ + } + if fd == nil || !m.Has(fd) { + continue + } + + switch { + case fd.IsList(): + lv := m.Get(fd).List() + for j := 0; j < lv.Len(); j++ { + w.writeName(fd) + v := lv.Get(j) + if err := w.writeSingularValue(v, fd); err != nil { + return err + } + w.WriteByte('\n') + } + case fd.IsMap(): + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := m.Get(fd).Map() + + type entry struct{ key, val protoreflect.Value } + var entries []entry + mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + entries = append(entries, entry{k.Value(), v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + switch kfd.Kind() { + case protoreflect.BoolKind: + return !entries[i].key.Bool() && entries[j].key.Bool() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return entries[i].key.Int() < entries[j].key.Int() + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return entries[i].key.Uint() < entries[j].key.Uint() + case protoreflect.StringKind: + return entries[i].key.String() < entries[j].key.String() + default: + panic("invalid kind") + } + }) + for _, entry := range entries { + w.writeName(fd) + w.WriteByte('<') + if !w.compact { + w.WriteByte('\n') + } + w.indent++ + w.writeName(kfd) + if err := w.writeSingularValue(entry.key, kfd); err != nil { + return err + } + w.WriteByte('\n') + w.writeName(vfd) + if err := w.writeSingularValue(entry.val, vfd); err != nil { + return err + } + w.WriteByte('\n') + w.indent-- + w.WriteByte('>') + w.WriteByte('\n') + } + default: + w.writeName(fd) + if err := w.writeSingularValue(m.Get(fd), fd); err != nil { + return err + } + w.WriteByte('\n') + } + } + + if b := m.GetUnknown(); len(b) > 0 { + w.writeUnknownFields(b) + } + return w.writeExtensions(m) +} + +func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error { + switch fd.Kind() { + case protoreflect.FloatKind, protoreflect.DoubleKind: + switch vf := v.Float(); { + case math.IsInf(vf, +1): + w.Write(posInf) + case math.IsInf(vf, -1): + w.Write(negInf) + case math.IsNaN(vf): + w.Write(nan) + default: + fmt.Fprint(w, v.Interface()) + } + case 
protoreflect.StringKind: + // NOTE: This does not validate UTF-8 for historical reasons. + w.writeQuotedString(string(v.String())) + case protoreflect.BytesKind: + w.writeQuotedString(string(v.Bytes())) + case protoreflect.MessageKind, protoreflect.GroupKind: + var bra, ket byte = '<', '>' + if fd.Kind() == protoreflect.GroupKind { + bra, ket = '{', '}' + } + w.WriteByte(bra) + if !w.compact { + w.WriteByte('\n') + } + w.indent++ + m := v.Message() + if m2, ok := m.Interface().(encoding.TextMarshaler); ok { + b, err := m2.MarshalText() + if err != nil { + return err + } + w.Write(b) + } else { + w.writeMessage(m) + } + w.indent-- + w.WriteByte(ket) + case protoreflect.EnumKind: + if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil { + fmt.Fprint(w, ev.Name()) + } else { + fmt.Fprint(w, v.Enum()) + } + default: + fmt.Fprint(w, v.Interface()) + } + return nil +} + +// writeQuotedString writes a quoted string in the protocol buffer text format. +func (w *textWriter) writeQuotedString(s string) { + w.WriteByte('"') + for i := 0; i < len(s); i++ { + switch c := s[i]; c { + case '\n': + w.buf = append(w.buf, `\n`...) + case '\r': + w.buf = append(w.buf, `\r`...) + case '\t': + w.buf = append(w.buf, `\t`...) + case '"': + w.buf = append(w.buf, `\"`...) + case '\\': + w.buf = append(w.buf, `\\`...) + default: + if isPrint := c >= 0x20 && c < 0x7f; isPrint { + w.buf = append(w.buf, c) + } else { + w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...) + } + } + } + w.WriteByte('"') +} + +func (w *textWriter) writeUnknownFields(b []byte) { + if !w.compact { + fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b)) + } + + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return + } + b = b[n:] + + if wtyp == protowire.EndGroupType { + w.indent-- + w.Write(endBraceNewline) + continue + } + fmt.Fprint(w, num) + if wtyp != protowire.StartGroupType { + w.WriteByte(':') + } + if !w.compact || wtyp == protowire.StartGroupType { + w.WriteByte(' ') + } + switch wtyp { + case protowire.VarintType: + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.Fixed32Type: + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.Fixed64Type: + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.BytesType: + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprintf(w, "%q", v) + case protowire.StartGroupType: + w.WriteByte('{') + w.indent++ + default: + fmt.Fprintf(w, "/* unknown wire type %d */", wtyp) + } + w.WriteByte('\n') + } +} + +// writeExtensions writes all the extensions in m. +func (w *textWriter) writeExtensions(m protoreflect.Message) error { + md := m.Descriptor() + if md.ExtensionRanges().Len() == 0 { + return nil + } + + type ext struct { + desc protoreflect.FieldDescriptor + val protoreflect.Value + } + var exts []ext + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + exts = append(exts, ext{fd, v}) + } + return true + }) + sort.Slice(exts, func(i, j int) bool { + return exts[i].desc.Number() < exts[j].desc.Number() + }) + + for _, ext := range exts { + // For message set, use the name of the message as the extension name. 
+ name := string(ext.desc.FullName()) + if isMessageSet(ext.desc.ContainingMessage()) { + name = strings.TrimSuffix(name, ".message_set_extension") + } + + if !ext.desc.IsList() { + if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil { + return err + } + } else { + lv := ext.val.List() + for i := 0; i < lv.Len(); i++ { + if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil { + return err + } + } + } + } + return nil +} + +func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error { + fmt.Fprintf(w, "[%s]:", name) + if !w.compact { + w.WriteByte(' ') + } + if err := w.writeSingularValue(v, fd); err != nil { + return err + } + w.WriteByte('\n') + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + for i := 0; i < w.indent*2; i++ { + w.buf = append(w.buf, ' ') + } + w.complete = false +} diff --git a/extender/code/vendor/github.com/golang/protobuf/proto/wire.go b/extender/code/vendor/github.com/golang/protobuf/proto/wire.go new file mode 100644 index 000000000..d7c28da5a --- /dev/null +++ b/extender/code/vendor/github.com/golang/protobuf/proto/wire.go @@ -0,0 +1,78 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/runtime/protoiface" +) + +// Size returns the size in bytes of the wire-format encoding of m. +func Size(m Message) int { + if m == nil { + return 0 + } + mi := MessageV2(m) + return protoV2.Size(mi) +} + +// Marshal returns the wire-format encoding of m. +func Marshal(m Message) ([]byte, error) { + b, err := marshalAppend(nil, m, false) + if b == nil { + b = zeroBytes + } + return b, err +} + +var zeroBytes = make([]byte, 0, 0) + +func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) { + if m == nil { + return nil, ErrNil + } + mi := MessageV2(m) + nbuf, err := protoV2.MarshalOptions{ + Deterministic: deterministic, + AllowPartial: true, + }.MarshalAppend(buf, mi) + if err != nil { + return buf, err + } + if len(buf) == len(nbuf) { + if !mi.ProtoReflect().IsValid() { + return buf, ErrNil + } + } + return nbuf, checkRequiredNotSet(mi) +} + +// Unmarshal parses a wire-format message in b and places the decoded results in m. +// +// Unmarshal resets m before starting to unmarshal, so any existing data in m is always +// removed. Use UnmarshalMerge to preserve and append to existing data. +func Unmarshal(b []byte, m Message) error { + m.Reset() + return UnmarshalMerge(b, m) +} + +// UnmarshalMerge parses a wire-format message in b and places the decoded results in m. +func UnmarshalMerge(b []byte, m Message) error { + mi := MessageV2(m) + out, err := protoV2.UnmarshalOptions{ + AllowPartial: true, + Merge: true, + }.UnmarshalState(protoiface.UnmarshalInput{ + Buf: b, + Message: mi.ProtoReflect(), + }) + if err != nil { + return err + } + if out.Flags&protoiface.UnmarshalInitialized > 0 { + return nil + } + return checkRequiredNotSet(mi) +} diff --git a/extender/code/vendor/github.com/golang/protobuf/proto/wrappers.go b/extender/code/vendor/github.com/golang/protobuf/proto/wrappers.go new file mode 100644 index 000000000..398e34859 --- /dev/null +++ b/extender/code/vendor/github.com/golang/protobuf/proto/wrappers.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
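The wire.go shim above forwards to the protobuf v2 runtime. A minimal round-trip sketch of my own (using the vendored durationpb alias package purely as a convenient concrete message type):

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	durationpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	src := &durationpb.Duration{Seconds: 3, Nanos: 500000000}

	// Marshal produces the wire-format encoding via the v2 MarshalOptions.
	b, err := proto.Marshal(src)
	if err != nil {
		panic(err)
	}

	// Unmarshal resets dst before decoding; UnmarshalMerge would instead
	// merge into whatever dst already holds.
	var dst durationpb.Duration
	if err := proto.Unmarshal(b, &dst); err != nil {
		panic(err)
	}
	fmt.Println(dst.Seconds, dst.Nanos) // 3 500000000
}
```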
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +// Bool stores v in a new bool value and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int stores v in a new int32 value and returns a pointer to it. +// +// Deprecated: Use Int32 instead. +func Int(v int) *int32 { return Int32(int32(v)) } + +// Int32 stores v in a new int32 value and returns a pointer to it. +func Int32(v int32) *int32 { return &v } + +// Int64 stores v in a new int64 value and returns a pointer to it. +func Int64(v int64) *int64 { return &v } + +// Uint32 stores v in a new uint32 value and returns a pointer to it. +func Uint32(v uint32) *uint32 { return &v } + +// Uint64 stores v in a new uint64 value and returns a pointer to it. +func Uint64(v uint64) *uint64 { return &v } + +// Float32 stores v in a new float32 value and returns a pointer to it. +func Float32(v float32) *float32 { return &v } + +// Float64 stores v in a new float64 value and returns a pointer to it. +func Float64(v float64) *float64 { return &v } + +// String stores v in a new string value and returns a pointer to it. +func String(v string) *string { return &v } diff --git a/extender/code/vendor/github.com/golang/protobuf/ptypes/any.go b/extender/code/vendor/github.com/golang/protobuf/ptypes/any.go new file mode 100644 index 000000000..e729dcff1 --- /dev/null +++ b/extender/code/vendor/github.com/golang/protobuf/ptypes/any.go @@ -0,0 +1,165 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ptypes + +import ( + "fmt" + "strings" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + anypb "github.com/golang/protobuf/ptypes/any" +) + +const urlPrefix = "type.googleapis.com/" + +// AnyMessageName returns the message name contained in an anypb.Any message. +// Most type assertions should use the Is function instead. +func AnyMessageName(any *anypb.Any) (string, error) { + name, err := anyMessageName(any) + return string(name), err +} +func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { + if any == nil { + return "", fmt.Errorf("message is nil") + } + name := protoreflect.FullName(any.TypeUrl) + if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 { + name = name[i+len("/"):] + } + if !name.IsValid() { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return name, nil +} + +// MarshalAny marshals the given message m into an anypb.Any message. +func MarshalAny(m proto.Message) (*anypb.Any, error) { + switch dm := m.(type) { + case DynamicAny: + m = dm.Message + case *DynamicAny: + if dm == nil { + return nil, proto.ErrNil + } + m = dm.Message + } + b, err := proto.Marshal(m) + if err != nil { + return nil, err + } + return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil +} + +// Empty returns a new message of the type specified in an anypb.Any message. +// It returns protoregistry.NotFound if the corresponding message type could not +// be resolved in the global registry. 
+func Empty(any *anypb.Any) (proto.Message, error) { + name, err := anyMessageName(any) + if err != nil { + return nil, err + } + mt, err := protoregistry.GlobalTypes.FindMessageByName(name) + if err != nil { + return nil, err + } + return proto.MessageV1(mt.New().Interface()), nil +} + +// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message +// into the provided message m. It returns an error if the target message +// does not match the type in the Any message or if an unmarshal error occurs. +// +// The target message m may be a *DynamicAny message. If the underlying message +// type could not be resolved, then this returns protoregistry.NotFound. +func UnmarshalAny(any *anypb.Any, m proto.Message) error { + if dm, ok := m.(*DynamicAny); ok { + if dm.Message == nil { + var err error + dm.Message, err = Empty(any) + if err != nil { + return err + } + } + m = dm.Message + } + + anyName, err := AnyMessageName(any) + if err != nil { + return err + } + msgName := proto.MessageName(m) + if anyName != msgName { + return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName) + } + return proto.Unmarshal(any.Value, m) +} + +// Is reports whether the Any message contains a message of the specified type. +func Is(any *anypb.Any, m proto.Message) bool { + if any == nil || m == nil { + return false + } + name := proto.MessageName(m) + if !strings.HasSuffix(any.TypeUrl, name) { + return false + } + return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/' +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in an anypb.Any message. +// The allocated message is stored in the embedded proto.Message. +// +// Example: +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct{ proto.Message } + +func (m DynamicAny) String() string { + if m.Message == nil { + return "" + } + return m.Message.String() +} +func (m DynamicAny) Reset() { + if m.Message == nil { + return + } + m.Message.Reset() +} +func (m DynamicAny) ProtoMessage() { + return +} +func (m DynamicAny) ProtoReflect() protoreflect.Message { + if m.Message == nil { + return nil + } + return dynamicAny{proto.MessageReflect(m.Message)} +} + +type dynamicAny struct{ protoreflect.Message } + +func (m dynamicAny) Type() protoreflect.MessageType { + return dynamicAnyType{m.Message.Type()} +} +func (m dynamicAny) New() protoreflect.Message { + return dynamicAnyType{m.Message.Type()}.New() +} +func (m dynamicAny) Interface() protoreflect.ProtoMessage { + return DynamicAny{proto.MessageV1(m.Message.Interface())} +} + +type dynamicAnyType struct{ protoreflect.MessageType } + +func (t dynamicAnyType) New() protoreflect.Message { + return dynamicAny{t.MessageType.New()} +} +func (t dynamicAnyType) Zero() protoreflect.Message { + return dynamicAny{t.MessageType.Zero()} +} diff --git a/extender/code/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/extender/code/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go new file mode 100644 index 000000000..0ef27d33d --- /dev/null +++ b/extender/code/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -0,0 +1,62 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
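Taken together, the Any helpers in this file support a wrap/check/unwrap pattern. A usage sketch of my own, again leaning on the vendored durationpb package for a concrete payload:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	durationpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// MarshalAny wraps the payload and prepends "type.googleapis.com/"
	// to the message name to form the type URL.
	a, err := ptypes.MarshalAny(&durationpb.Duration{Seconds: 90})
	if err != nil {
		panic(err)
	}
	fmt.Println(a.TypeUrl) // type.googleapis.com/google.protobuf.Duration

	// Is cheaply checks the type URL suffix before committing to a decode.
	if ptypes.Is(a, &durationpb.Duration{}) {
		var d durationpb.Duration
		if err := ptypes.UnmarshalAny(a, &d); err != nil {
			panic(err)
		}
		fmt.Println(d.Seconds) // 90
	}
}
```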
+// source: github.com/golang/protobuf/ptypes/any/any.proto + +package any + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/any.proto. + +type Any = anypb.Any + +var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{ + 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, + 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() } +func file_github_com_golang_protobuf_ptypes_any_any_proto_init() { + if File_github_com_golang_protobuf_ptypes_any_any_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_any_any_proto = out.File + file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil +} diff --git a/extender/code/vendor/github.com/golang/protobuf/ptypes/doc.go b/extender/code/vendor/github.com/golang/protobuf/ptypes/doc.go new file mode 100644 index 000000000..fb9edd5c6 --- /dev/null +++ b/extender/code/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -0,0 +1,6 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ptypes provides functionality for interacting with well-known types. 
+package ptypes diff --git a/extender/code/vendor/github.com/golang/protobuf/ptypes/duration.go b/extender/code/vendor/github.com/golang/protobuf/ptypes/duration.go new file mode 100644 index 000000000..6110ae8a4 --- /dev/null +++ b/extender/code/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ptypes + +import ( + "errors" + "fmt" + "time" + + durationpb "github.com/golang/protobuf/ptypes/duration" +) + +// Range of google.protobuf.Duration as specified in duration.proto. +// This is about 10,000 years in seconds. +const ( + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// Duration converts a durationpb.Duration to a time.Duration. +// Duration returns an error if dur is invalid or overflows a time.Duration. +func Duration(dur *durationpb.Duration) (time.Duration, error) { + if err := validateDuration(dur); err != nil { + return 0, err + } + d := time.Duration(dur.Seconds) * time.Second + if int64(d/time.Second) != dur.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) + } + if dur.Nanos != 0 { + d += time.Duration(dur.Nanos) * time.Nanosecond + if (d < 0) != (dur.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a durationpb.Duration. +func DurationProto(d time.Duration) *durationpb.Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &durationpb.Duration{ + Seconds: int64(secs), + Nanos: int32(nanos), + } +} + +// validateDuration determines whether the durationpb.Duration is valid +// according to the definition in google/protobuf/duration.proto. +// A valid durpb.Duration may still be too large to fit into a time.Duration +// Note that the range of durationpb.Duration is about 10,000 years, +// while the range of time.Duration is about 290 years. +func validateDuration(dur *durationpb.Duration) error { + if dur == nil { + return errors.New("duration: nil Duration") + } + if dur.Seconds < minSeconds || dur.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", dur) + } + if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", dur) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur) + } + return nil +} diff --git a/extender/code/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/extender/code/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go new file mode 100644 index 000000000..d0079ee3e --- /dev/null +++ b/extender/code/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -0,0 +1,63 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/duration/duration.proto + +package duration + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/duration.proto. 
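A quick sketch of my own showing both conversion directions from duration.go above:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Duration -> google.protobuf.Duration: always succeeds, since
	// time.Duration's ~290-year range fits inside the proto's ~10,000 years.
	pb := ptypes.DurationProto(1500 * time.Millisecond)
	fmt.Println(pb.Seconds, pb.Nanos) // 1 500000000

	// google.protobuf.Duration -> time.Duration: validated, and may fail on
	// nil input, out-of-range seconds, sign mismatch, or overflow.
	d, err := ptypes.Duration(pb)
	fmt.Println(d, err) // 1.5s <nil>
}
```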
+ +type Duration = durationpb.Duration + +var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{ + 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() } +func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() { + if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File + file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil +} diff --git a/extender/code/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/extender/code/vendor/github.com/golang/protobuf/ptypes/timestamp.go new file mode 100644 index 000000000..026d0d491 --- /dev/null +++ b/extender/code/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -0,0 +1,103 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ptypes + +import ( + "errors" + "fmt" + "time" + + timestamppb "github.com/golang/protobuf/ptypes/timestamp" +) + +// Range of google.protobuf.Duration as specified in timestamp.proto. +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). 
+ minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// Timestamp converts a timestamppb.Timestamp to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return +// value is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { + // Don't return the zero value on error, because it corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampNow returns a google.protobuf.Timestamp for the current time. +func TimestampNow() *timestamppb.Timestamp { + ts, err := TimestampProto(time.Now()) + if err != nil { + panic("ptypes: time.Now() out of Timestamp range") + } + return ts +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { + ts := &timestamppb.Timestamp{ + Seconds: t.Unix(), + Nanos: int32(t.Nanosecond()), + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} + +// TimestampString returns the RFC 3339 string for valid Timestamps. +// For invalid Timestamps, it returns an error message in parentheses. +func TimestampString(ts *timestamppb.Timestamp) string { + t, err := Timestamp(ts) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return t.Format(time.RFC3339Nano) +} + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01) +// and has a Nanos field in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes the problem. +// +// Every valid Timestamp can be represented by a time.Time, +// but the converse is not true. +func validateTimestamp(ts *timestamppb.Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) + } + return nil +} diff --git a/extender/code/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/extender/code/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go new file mode 100644 index 000000000..a76f80760 --- /dev/null +++ b/extender/code/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -0,0 +1,64 @@ +// Code generated by protoc-gen-go. DO NOT EDIT.
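And the analogous round trip for timestamp.go, sketched under the same assumptions:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	ts, err := ptypes.TimestampProto(time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC))
	if err != nil {
		panic(err) // only possible outside [0001-01-01, 10000-01-01)
	}
	fmt.Println(ptypes.TimestampString(ts)) // 2022-01-01T00:00:00Z

	// Per the doc comment above, Timestamp returns a best-effort time.Time
	// (not the zero value) even when it also returns an error.
	t, err := ptypes.Timestamp(ts)
	fmt.Println(t, err) // 2022-01-01 00:00:00 +0000 UTC <nil>
}
```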
+// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto + +package timestamp + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/timestamp.proto. + +type Timestamp = timestamppb.Timestamp + +var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{ + 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37, + 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() } +func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() { + if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil +} diff --git a/extender/code/vendor/github.com/google/go-cmp/LICENSE b/extender/code/vendor/github.com/google/go-cmp/LICENSE new file mode 100644 index 000000000..32017f8fa --- /dev/null +++ b/extender/code/vendor/github.com/google/go-cmp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2017 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/extender/code/vendor/github.com/google/go-cmp/cmp/compare.go b/extender/code/vendor/github.com/google/go-cmp/cmp/compare.go new file mode 100644 index 000000000..665618684 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-cmp/cmp/compare.go @@ -0,0 +1,682 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package cmp determines equality of values. +// +// This package is intended to be a more powerful and safer alternative to +// reflect.DeepEqual for comparing whether two values are semantically equal. +// It is intended to only be used in tests, as performance is not a goal and +// it may panic if it cannot compare the values. Its propensity towards +// panicking means that it is unsuitable for production environments where a +// spurious panic may be fatal. +// +// The primary features of cmp are: +// +// • When the default behavior of equality does not suit the needs of the test, +// custom equality functions can override the equality operation. +// For example, an equality function may report floats as equal so long as they +// are within some tolerance of each other. +// +// • Types that have an Equal method may use that method to determine equality. +// This allows package authors to determine the equality operation for the types +// that they define. +// +// • If no custom equality functions are used and no Equal method is defined, +// equality is determined by recursively comparing the primitive kinds on both +// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported +// fields are not compared by default; they result in panics unless suppressed +// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly +// compared using the Exporter option.
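A usage sketch for this package doc (my own example, not part of the diff): the float-tolerance Comparer mirrors the first feature bullet, and the Equal method on Money exercises the second. cmp.Comparer itself is defined elsewhere in this package, outside the lines shown here.

```go
package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

// Money has an unexported field, but its Equal method (rule 2 of cmp.Equal)
// keeps cmp from panicking over it.
type Money struct{ cents int64 }

func (m Money) Equal(o Money) bool { return m.cents == o.cents }

func main() {
	fmt.Println(cmp.Equal(Money{100}, Money{100})) // true, via the Equal method

	// Plain == on floats (rule 3):
	fmt.Println(cmp.Equal(0.1+0.2, 0.3)) // false

	// A custom Comparer (rule 1) overrides equality for float64 pairs.
	approx := cmp.Comparer(func(x, y float64) bool {
		return math.Abs(x-y) < 1e-9
	})
	fmt.Println(cmp.Equal(0.1+0.2, 0.3, approx)) // true
}
```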
+package cmp + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp/internal/diff" + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/function" + "github.com/google/go-cmp/cmp/internal/value" +) + +// Equal reports whether x and y are equal by recursively applying the +// following rules in the given order to x and y and all of their sub-values: +// +// • Let S be the set of all Ignore, Transformer, and Comparer options that +// remain after applying all path filters, value filters, and type filters. +// If at least one Ignore exists in S, then the comparison is ignored. +// If the number of Transformer and Comparer options in S is greater than one, +// then Equal panics because it is ambiguous which option to use. +// If S contains a single Transformer, then use that to transform the current +// values and recursively call Equal on the output values. +// If S contains a single Comparer, then use that to compare the current values. +// Otherwise, evaluation proceeds to the next rule. +// +// • If the values have an Equal method of the form "(T) Equal(T) bool" or +// "(T) Equal(I) bool" where T is assignable to I, then use the result of +// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and +// evaluation proceeds to the next rule. +// +// • Lastly, try to compare x and y based on their basic kinds. +// Simple kinds like booleans, integers, floats, complex numbers, strings, and +// channels are compared using the equivalent of the == operator in Go. +// Functions are only equal if they are both nil, otherwise they are unequal. +// +// Structs are equal if recursively calling Equal on all fields report equal. +// If a struct contains unexported fields, Equal panics unless an Ignore option +// (e.g., cmpopts.IgnoreUnexported) ignores that field or the Exporter option +// explicitly permits comparing the unexported field. +// +// Slices are equal if they are both nil or both non-nil, where recursively +// calling Equal on all non-ignored slice or array elements report equal. +// Empty non-nil slices and nil slices are not equal; to equate empty slices, +// consider using cmpopts.EquateEmpty. +// +// Maps are equal if they are both nil or both non-nil, where recursively +// calling Equal on all non-ignored map entries report equal. +// Map keys are equal according to the == operator. +// To use custom comparisons for map keys, consider using cmpopts.SortMaps. +// Empty non-nil maps and nil maps are not equal; to equate empty maps, +// consider using cmpopts.EquateEmpty. +// +// Pointers and interfaces are equal if they are both nil or both non-nil, +// where they have the same underlying concrete type and recursively +// calling Equal on the underlying values reports equal. +// +// Before recursing into a pointer, slice element, or map, the current path +// is checked to detect whether the address has already been visited. +// If there is a cycle, then the pointed at values are considered equal +// only if both addresses were previously visited in the same path step. +func Equal(x, y interface{}, opts ...Option) bool { + s := newState(opts) + s.compareAny(rootStep(x, y)) + return s.result.Equal() +} + +// Diff returns a human-readable report of the differences between two values: +// y - x. It returns an empty string if and only if Equal returns true for the +// same input values and options. +// +// The output is displayed as a literal in pseudo-Go syntax. 
+// At the start of each line, a "-" prefix indicates an element removed from x, +// a "+" prefix indicates an element added to y, and the lack of a prefix +// indicates an element common to both x and y. If possible, the output +// uses fmt.Stringer.String or error.Error methods to produce more humanly +// readable outputs. In such cases, the string is prefixed with either an +// 's' or 'e' character, respectively, to indicate that the method was called. +// +// Do not depend on this output being stable. If you need the ability to +// programmatically interpret the difference, consider using a custom Reporter. +func Diff(x, y interface{}, opts ...Option) string { + s := newState(opts) + + // Optimization: If there are no other reporters, we can optimize for the + // common case where the result is equal (and thus no reported difference). + // This avoids the expensive construction of a difference tree. + if len(s.reporters) == 0 { + s.compareAny(rootStep(x, y)) + if s.result.Equal() { + return "" + } + s.result = diff.Result{} // Reset results + } + + r := new(defaultReporter) + s.reporters = append(s.reporters, reporter{r}) + s.compareAny(rootStep(x, y)) + d := r.String() + if (d == "") != s.result.Equal() { + panic("inconsistent difference and equality results") + } + return d +} + +// rootStep constructs the first path step. If x and y have differing types, +// then they are stored within an empty interface type. +func rootStep(x, y interface{}) PathStep { + vx := reflect.ValueOf(x) + vy := reflect.ValueOf(y) + + // If the inputs are different types, auto-wrap them in an empty interface + // so that they have the same parent type. + var t reflect.Type + if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() { + t = reflect.TypeOf((*interface{})(nil)).Elem() + if vx.IsValid() { + vvx := reflect.New(t).Elem() + vvx.Set(vx) + vx = vvx + } + if vy.IsValid() { + vvy := reflect.New(t).Elem() + vvy.Set(vy) + vy = vvy + } + } else { + t = vx.Type() + } + + return &pathStep{t, vx, vy} +} + +type state struct { + // These fields represent the "comparison state". + // Calling statelessCompare must not result in observable changes to these. + result diff.Result // The current result of comparison + curPath Path // The current path in the value tree + curPtrs pointerPath // The current set of visited pointers + reporters []reporter // Optional reporters + + // recChecker checks for infinite cycles applying the same set of + // transformers upon the output of itself. + recChecker recChecker + + // dynChecker triggers pseudo-random checks for option correctness. + // It is safe for statelessCompare to mutate this value. + dynChecker dynChecker + + // These fields, once set by processOption, will not change. + exporters []exporter // List of exporters for structs with unexported fields + opts Options // List of all fundamental and filter options +} + +func newState(opts []Option) *state { + // Always ensure a validator option exists to validate the inputs.
+ s := &state{opts: Options{validator{}}} + s.curPtrs.Init() + s.processOption(Options(opts)) + return s +} + +func (s *state) processOption(opt Option) { + switch opt := opt.(type) { + case nil: + case Options: + for _, o := range opt { + s.processOption(o) + } + case coreOption: + type filtered interface { + isFiltered() bool + } + if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() { + panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt)) + } + s.opts = append(s.opts, opt) + case exporter: + s.exporters = append(s.exporters, opt) + case reporter: + s.reporters = append(s.reporters, opt) + default: + panic(fmt.Sprintf("unknown option %T", opt)) + } +} + +// statelessCompare compares two values and returns the result. +// This function is stateless in that it does not alter the current result, +// or output to any registered reporters. +func (s *state) statelessCompare(step PathStep) diff.Result { + // We do not save and restore curPath and curPtrs because all of the + // compareX methods should properly push and pop from them. + // It is an implementation bug if the contents of the paths differ from + // when calling this function to when returning from it. + + oldResult, oldReporters := s.result, s.reporters + s.result = diff.Result{} // Reset result + s.reporters = nil // Remove reporters to avoid spurious printouts + s.compareAny(step) + res := s.result + s.result, s.reporters = oldResult, oldReporters + return res +} + +func (s *state) compareAny(step PathStep) { + // Update the path stack. + s.curPath.push(step) + defer s.curPath.pop() + for _, r := range s.reporters { + r.PushStep(step) + defer r.PopStep() + } + s.recChecker.Check(s.curPath) + + // Cycle-detection for slice elements (see NOTE in compareSlice). + t := step.Type() + vx, vy := step.Values() + if si, ok := step.(SliceIndex); ok && si.isSlice && vx.IsValid() && vy.IsValid() { + px, py := vx.Addr(), vy.Addr() + if eq, visited := s.curPtrs.Push(px, py); visited { + s.report(eq, reportByCycle) + return + } + defer s.curPtrs.Pop(px, py) + } + + // Rule 1: Check whether an option applies on this node in the value tree. + if s.tryOptions(t, vx, vy) { + return + } + + // Rule 2: Check whether the type has a valid Equal method. + if s.tryMethod(t, vx, vy) { + return + } + + // Rule 3: Compare based on the underlying kind. + switch t.Kind() { + case reflect.Bool: + s.report(vx.Bool() == vy.Bool(), 0) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s.report(vx.Int() == vy.Int(), 0) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + s.report(vx.Uint() == vy.Uint(), 0) + case reflect.Float32, reflect.Float64: + s.report(vx.Float() == vy.Float(), 0) + case reflect.Complex64, reflect.Complex128: + s.report(vx.Complex() == vy.Complex(), 0) + case reflect.String: + s.report(vx.String() == vy.String(), 0) + case reflect.Chan, reflect.UnsafePointer: + s.report(vx.Pointer() == vy.Pointer(), 0) + case reflect.Func: + s.report(vx.IsNil() && vy.IsNil(), 0) + case reflect.Struct: + s.compareStruct(t, vx, vy) + case reflect.Slice, reflect.Array: + s.compareSlice(t, vx, vy) + case reflect.Map: + s.compareMap(t, vx, vy) + case reflect.Ptr: + s.comparePtr(t, vx, vy) + case reflect.Interface: + s.compareInterface(t, vx, vy) + default: + panic(fmt.Sprintf("%v kind not handled", t.Kind())) + } +} + +func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool { + // Evaluate all filters and apply the remaining options. 
+ if opt := s.opts.filter(s, t, vx, vy); opt != nil { + opt.apply(s, vx, vy) + return true + } + return false +} + +func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool { + // Check if this type even has an Equal method. + m, ok := t.MethodByName("Equal") + if !ok || !function.IsType(m.Type, function.EqualAssignable) { + return false + } + + eq := s.callTTBFunc(m.Func, vx, vy) + s.report(eq, reportByMethod) + return true +} + +func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { + v = sanitizeValue(v, f.Type().In(0)) + if !s.dynChecker.Next() { + return f.Call([]reflect.Value{v})[0] + } + + // Run the function twice and ensure that we get the same results back. + // We run in goroutines so that the race detector (if enabled) can detect + // unsafe mutations to the input. + c := make(chan reflect.Value) + go detectRaces(c, f, v) + got := <-c + want := f.Call([]reflect.Value{v})[0] + if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() { + // To avoid false-positives with non-reflexive equality operations, + // we sanity check whether a value is equal to itself. + if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() { + return want + } + panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f))) + } + return want +} + +func (s *state) callTTBFunc(f, x, y reflect.Value) bool { + x = sanitizeValue(x, f.Type().In(0)) + y = sanitizeValue(y, f.Type().In(1)) + if !s.dynChecker.Next() { + return f.Call([]reflect.Value{x, y})[0].Bool() + } + + // Swapping the input arguments is sufficient to check that + // f is symmetric and deterministic. + // We run in goroutines so that the race detector (if enabled) can detect + // unsafe mutations to the input. + c := make(chan reflect.Value) + go detectRaces(c, f, y, x) + got := <-c + want := f.Call([]reflect.Value{x, y})[0].Bool() + if !got.IsValid() || got.Bool() != want { + panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f))) + } + return want +} + +func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { + var ret reflect.Value + defer func() { + recover() // Ignore panics, let the other call to f panic instead + c <- ret + }() + ret = f.Call(vs)[0] +} + +// sanitizeValue converts nil interfaces of type T to those of type R, +// assuming that T is assignable to R. +// Otherwise, it returns the input value as is. +func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { + // TODO(≥go1.10): Workaround for reflect bug (https://golang.org/issue/22143). + if !flags.AtLeastGo110 { + if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { + return reflect.New(t).Elem() + } + } + return v +} + +func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { + var addr bool + var vax, vay reflect.Value // Addressable versions of vx and vy + + var mayForce, mayForceInit bool + step := StructField{&structField{}} + for i := 0; i < t.NumField(); i++ { + step.typ = t.Field(i).Type + step.vx = vx.Field(i) + step.vy = vy.Field(i) + step.name = t.Field(i).Name + step.idx = i + step.unexported = !isExported(step.name) + if step.unexported { + if step.name == "_" { + continue + } + // Defer checking of unexported fields until later to give an + // Ignore a chance to ignore the field. + if !vax.IsValid() || !vay.IsValid() { + // For retrieveUnexportedField to work, the parent struct must + // be addressable. Create a new copy of the values if + // necessary to make them addressable. 
+ addr = vx.CanAddr() || vy.CanAddr() + vax = makeAddressable(vx) + vay = makeAddressable(vy) + } + if !mayForceInit { + for _, xf := range s.exporters { + mayForce = mayForce || xf(t) + } + mayForceInit = true + } + step.mayForce = mayForce + step.paddr = addr + step.pvx = vax + step.pvy = vay + step.field = t.Field(i) + } + s.compareAny(step) + } +} + +func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) { + isSlice := t.Kind() == reflect.Slice + if isSlice && (vx.IsNil() || vy.IsNil()) { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // NOTE: It is incorrect to call curPtrs.Push on the slice header pointer + // since slices represents a list of pointers, rather than a single pointer. + // The pointer checking logic must be handled on a per-element basis + // in compareAny. + // + // A slice header (see reflect.SliceHeader) in Go is a tuple of a starting + // pointer P, a length N, and a capacity C. Supposing each slice element has + // a memory size of M, then the slice is equivalent to the list of pointers: + // [P+i*M for i in range(N)] + // + // For example, v[:0] and v[:1] are slices with the same starting pointer, + // but they are clearly different values. Using the slice pointer alone + // violates the assumption that equal pointers implies equal values. + + step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}, isSlice: isSlice}} + withIndexes := func(ix, iy int) SliceIndex { + if ix >= 0 { + step.vx, step.xkey = vx.Index(ix), ix + } else { + step.vx, step.xkey = reflect.Value{}, -1 + } + if iy >= 0 { + step.vy, step.ykey = vy.Index(iy), iy + } else { + step.vy, step.ykey = reflect.Value{}, -1 + } + return step + } + + // Ignore options are able to ignore missing elements in a slice. + // However, detecting these reliably requires an optimal differencing + // algorithm, for which diff.Difference is not. + // + // Instead, we first iterate through both slices to detect which elements + // would be ignored if standing alone. The index of non-discarded elements + // are stored in a separate slice, which diffing is then performed on. + var indexesX, indexesY []int + var ignoredX, ignoredY []bool + for ix := 0; ix < vx.Len(); ix++ { + ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0 + if !ignored { + indexesX = append(indexesX, ix) + } + ignoredX = append(ignoredX, ignored) + } + for iy := 0; iy < vy.Len(); iy++ { + ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0 + if !ignored { + indexesY = append(indexesY, iy) + } + ignoredY = append(ignoredY, ignored) + } + + // Compute an edit-script for slices vx and vy (excluding ignored elements). + edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result { + return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy])) + }) + + // Replay the ignore-scripts and the edit-script. + var ix, iy int + for ix < vx.Len() || iy < vy.Len() { + var e diff.EditType + switch { + case ix < len(ignoredX) && ignoredX[ix]: + e = diff.UniqueX + case iy < len(ignoredY) && ignoredY[iy]: + e = diff.UniqueY + default: + e, edits = edits[0], edits[1:] + } + switch e { + case diff.UniqueX: + s.compareAny(withIndexes(ix, -1)) + ix++ + case diff.UniqueY: + s.compareAny(withIndexes(-1, iy)) + iy++ + default: + s.compareAny(withIndexes(ix, iy)) + ix++ + iy++ + } + } +} + +func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // Cycle-detection for maps. 
+ if eq, visited := s.curPtrs.Push(vx, vy); visited { + s.report(eq, reportByCycle) + return + } + defer s.curPtrs.Pop(vx, vy) + + // We combine and sort the two map keys so that we can perform the + // comparisons in a deterministic order. + step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}} + for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) { + step.vx = vx.MapIndex(k) + step.vy = vy.MapIndex(k) + step.key = k + if !step.vx.IsValid() && !step.vy.IsValid() { + // It is possible for both vx and vy to be invalid if the + // key contained a NaN value in it. + // + // Even with the ability to retrieve NaN keys in Go 1.12, + // there still isn't a sensible way to compare the values since + // a NaN key may map to multiple unordered values. + // The most reasonable way to compare NaNs would be to compare the + // set of values. However, this is impossible to do efficiently + // since set equality is provably an O(n^2) operation given only + // an Equal function. If we had a Less function or Hash function, + // this could be done in O(n*log(n)) or O(n), respectively. + // + // Rather than adding complex logic to deal with NaNs, make it + // the user's responsibility to compare such obscure maps. + const help = "consider providing a Comparer to compare the map" + panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help)) + } + s.compareAny(step) + } +} + +func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // Cycle-detection for pointers. + if eq, visited := s.curPtrs.Push(vx, vy); visited { + s.report(eq, reportByCycle) + return + } + defer s.curPtrs.Pop(vx, vy) + + vx, vy = vx.Elem(), vy.Elem() + s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}}) +} + +func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + vx, vy = vx.Elem(), vy.Elem() + if vx.Type() != vy.Type() { + s.report(false, 0) + return + } + s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}}) +} + +func (s *state) report(eq bool, rf resultFlags) { + if rf&reportByIgnore == 0 { + if eq { + s.result.NumSame++ + rf |= reportEqual + } else { + s.result.NumDiff++ + rf |= reportUnequal + } + } + for _, r := range s.reporters { + r.Report(Result{flags: rf}) + } +} + +// recChecker tracks the state needed to periodically perform checks that +// user provided transformers are not stuck in an infinitely recursive cycle. +type recChecker struct{ next int } + +// Check scans the Path for any recursive transformers and panics when any +// recursive transformers are detected. Note that the presence of a +// recursive Transformer does not necessarily imply an infinite cycle. +// As such, this check only activates after some minimal number of path steps. +func (rc *recChecker) Check(p Path) { + const minLen = 1 << 16 + if rc.next == 0 { + rc.next = minLen + } + if len(p) < rc.next { + return + } + rc.next <<= 1 + + // Check whether the same transformer has appeared at least twice. 
+ var ss []string + m := map[Option]int{} + for _, ps := range p { + if t, ok := ps.(Transform); ok { + t := t.Option() + if m[t] == 1 { // Transformer was used exactly once before + tf := t.(*transformer).fnc.Type() + ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0))) + } + m[t]++ + } + } + if len(ss) > 0 { + const warning = "recursive set of Transformers detected" + const help = "consider using cmpopts.AcyclicTransformer" + set := strings.Join(ss, "\n\t") + panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help)) + } +} + +// dynChecker tracks the state needed to periodically perform checks that +// user provided functions are symmetric and deterministic. +// The zero value is safe for immediate use. +type dynChecker struct{ curr, next int } + +// Next increments the state and reports whether a check should be performed. +// +// Checks occur every Nth function call, where N is a triangular number: +// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ... +// See https://en.wikipedia.org/wiki/Triangular_number +// +// This sequence ensures that the cost of checks drops significantly as +// the number of function calls grows larger. +func (dc *dynChecker) Next() bool { + ok := dc.curr == dc.next + if ok { + dc.curr = 0 + dc.next++ + } + dc.curr++ + return ok +} + +// makeAddressable returns a value that is always addressable. +// It returns the input verbatim if it is already addressable, +// otherwise it creates a new value and returns an addressable copy. +func makeAddressable(v reflect.Value) reflect.Value { + if v.CanAddr() { + return v + } + vc := reflect.New(v.Type()).Elem() + vc.Set(v) + return vc +} diff --git a/extender/code/vendor/github.com/google/go-cmp/cmp/export_panic.go b/extender/code/vendor/github.com/google/go-cmp/cmp/export_panic.go new file mode 100644 index 000000000..dfa5d2137 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-cmp/cmp/export_panic.go @@ -0,0 +1,15 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build purego + +package cmp + +import "reflect" + +const supportExporters = false + +func retrieveUnexportedField(reflect.Value, reflect.StructField, bool) reflect.Value { + panic("no support for forcibly accessing unexported fields") +} diff --git a/extender/code/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/extender/code/vendor/github.com/google/go-cmp/cmp/export_unsafe.go new file mode 100644 index 000000000..351f1a34b --- /dev/null +++ b/extender/code/vendor/github.com/google/go-cmp/cmp/export_unsafe.go @@ -0,0 +1,35 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !purego + +package cmp + +import ( + "reflect" + "unsafe" +) + +const supportExporters = true + +// retrieveUnexportedField uses unsafe to forcibly retrieve any field from +// a struct such that the value has read-write permissions. +// +// The parent struct, v, must be addressable, while f must be a StructField +// describing the field to retrieve. If addr is false, +// then the returned value will be shallow copied to be non-addressable.
+func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value { + ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem() + if !addr { + // A field is addressable if and only if the struct is addressable. + // If the original parent value was not addressable, shallow copy the + // value to make it non-addressable to avoid leaking an implementation + // detail of how forcibly exporting a field works. + if ve.Kind() == reflect.Interface && ve.IsNil() { + return reflect.Zero(f.Type) + } + return reflect.ValueOf(ve.Interface()).Convert(f.Type) + } + return ve +} diff --git a/extender/code/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/extender/code/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go new file mode 100644 index 000000000..fe98dcc67 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go @@ -0,0 +1,17 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !cmp_debug + +package diff + +var debug debugger + +type debugger struct{} + +func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc { + return f +} +func (debugger) Update() {} +func (debugger) Finish() {} diff --git a/extender/code/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/extender/code/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go new file mode 100644 index 000000000..597b6ae56 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go @@ -0,0 +1,122 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build cmp_debug + +package diff + +import ( + "fmt" + "strings" + "sync" + "time" +) + +// The algorithm can be seen running in real-time by enabling debugging: +// go test -tags=cmp_debug -v +// +// Example output: +// === RUN TestDifference/#34 +// ┌───────────────────────────────┐ +// │ \ · · · · · · · · · · · · · · │ +// │ · # · · · · · · · · · · · · · │ +// │ · \ · · · · · · · · · · · · · │ +// │ · · \ · · · · · · · · · · · · │ +// │ · · · X # · · · · · · · · · · │ +// │ · · · # \ · · · · · · · · · · │ +// │ · · · · · # # · · · · · · · · │ +// │ · · · · · # \ · · · · · · · · │ +// │ · · · · · · · \ · · · · · · · │ +// │ · · · · · · · · \ · · · · · · │ +// │ · · · · · · · · · \ · · · · · │ +// │ · · · · · · · · · · \ · · # · │ +// │ · · · · · · · · · · · \ # # · │ +// │ · · · · · · · · · · · # # # · │ +// │ · · · · · · · · · · # # # # · │ +// │ · · · · · · · · · # # # # # · │ +// │ · · · · · · · · · · · · · · \ │ +// └───────────────────────────────┘ +// [.Y..M.XY......YXYXY.|] +// +// The grid represents the edit-graph where the horizontal axis represents +// list X and the vertical axis represents list Y. The start of the two lists +// is the top-left, while the ends are the bottom-right. The '·' represents +// an unexplored node in the graph. The '\' indicates that the two symbols +// from list X and Y are equal. The 'X' indicates that two symbols are similar +// (but not exactly equal) to each other. The '#' indicates that the two symbols +// are different (and not similar). The algorithm traverses this graph trying to +// make the paths starting in the top-left and the bottom-right connect. 
+// +// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents +// the currently established path from the forward and reverse searches, +// separated by a '|' character. + +const ( + updateDelay = 100 * time.Millisecond + finishDelay = 500 * time.Millisecond + ansiTerminal = true // ANSI escape codes used to move terminal cursor +) + +var debug debugger + +type debugger struct { + sync.Mutex + p1, p2 EditScript + fwdPath, revPath *EditScript + grid []byte + lines int +} + +func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc { + dbg.Lock() + dbg.fwdPath, dbg.revPath = p1, p2 + top := "┌─" + strings.Repeat("──", nx) + "┐\n" + row := "│ " + strings.Repeat("· ", nx) + "│\n" + btm := "└─" + strings.Repeat("──", nx) + "┘\n" + dbg.grid = []byte(top + strings.Repeat(row, ny) + btm) + dbg.lines = strings.Count(dbg.String(), "\n") + fmt.Print(dbg) + + // Wrap the EqualFunc so that we can intercept each result. + return func(ix, iy int) (r Result) { + cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")] + for i := range cell { + cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot + } + switch r = f(ix, iy); { + case r.Equal(): + cell[0] = '\\' + case r.Similar(): + cell[0] = 'X' + default: + cell[0] = '#' + } + return + } +} + +func (dbg *debugger) Update() { + dbg.print(updateDelay) +} + +func (dbg *debugger) Finish() { + dbg.print(finishDelay) + dbg.Unlock() +} + +func (dbg *debugger) String() string { + dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0] + for i := len(*dbg.revPath) - 1; i >= 0; i-- { + dbg.p2 = append(dbg.p2, (*dbg.revPath)[i]) + } + return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2) +} + +func (dbg *debugger) print(d time.Duration) { + if ansiTerminal { + fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor + } + fmt.Print(dbg) + time.Sleep(d) +} diff --git a/extender/code/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/extender/code/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go new file mode 100644 index 000000000..730e223ee --- /dev/null +++ b/extender/code/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -0,0 +1,392 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package diff implements an algorithm for producing edit-scripts. +// The edit-script is a sequence of operations needed to transform one list +// of symbols into another (or vice-versa). The edits allowed are insertions, +// deletions, and modifications. The summation of all edits is called the +// Levenshtein distance as this problem is well-known in computer science. +// +// This package prioritizes performance over accuracy. That is, the run time +// is more important than obtaining a minimal Levenshtein distance. +package diff + +import ( + "math/rand" + "time" + + "github.com/google/go-cmp/cmp/internal/flags" +) + +// EditType represents a single operation within an edit-script. +type EditType uint8 + +const ( + // Identity indicates that a symbol pair is identical in both list X and Y. + Identity EditType = iota + // UniqueX indicates that a symbol only exists in X and not Y. + UniqueX + // UniqueY indicates that a symbol only exists in Y and not X. + UniqueY + // Modified indicates that a symbol pair is a modification of each other. + Modified +) + +// EditScript represents the series of differences between two lists. 
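+//
+// As an illustrative sketch (not upstream documentation), a caller builds an
+// EditScript by comparing two lists through Difference:
+//
+//	x, y := []string{"A", "B", "C"}, []string{"A", "Y", "C"}
+//	es := Difference(len(x), len(y), func(ix, iy int) Result {
+//		return BoolResult(x[ix] == y[iy])
+//	})
+//	_ = es.Dist() // number of edits needed to turn x into y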
+type EditScript []EditType + +// String returns a human-readable string representing the edit-script where +// Identity, UniqueX, UniqueY, and Modified are represented by the +// '.', 'X', 'Y', and 'M' characters, respectively. +func (es EditScript) String() string { + b := make([]byte, len(es)) + for i, e := range es { + switch e { + case Identity: + b[i] = '.' + case UniqueX: + b[i] = 'X' + case UniqueY: + b[i] = 'Y' + case Modified: + b[i] = 'M' + default: + panic("invalid edit-type") + } + } + return string(b) +} + +// stats returns a histogram of the number of each type of edit operation. +func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) { + for _, e := range es { + switch e { + case Identity: + s.NI++ + case UniqueX: + s.NX++ + case UniqueY: + s.NY++ + case Modified: + s.NM++ + default: + panic("invalid edit-type") + } + } + return +} + +// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if +// lists X and Y are equal. +func (es EditScript) Dist() int { return len(es) - es.stats().NI } + +// LenX is the length of the X list. +func (es EditScript) LenX() int { return len(es) - es.stats().NY } + +// LenY is the length of the Y list. +func (es EditScript) LenY() int { return len(es) - es.stats().NX } + +// EqualFunc reports whether the symbols at indexes ix and iy are equal. +// When called by Difference, the index is guaranteed to be within nx and ny. +type EqualFunc func(ix int, iy int) Result + +// Result is the result of comparison. +// NumSame is the number of sub-elements that are equal. +// NumDiff is the number of sub-elements that are not equal. +type Result struct{ NumSame, NumDiff int } + +// BoolResult returns a Result that is either Equal or not Equal. +func BoolResult(b bool) Result { + if b { + return Result{NumSame: 1} // Equal, Similar + } else { + return Result{NumDiff: 2} // Not Equal, not Similar + } +} + +// Equal indicates whether the symbols are equal. Two symbols are equal +// if and only if NumDiff == 0. If Equal, then they are also Similar. +func (r Result) Equal() bool { return r.NumDiff == 0 } + +// Similar indicates whether two symbols are similar and may be represented +// by using the Modified type. As a special case, we consider binary comparisons +// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar. +// +// The exact ratio of NumSame to NumDiff to determine similarity may change. +func (r Result) Similar() bool { + // Use NumSame+1 to offset NumSame so that binary comparisons are similar. + return r.NumSame+1 >= r.NumDiff +} + +var randInt = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) + +// Difference reports whether two lists of lengths nx and ny are equal +// given the definition of equality provided as f. +// +// This function returns an edit-script, which is a sequence of operations +// needed to convert one list into the other. The following invariants for +// the edit-script are maintained: +// • eq == (es.Dist()==0) +// • nx == es.LenX() +// • ny == es.LenY() +// +// This algorithm is not guaranteed to be an optimal solution (i.e., one that +// produces an edit-script with a minimal Levenshtein distance). This algorithm +// favors performance over optimality. The exact output is not guaranteed to +// be stable and may change over time. +func Difference(nx, ny int, f EqualFunc) (es EditScript) { + // This algorithm is based on traversing what is known as an "edit-graph". + // See Figure 1 from "An O(ND) Difference Algorithm and Its Variations" + // by Eugene W. Myers. 
Since D can be as large as N itself, this is
+	// effectively O(N^2). Unlike the algorithm from that paper, we are not
+	// interested in the optimal path, but in at least some "decent" path.
+	//
+	// For example, let X and Y be lists of symbols:
+	//	X = [A B C A B B A]
+	//	Y = [C B A B A C]
+	//
+	// The edit-graph can be drawn as the following:
+	//	   A B C A B B A
+	//	  ┌─────────────┐
+	//	C │_|_|\|_|_|_|_│ 0
+	//	B │_|\|_|_|\|\|_│ 1
+	//	A │\|_|_|\|_|_|\│ 2
+	//	B │_|\|_|_|\|\|_│ 3
+	//	A │\|_|_|\|_|_|\│ 4
+	//	C │ | |\| | | | │ 5
+	//	  └─────────────┘ 6
+	//	   0 1 2 3 4 5 6 7
+	//
+	// List X is written along the horizontal axis, while list Y is written
+	// along the vertical axis. At any point on this grid, if the symbol in
+	// list X matches the corresponding symbol in list Y, then a '\' is drawn.
+	// The goal of any minimal edit-script algorithm is to find a path from the
+	// top-left corner to the bottom-right corner, while traveling through the
+	// fewest horizontal or vertical edges.
+	// A horizontal edge is equivalent to inserting a symbol from list X.
+	// A vertical edge is equivalent to inserting a symbol from list Y.
+	// A diagonal edge is equivalent to a matching symbol between both X and Y.
+
+	// To ensure flexibility in changing the algorithm in the future,
+	// introduce some degree of deliberate instability.
+	// This is achieved by fiddling the zigzag iterator to start searching
+	// the graph from the bottom-right rather than the top-left.
+	// The result may differ depending on the starting search location,
+	// but still produces a valid edit script.
+	zigzagInit := randInt // either 0 or 1
+	if flags.Deterministic {
+		zigzagInit = 0
+	}
+
+	// Invariants:
+	//	• 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
+	//	• 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
+	//
+	// In general:
+	//	• fwdFrontier.X < revFrontier.X
+	//	• fwdFrontier.Y < revFrontier.Y
+	// unless it is time for the algorithm to terminate.
+	fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
+	revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
+	fwdFrontier := fwdPath.point // Forward search frontier
+	revFrontier := revPath.point // Reverse search frontier
+
+	// Search budget bounds the cost of searching for better paths.
+	// The longest sequence of non-matching symbols that can be tolerated is
+	// approximately the square-root of the search budget.
+	searchBudget := 4 * (nx + ny) // O(n)
+
+	// The algorithm below is a greedy, meet-in-the-middle algorithm for
+	// computing sub-optimal edit-scripts between two lists.
+	//
+	// The algorithm is approximately as follows:
+	//	• Searching for differences switches back-and-forth between
+	//	a search that starts at the beginning (the top-left corner), and
+	//	a search that starts at the end (the bottom-right corner). The goal of
+	//	the search is to connect with the search from the opposite corner.
+	//	• As we search, we build a path in a greedy manner, where the first
+	//	match seen is added to the path (this is sub-optimal, but provides a
+	//	decent result in practice). When matches are found, we try the next pair
+	//	of symbols in the lists and follow all matches as far as possible.
+	//	• When searching for matches, we search along a diagonal going through
+	//	the "frontier" point. If no matches are found, we advance the
+	//	frontier towards the opposite corner.
+	//	• This algorithm terminates when either the X coordinates or the
+	//	Y coordinates of the forward and reverse frontier points intersect.
+	//
+	// This algorithm is correct even if searching only in the forward direction
+	// or in the reverse direction. We do both because it is commonly observed
+	// that two lists differ because elements were added to the front
+	// or end of the other list.
+	//
+	// Running the tests with the "cmp_debug" build tag prints a visualization
+	// of the algorithm running in real-time. This is educational for
+	// understanding how the algorithm works. See debug_enable.go.
+	f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
+	for {
+		// Forward search from the beginning.
+		if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
+			break
+		}
+		for stop1, stop2, i := false, false, zigzagInit; !(stop1 && stop2) && searchBudget > 0; i++ {
+			// Search in a diagonal pattern for a match.
+			z := zigzag(i)
+			p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
+			switch {
+			case p.X >= revPath.X || p.Y < fwdPath.Y:
+				stop1 = true // Hit top-right corner
+			case p.Y >= revPath.Y || p.X < fwdPath.X:
+				stop2 = true // Hit bottom-left corner
+			case f(p.X, p.Y).Equal():
+				// Match found, so connect the path to this point.
+				fwdPath.connect(p, f)
+				fwdPath.append(Identity)
+				// Follow sequence of matches as far as possible.
+				for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
+					if !f(fwdPath.X, fwdPath.Y).Equal() {
+						break
+					}
+					fwdPath.append(Identity)
+				}
+				fwdFrontier = fwdPath.point
+				stop1, stop2 = true, true
+			default:
+				searchBudget-- // Match not found
+			}
+			debug.Update()
+		}
+		// Advance the frontier towards reverse point.
+		if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y {
+			fwdFrontier.X++
+		} else {
+			fwdFrontier.Y++
+		}
+
+		// Reverse search from the end.
+		if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
+			break
+		}
+		for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
+			// Search in a diagonal pattern for a match.
+			z := zigzag(i)
+			p := point{revFrontier.X - z, revFrontier.Y + z}
+			switch {
+			case fwdPath.X >= p.X || revPath.Y < p.Y:
+				stop1 = true // Hit bottom-left corner
+			case fwdPath.Y >= p.Y || revPath.X < p.X:
+				stop2 = true // Hit top-right corner
+			case f(p.X-1, p.Y-1).Equal():
+				// Match found, so connect the path to this point.
+				revPath.connect(p, f)
+				revPath.append(Identity)
+				// Follow sequence of matches as far as possible.
+				for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
+					if !f(revPath.X-1, revPath.Y-1).Equal() {
+						break
+					}
+					revPath.append(Identity)
+				}
+				revFrontier = revPath.point
+				stop1, stop2 = true, true
+			default:
+				searchBudget-- // Match not found
+			}
+			debug.Update()
+		}
+		// Advance the frontier towards forward point.
+		if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y {
+			revFrontier.X--
+		} else {
+			revFrontier.Y--
+		}
+	}
+
+	// Join the forward and reverse paths and then append the reverse path.
+	fwdPath.connect(revPath.point, f)
+	for i := len(revPath.es) - 1; i >= 0; i-- {
+		t := revPath.es[i]
+		revPath.es = revPath.es[:i]
+		fwdPath.append(t)
+	}
+	debug.Finish()
+	return fwdPath.es
+}
+
+type path struct {
+	dir   int // +1 if forward, -1 if reverse
+	point     // Leading point of the EditScript path
+	es    EditScript
+}
+
+// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types
+// to the edit-script to connect p.point to dst.
+func (p *path) connect(dst point, f EqualFunc) { + if p.dir > 0 { + // Connect in forward direction. + for dst.X > p.X && dst.Y > p.Y { + switch r := f(p.X, p.Y); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case dst.X-p.X >= dst.Y-p.Y: + p.append(UniqueX) + default: + p.append(UniqueY) + } + } + for dst.X > p.X { + p.append(UniqueX) + } + for dst.Y > p.Y { + p.append(UniqueY) + } + } else { + // Connect in reverse direction. + for p.X > dst.X && p.Y > dst.Y { + switch r := f(p.X-1, p.Y-1); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case p.Y-dst.Y >= p.X-dst.X: + p.append(UniqueY) + default: + p.append(UniqueX) + } + } + for p.X > dst.X { + p.append(UniqueX) + } + for p.Y > dst.Y { + p.append(UniqueY) + } + } +} + +func (p *path) append(t EditType) { + p.es = append(p.es, t) + switch t { + case Identity, Modified: + p.add(p.dir, p.dir) + case UniqueX: + p.add(p.dir, 0) + case UniqueY: + p.add(0, p.dir) + } + debug.Update() +} + +type point struct{ X, Y int } + +func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy } + +// zigzag maps a consecutive sequence of integers to a zig-zag sequence. +// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] +func zigzag(x int) int { + if x&1 != 0 { + x = ^x + } + return x >> 1 +} diff --git a/extender/code/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/extender/code/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go new file mode 100644 index 000000000..a9e7fc0b5 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go @@ -0,0 +1,9 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package flags + +// Deterministic controls whether the output of Diff should be deterministic. +// This is only used for testing. +var Deterministic bool diff --git a/extender/code/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go b/extender/code/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go new file mode 100644 index 000000000..01aed0a15 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go @@ -0,0 +1,10 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !go1.10 + +package flags + +// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. +const AtLeastGo110 = false diff --git a/extender/code/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go b/extender/code/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go new file mode 100644 index 000000000..c0b667f58 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go @@ -0,0 +1,10 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build go1.10 + +package flags + +// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. 
+const AtLeastGo110 = true diff --git a/extender/code/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/extender/code/vendor/github.com/google/go-cmp/cmp/internal/function/func.go new file mode 100644 index 000000000..ace1dbe86 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-cmp/cmp/internal/function/func.go @@ -0,0 +1,99 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package function provides functionality for identifying function types. +package function + +import ( + "reflect" + "regexp" + "runtime" + "strings" +) + +type funcType int + +const ( + _ funcType = iota + + tbFunc // func(T) bool + ttbFunc // func(T, T) bool + trbFunc // func(T, R) bool + tibFunc // func(T, I) bool + trFunc // func(T) R + + Equal = ttbFunc // func(T, T) bool + EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool + Transformer = trFunc // func(T) R + ValueFilter = ttbFunc // func(T, T) bool + Less = ttbFunc // func(T, T) bool + ValuePredicate = tbFunc // func(T) bool + KeyValuePredicate = trbFunc // func(T, R) bool +) + +var boolType = reflect.TypeOf(true) + +// IsType reports whether the reflect.Type is of the specified function type. +func IsType(t reflect.Type, ft funcType) bool { + if t == nil || t.Kind() != reflect.Func || t.IsVariadic() { + return false + } + ni, no := t.NumIn(), t.NumOut() + switch ft { + case tbFunc: // func(T) bool + if ni == 1 && no == 1 && t.Out(0) == boolType { + return true + } + case ttbFunc: // func(T, T) bool + if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType { + return true + } + case trbFunc: // func(T, R) bool + if ni == 2 && no == 1 && t.Out(0) == boolType { + return true + } + case tibFunc: // func(T, I) bool + if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType { + return true + } + case trFunc: // func(T) R + if ni == 1 && no == 1 { + return true + } + } + return false +} + +var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`) + +// NameOf returns the name of the function value. +func NameOf(v reflect.Value) string { + fnc := runtime.FuncForPC(v.Pointer()) + if fnc == nil { + return "" + } + fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm" + + // Method closures have a "-fm" suffix. + fullName = strings.TrimSuffix(fullName, "-fm") + + var name string + for len(fullName) > 0 { + inParen := strings.HasSuffix(fullName, ")") + fullName = strings.TrimSuffix(fullName, ")") + + s := lastIdentRx.FindString(fullName) + if s == "" { + break + } + name = s + "." + name + fullName = strings.TrimSuffix(fullName, s) + + if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 { + fullName = fullName[:i] + } + fullName = strings.TrimSuffix(fullName, ".") + } + return strings.TrimSuffix(name, ".") +} diff --git a/extender/code/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/extender/code/vendor/github.com/google/go-cmp/cmp/internal/value/name.go new file mode 100644 index 000000000..8228e7d51 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-cmp/cmp/internal/value/name.go @@ -0,0 +1,157 @@ +// Copyright 2020, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
+ +package value + +import ( + "reflect" + "strconv" +) + +// TypeString is nearly identical to reflect.Type.String, +// but has an additional option to specify that full type names be used. +func TypeString(t reflect.Type, qualified bool) string { + return string(appendTypeName(nil, t, qualified, false)) +} + +func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte { + // BUG: Go reflection provides no way to disambiguate two named types + // of the same name and within the same package, + // but declared within the namespace of different functions. + + // Named type. + if t.Name() != "" { + if qualified && t.PkgPath() != "" { + b = append(b, '"') + b = append(b, t.PkgPath()...) + b = append(b, '"') + b = append(b, '.') + b = append(b, t.Name()...) + } else { + b = append(b, t.String()...) + } + return b + } + + // Unnamed type. + switch k := t.Kind(); k { + case reflect.Bool, reflect.String, reflect.UnsafePointer, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + b = append(b, k.String()...) + case reflect.Chan: + if t.ChanDir() == reflect.RecvDir { + b = append(b, "<-"...) + } + b = append(b, "chan"...) + if t.ChanDir() == reflect.SendDir { + b = append(b, "<-"...) + } + b = append(b, ' ') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Func: + if !elideFunc { + b = append(b, "func"...) + } + b = append(b, '(') + for i := 0; i < t.NumIn(); i++ { + if i > 0 { + b = append(b, ", "...) + } + if i == t.NumIn()-1 && t.IsVariadic() { + b = append(b, "..."...) + b = appendTypeName(b, t.In(i).Elem(), qualified, false) + } else { + b = appendTypeName(b, t.In(i), qualified, false) + } + } + b = append(b, ')') + switch t.NumOut() { + case 0: + // Do nothing + case 1: + b = append(b, ' ') + b = appendTypeName(b, t.Out(0), qualified, false) + default: + b = append(b, " ("...) + for i := 0; i < t.NumOut(); i++ { + if i > 0 { + b = append(b, ", "...) + } + b = appendTypeName(b, t.Out(i), qualified, false) + } + b = append(b, ')') + } + case reflect.Struct: + b = append(b, "struct{ "...) + for i := 0; i < t.NumField(); i++ { + if i > 0 { + b = append(b, "; "...) + } + sf := t.Field(i) + if !sf.Anonymous { + if qualified && sf.PkgPath != "" { + b = append(b, '"') + b = append(b, sf.PkgPath...) + b = append(b, '"') + b = append(b, '.') + } + b = append(b, sf.Name...) + b = append(b, ' ') + } + b = appendTypeName(b, sf.Type, qualified, false) + if sf.Tag != "" { + b = append(b, ' ') + b = strconv.AppendQuote(b, string(sf.Tag)) + } + } + if b[len(b)-1] == ' ' { + b = b[:len(b)-1] + } else { + b = append(b, ' ') + } + b = append(b, '}') + case reflect.Slice, reflect.Array: + b = append(b, '[') + if k == reflect.Array { + b = strconv.AppendUint(b, uint64(t.Len()), 10) + } + b = append(b, ']') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Map: + b = append(b, "map["...) + b = appendTypeName(b, t.Key(), qualified, false) + b = append(b, ']') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Ptr: + b = append(b, '*') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Interface: + b = append(b, "interface{ "...) + for i := 0; i < t.NumMethod(); i++ { + if i > 0 { + b = append(b, "; "...) + } + m := t.Method(i) + if qualified && m.PkgPath != "" { + b = append(b, '"') + b = append(b, m.PkgPath...) 
+ b = append(b, '"') + b = append(b, '.') + } + b = append(b, m.Name...) + b = appendTypeName(b, m.Type, qualified, true) + } + if b[len(b)-1] == ' ' { + b = b[:len(b)-1] + } else { + b = append(b, ' ') + } + b = append(b, '}') + default: + panic("invalid kind: " + k.String()) + } + return b +} diff --git a/extender/code/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/extender/code/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go new file mode 100644 index 000000000..e9e384a1c --- /dev/null +++ b/extender/code/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go @@ -0,0 +1,33 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build purego + +package value + +import "reflect" + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. +type Pointer struct { + p uintptr + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. +func PointerOf(v reflect.Value) Pointer { + // NOTE: Storing a pointer as an uintptr is technically incorrect as it + // assumes that the GC implementation does not use a moving collector. + return Pointer{v.Pointer(), v.Type()} +} + +// IsNil reports whether the pointer is nil. +func (p Pointer) IsNil() bool { + return p.p == 0 +} + +// Uintptr returns the pointer as a uintptr. +func (p Pointer) Uintptr() uintptr { + return p.p +} diff --git a/extender/code/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/extender/code/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go new file mode 100644 index 000000000..b50c17ec7 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go @@ -0,0 +1,36 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !purego + +package value + +import ( + "reflect" + "unsafe" +) + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. +type Pointer struct { + p unsafe.Pointer + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. +func PointerOf(v reflect.Value) Pointer { + // The proper representation of a pointer is unsafe.Pointer, + // which is necessary if the GC ever uses a moving collector. + return Pointer{unsafe.Pointer(v.Pointer()), v.Type()} +} + +// IsNil reports whether the pointer is nil. +func (p Pointer) IsNil() bool { + return p.p == nil +} + +// Uintptr returns the pointer as a uintptr. +func (p Pointer) Uintptr() uintptr { + return uintptr(p.p) +} diff --git a/extender/code/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/extender/code/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go new file mode 100644 index 000000000..24fbae6e3 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go @@ -0,0 +1,106 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package value + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// SortKeys sorts a list of map keys, deduplicating keys if necessary. +// The type of each value must be comparable. 
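+//
+// For example (an illustrative note, not upstream documentation), the keys of
+// map[int]bool{3: true, 1: true, 2: true} are returned as [1 2 3].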
+func SortKeys(vs []reflect.Value) []reflect.Value { + if len(vs) == 0 { + return vs + } + + // Sort the map keys. + sort.SliceStable(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) }) + + // Deduplicate keys (fails for NaNs). + vs2 := vs[:1] + for _, v := range vs[1:] { + if isLess(vs2[len(vs2)-1], v) { + vs2 = append(vs2, v) + } + } + return vs2 +} + +// isLess is a generic function for sorting arbitrary map keys. +// The inputs must be of the same type and must be comparable. +func isLess(x, y reflect.Value) bool { + switch x.Type().Kind() { + case reflect.Bool: + return !x.Bool() && y.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return x.Int() < y.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return x.Uint() < y.Uint() + case reflect.Float32, reflect.Float64: + // NOTE: This does not sort -0 as less than +0 + // since Go maps treat -0 and +0 as equal keys. + fx, fy := x.Float(), y.Float() + return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy) + case reflect.Complex64, reflect.Complex128: + cx, cy := x.Complex(), y.Complex() + rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy) + if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) { + return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy) + } + return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry) + case reflect.Ptr, reflect.UnsafePointer, reflect.Chan: + return x.Pointer() < y.Pointer() + case reflect.String: + return x.String() < y.String() + case reflect.Array: + for i := 0; i < x.Len(); i++ { + if isLess(x.Index(i), y.Index(i)) { + return true + } + if isLess(y.Index(i), x.Index(i)) { + return false + } + } + return false + case reflect.Struct: + for i := 0; i < x.NumField(); i++ { + if isLess(x.Field(i), y.Field(i)) { + return true + } + if isLess(y.Field(i), x.Field(i)) { + return false + } + } + return false + case reflect.Interface: + vx, vy := x.Elem(), y.Elem() + if !vx.IsValid() || !vy.IsValid() { + return !vx.IsValid() && vy.IsValid() + } + tx, ty := vx.Type(), vy.Type() + if tx == ty { + return isLess(x.Elem(), y.Elem()) + } + if tx.Kind() != ty.Kind() { + return vx.Kind() < vy.Kind() + } + if tx.String() != ty.String() { + return tx.String() < ty.String() + } + if tx.PkgPath() != ty.PkgPath() { + return tx.PkgPath() < ty.PkgPath() + } + // This can happen in rare situations, so we fallback to just comparing + // the unique pointer for a reflect.Type. This guarantees deterministic + // ordering within a program, but it is obviously not stable. + return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer() + default: + // Must be Func, Map, or Slice; which are not comparable. + panic(fmt.Sprintf("%T is not comparable", x.Type())) + } +} diff --git a/extender/code/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/extender/code/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go new file mode 100644 index 000000000..06a8ffd03 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go @@ -0,0 +1,48 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package value + +import ( + "math" + "reflect" +) + +// IsZero reports whether v is the zero value. +// This does not rely on Interface and so can be used on unexported fields. 
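+//
+// For example (an illustrative note, not upstream documentation),
+// IsZero(reflect.ValueOf("")) and IsZero(reflect.ValueOf([2]int{})) report
+// true, while IsZero(reflect.ValueOf([2]int{0, 5})) reports false.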
+func IsZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return v.Bool() == false + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return math.Float64bits(v.Float()) == 0 + case reflect.Complex64, reflect.Complex128: + return math.Float64bits(real(v.Complex())) == 0 && math.Float64bits(imag(v.Complex())) == 0 + case reflect.String: + return v.String() == "" + case reflect.UnsafePointer: + return v.Pointer() == 0 + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + return v.IsNil() + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if !IsZero(v.Index(i)) { + return false + } + } + return true + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if !IsZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/extender/code/vendor/github.com/google/go-cmp/cmp/options.go b/extender/code/vendor/github.com/google/go-cmp/cmp/options.go new file mode 100644 index 000000000..4b0407a7f --- /dev/null +++ b/extender/code/vendor/github.com/google/go-cmp/cmp/options.go @@ -0,0 +1,552 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "regexp" + "strings" + + "github.com/google/go-cmp/cmp/internal/function" +) + +// Option configures for specific behavior of Equal and Diff. In particular, +// the fundamental Option functions (Ignore, Transformer, and Comparer), +// configure how equality is determined. +// +// The fundamental options may be composed with filters (FilterPath and +// FilterValues) to control the scope over which they are applied. +// +// The cmp/cmpopts package provides helper functions for creating options that +// may be used with Equal and Diff. +type Option interface { + // filter applies all filters and returns the option that remains. + // Each option may only read s.curPath and call s.callTTBFunc. + // + // An Options is returned only if multiple comparers or transformers + // can apply simultaneously and will only contain values of those types + // or sub-Options containing values of those types. + filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption +} + +// applicableOption represents the following types: +// Fundamental: ignore | validator | *comparer | *transformer +// Grouping: Options +type applicableOption interface { + Option + + // apply executes the option, which may mutate s or panic. + apply(s *state, vx, vy reflect.Value) +} + +// coreOption represents the following types: +// Fundamental: ignore | validator | *comparer | *transformer +// Filters: *pathFilter | *valuesFilter +type coreOption interface { + Option + isCore() +} + +type core struct{} + +func (core) isCore() {} + +// Options is a list of Option values that also satisfies the Option interface. +// Helper comparison packages may return an Options value when packing multiple +// Option values into a single Option. When this package processes an Options, +// it will be implicitly expanded into a flat list. +// +// Applying a filter on an Options is equivalent to applying that same filter +// on all individual options held within. 
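+//
+// As an illustrative sketch (not upstream documentation), a hypothetical
+// DefaultOptions helper might bundle several comparers into a single Option:
+//
+//	func DefaultOptions() Option {
+//		return Options{
+//			Comparer(func(x, y reflect.Type) bool { return x == y }),
+//			Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() }),
+//		}
+//	}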
+type Options []Option + +func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) { + for _, opt := range opts { + switch opt := opt.filter(s, t, vx, vy); opt.(type) { + case ignore: + return ignore{} // Only ignore can short-circuit evaluation + case validator: + out = validator{} // Takes precedence over comparer or transformer + case *comparer, *transformer, Options: + switch out.(type) { + case nil: + out = opt + case validator: + // Keep validator + case *comparer, *transformer, Options: + out = Options{out, opt} // Conflicting comparers or transformers + } + } + } + return out +} + +func (opts Options) apply(s *state, _, _ reflect.Value) { + const warning = "ambiguous set of applicable options" + const help = "consider using filters to ensure at most one Comparer or Transformer may apply" + var ss []string + for _, opt := range flattenOptions(nil, opts) { + ss = append(ss, fmt.Sprint(opt)) + } + set := strings.Join(ss, "\n\t") + panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help)) +} + +func (opts Options) String() string { + var ss []string + for _, opt := range opts { + ss = append(ss, fmt.Sprint(opt)) + } + return fmt.Sprintf("Options{%s}", strings.Join(ss, ", ")) +} + +// FilterPath returns a new Option where opt is only evaluated if filter f +// returns true for the current Path in the value tree. +// +// This filter is called even if a slice element or map entry is missing and +// provides an opportunity to ignore such cases. The filter function must be +// symmetric such that the filter result is identical regardless of whether the +// missing value is from x or y. +// +// The option passed in may be an Ignore, Transformer, Comparer, Options, or +// a previously filtered Option. +func FilterPath(f func(Path) bool, opt Option) Option { + if f == nil { + panic("invalid path filter function") + } + if opt := normalizeOption(opt); opt != nil { + return &pathFilter{fnc: f, opt: opt} + } + return nil +} + +type pathFilter struct { + core + fnc func(Path) bool + opt Option +} + +func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { + if f.fnc(s.curPath) { + return f.opt.filter(s, t, vx, vy) + } + return nil +} + +func (f pathFilter) String() string { + return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt) +} + +// FilterValues returns a new Option where opt is only evaluated if filter f, +// which is a function of the form "func(T, T) bool", returns true for the +// current pair of values being compared. If either value is invalid or +// the type of the values is not assignable to T, then this filter implicitly +// returns false. +// +// The filter function must be +// symmetric (i.e., agnostic to the order of the inputs) and +// deterministic (i.e., produces the same result when given the same inputs). +// If T is an interface, it is possible that f is called with two values with +// different concrete types that both implement T. +// +// The option passed in may be an Ignore, Transformer, Comparer, Options, or +// a previously filtered Option. 
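+//
+// As an illustrative sketch (not upstream documentation), differences between
+// floats within a tolerance can be suppressed by combining FilterValues with
+// Ignore:
+//
+//	opt := FilterValues(
+//		func(x, y float64) bool { return math.Abs(x-y) < 0.1 },
+//		Ignore(),
+//	)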
+func FilterValues(f interface{}, opt Option) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() { + panic(fmt.Sprintf("invalid values filter function: %T", f)) + } + if opt := normalizeOption(opt); opt != nil { + vf := &valuesFilter{fnc: v, opt: opt} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + vf.typ = ti + } + return vf + } + return nil +} + +type valuesFilter struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool + opt Option +} + +func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() { + return nil + } + if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) { + return f.opt.filter(s, t, vx, vy) + } + return nil +} + +func (f valuesFilter) String() string { + return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt) +} + +// Ignore is an Option that causes all comparisons to be ignored. +// This value is intended to be combined with FilterPath or FilterValues. +// It is an error to pass an unfiltered Ignore option to Equal. +func Ignore() Option { return ignore{} } + +type ignore struct{ core } + +func (ignore) isFiltered() bool { return false } +func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} } +func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) } +func (ignore) String() string { return "Ignore()" } + +// validator is a sentinel Option type to indicate that some options could not +// be evaluated due to unexported fields, missing slice elements, or +// missing map entries. Both values are validator only for unexported fields. +type validator struct{ core } + +func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vy.IsValid() { + return validator{} + } + if !vx.CanInterface() || !vy.CanInterface() { + return validator{} + } + return nil +} +func (validator) apply(s *state, vx, vy reflect.Value) { + // Implies missing slice element or map entry. + if !vx.IsValid() || !vy.IsValid() { + s.report(vx.IsValid() == vy.IsValid(), 0) + return + } + + // Unable to Interface implies unexported field without visibility access. + if !vx.CanInterface() || !vy.CanInterface() { + help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported" + var name string + if t := s.curPath.Index(-2).Type(); t.Name() != "" { + // Named type with unexported fields. + name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType + if _, ok := reflect.New(t).Interface().(error); ok { + help = "consider using cmpopts.EquateErrors to compare error values" + } + } else { + // Unnamed type with unexported fields. Derive PkgPath from field. + var pkgPath string + for i := 0; i < t.NumField() && pkgPath == ""; i++ { + pkgPath = t.Field(i).PkgPath + } + name = fmt.Sprintf("%q.(%v)", pkgPath, t.String()) // e.g., "path/to/package".(struct { a int }) + } + panic(fmt.Sprintf("cannot handle unexported field at %#v:\n\t%v\n%s", s.curPath, name, help)) + } + + panic("not reachable") +} + +// identRx represents a valid identifier according to the Go specification. 
+const identRx = `[_\p{L}][_\p{L}\p{N}]*` + +var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) + +// Transformer returns an Option that applies a transformation function that +// converts values of a certain type into that of another. +// +// The transformer f must be a function "func(T) R" that converts values of +// type T to those of type R and is implicitly filtered to input values +// assignable to T. The transformer must not mutate T in any way. +// +// To help prevent some cases of infinite recursive cycles applying the +// same transform to the output of itself (e.g., in the case where the +// input and output types are the same), an implicit filter is added such that +// a transformer is applicable only if that exact transformer is not already +// in the tail of the Path since the last non-Transform step. +// For situations where the implicit filter is still insufficient, +// consider using cmpopts.AcyclicTransformer, which adds a filter +// to prevent the transformer from being recursively applied upon itself. +// +// The name is a user provided label that is used as the Transform.Name in the +// transformation PathStep (and eventually shown in the Diff output). +// The name must be a valid identifier or qualified identifier in Go syntax. +// If empty, an arbitrary name is used. +func Transformer(name string, f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Transformer) || v.IsNil() { + panic(fmt.Sprintf("invalid transformer function: %T", f)) + } + if name == "" { + name = function.NameOf(v) + if !identsRx.MatchString(name) { + name = "λ" // Lambda-symbol as placeholder name + } + } else if !identsRx.MatchString(name) { + panic(fmt.Sprintf("invalid name: %q", name)) + } + tr := &transformer{name: name, fnc: reflect.ValueOf(f)} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + tr.typ = ti + } + return tr +} + +type transformer struct { + core + name string + typ reflect.Type // T + fnc reflect.Value // func(T) R +} + +func (tr *transformer) isFiltered() bool { return tr.typ != nil } + +func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption { + for i := len(s.curPath) - 1; i >= 0; i-- { + if t, ok := s.curPath[i].(Transform); !ok { + break // Hit most recent non-Transform step + } else if tr == t.trans { + return nil // Cannot directly use same Transform + } + } + if tr.typ == nil || t.AssignableTo(tr.typ) { + return tr + } + return nil +} + +func (tr *transformer) apply(s *state, vx, vy reflect.Value) { + step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}} + vvx := s.callTRFunc(tr.fnc, vx, step) + vvy := s.callTRFunc(tr.fnc, vy, step) + step.vx, step.vy = vvx, vvy + s.compareAny(step) +} + +func (tr transformer) String() string { + return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc)) +} + +// Comparer returns an Option that determines whether two values are equal +// to each other. +// +// The comparer f must be a function "func(T, T) bool" and is implicitly +// filtered to input values assignable to T. If T is an interface, it is +// possible that f is called with two values of different concrete types that +// both implement T. 
+//
+// The equality function must be:
+//	• Symmetric: equal(x, y) == equal(y, x)
+//	• Deterministic: equal(x, y) == equal(x, y)
+//	• Pure: equal(x, y) does not modify x or y
+func Comparer(f interface{}) Option {
+	v := reflect.ValueOf(f)
+	if !function.IsType(v.Type(), function.Equal) || v.IsNil() {
+		panic(fmt.Sprintf("invalid comparer function: %T", f))
+	}
+	cm := &comparer{fnc: v}
+	if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
+		cm.typ = ti
+	}
+	return cm
+}
+
+type comparer struct {
+	core
+	typ reflect.Type  // T
+	fnc reflect.Value // func(T, T) bool
+}
+
+func (cm *comparer) isFiltered() bool { return cm.typ != nil }
+
+func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption {
+	if cm.typ == nil || t.AssignableTo(cm.typ) {
+		return cm
+	}
+	return nil
+}
+
+func (cm *comparer) apply(s *state, vx, vy reflect.Value) {
+	eq := s.callTTBFunc(cm.fnc, vx, vy)
+	s.report(eq, reportByFunc)
+}
+
+func (cm comparer) String() string {
+	return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc))
+}
+
+// Exporter returns an Option that specifies whether Equal is allowed to
+// introspect into the unexported fields of certain struct types.
+//
+// Users of this option must understand that comparing on unexported fields
+// from external packages is not safe since changes in the internal
+// implementation of some external package may cause the result of Equal
+// to unexpectedly change. However, it may be valid to use this option on types
+// defined in an internal package where the semantic meaning of an unexported
+// field is in the control of the user.
+//
+// In many cases, a custom Comparer should be used instead that defines
+// equality as a function of the public API of a type rather than the underlying
+// unexported implementation.
+//
+// For example, the reflect.Type documentation defines equality to be determined
+// by the == operator on the interface (essentially performing a shallow pointer
+// comparison) and most attempts to compare *regexp.Regexp types are interested
+// in only checking that the regular expression strings are equal.
+// Both of these are accomplished using Comparers:
+//
+//	Comparer(func(x, y reflect.Type) bool { return x == y })
+//	Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })
+//
+// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore
+// all unexported fields on specified struct types.
+func Exporter(f func(reflect.Type) bool) Option {
+	if !supportExporters {
+		panic("Exporter is not supported on purego builds")
+	}
+	return exporter(f)
+}
+
+type exporter func(reflect.Type) bool
+
+func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
+	panic("not implemented")
+}
+
+// AllowUnexported returns an Options that allows Equal to forcibly introspect
+// unexported fields of the specified struct types.
+//
+// See Exporter for the proper use of this option.
+func AllowUnexported(types ...interface{}) Option {
+	m := make(map[reflect.Type]bool)
+	for _, typ := range types {
+		t := reflect.TypeOf(typ)
+		if t.Kind() != reflect.Struct {
+			panic(fmt.Sprintf("invalid struct type: %T", typ))
+		}
+		m[t] = true
+	}
+	return exporter(func(t reflect.Type) bool { return m[t] })
+}
+
+// Result represents the comparison result for a single node and
+// is provided by cmp when calling Report (see Reporter).
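+//
+// As an illustrative sketch (not upstream documentation), a hypothetical
+// reporter that records the path to every unequal leaf might look like:
+//
+//	type leafReporter struct{ path Path }
+//
+//	func (r *leafReporter) PushStep(ps PathStep) { r.path = append(r.path, ps) }
+//	func (r *leafReporter) PopStep()             { r.path = r.path[:len(r.path)-1] }
+//	func (r *leafReporter) Report(res Result) {
+//		if !res.Equal() {
+//			fmt.Printf("%#v differs\n", r.path)
+//		}
+//	}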
+type Result struct {
+	_     [0]func() // Make Result incomparable
+	flags resultFlags
+}
+
+// Equal reports whether the node was determined to be equal or not.
+// As a special case, ignored nodes are considered equal.
+func (r Result) Equal() bool {
+	return r.flags&(reportEqual|reportByIgnore) != 0
+}
+
+// ByIgnore reports whether the node is equal because it was ignored.
+// This never reports true if Equal reports false.
+func (r Result) ByIgnore() bool {
+	return r.flags&reportByIgnore != 0
+}
+
+// ByMethod reports whether the Equal method determined equality.
+func (r Result) ByMethod() bool {
+	return r.flags&reportByMethod != 0
+}
+
+// ByFunc reports whether a Comparer function determined equality.
+func (r Result) ByFunc() bool {
+	return r.flags&reportByFunc != 0
+}
+
+// ByCycle reports whether a reference cycle was detected.
+func (r Result) ByCycle() bool {
+	return r.flags&reportByCycle != 0
+}
+
+type resultFlags uint
+
+const (
+	_ resultFlags = (1 << iota) / 2
+
+	reportEqual
+	reportUnequal
+	reportByIgnore
+	reportByMethod
+	reportByFunc
+	reportByCycle
+)
+
+// Reporter is an Option that can be passed to Equal. When Equal traverses
+// the value trees, it calls PushStep as it descends into each node in the
+// tree and PopStep as it ascends out of the node. The leaves of the tree are
+// either compared (determined to be equal or not equal) or ignored and reported
+// as such by calling the Report method.
+func Reporter(r interface {
+	// PushStep is called when a tree-traversal operation is performed.
+	// The PathStep itself is only valid until the step is popped.
+	// The PathStep.Values are valid for the duration of the entire traversal
+	// and must not be mutated.
+	//
+	// Equal always calls PushStep at the start to provide an operation-less
+	// PathStep used to report the root values.
+	//
+	// Within a slice, the exact set of inserted, removed, or modified elements
+	// is unspecified and may change in future implementations.
+	// The entries of a map are iterated through in an unspecified order.
+	PushStep(PathStep)
+
+	// Report is called exactly once on leaf nodes to report whether the
+	// comparison identified the node as equal, unequal, or ignored.
+	// A leaf node is one that is immediately preceded by and followed by
+	// a pair of PushStep and PopStep calls.
+	Report(Result)
+
+	// PopStep ascends back up the value tree.
+	// There is always a matching pop call for every push call.
+	PopStep()
+}) Option {
+	return reporter{r}
+}
+
+type reporter struct{ reporterIface }
+type reporterIface interface {
+	PushStep(PathStep)
+	Report(Result)
+	PopStep()
+}
+
+func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
+	panic("not implemented")
+}
+
+// normalizeOption normalizes the input options such that all Options groups
+// are flattened and groups with a single element are reduced to that element.
+// Only coreOptions and Options containing coreOptions are allowed.
+func normalizeOption(src Option) Option {
+	switch opts := flattenOptions(nil, Options{src}); len(opts) {
+	case 0:
+		return nil
+	case 1:
+		return opts[0]
+	default:
+		return opts
+	}
+}
+
+// flattenOptions copies all options in src to dst as a flat list.
+// Only coreOptions and Options containing coreOptions are allowed.
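+//
+// For example (an illustrative note, not upstream documentation), given
+// src = Options{Options{a, b}, c}, flattenOptions returns Options{a, b, c},
+// where a, b, and c stand for arbitrary core options.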
+func flattenOptions(dst, src Options) Options { + for _, opt := range src { + switch opt := opt.(type) { + case nil: + continue + case Options: + dst = flattenOptions(dst, opt) + case coreOption: + dst = append(dst, opt) + default: + panic(fmt.Sprintf("invalid option type: %T", opt)) + } + } + return dst +} diff --git a/extender/code/vendor/github.com/google/go-cmp/cmp/path.go b/extender/code/vendor/github.com/google/go-cmp/cmp/path.go new file mode 100644 index 000000000..603dbb002 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-cmp/cmp/path.go @@ -0,0 +1,378 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "strings" + "unicode" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/value" +) + +// Path is a list of PathSteps describing the sequence of operations to get +// from some root type to the current position in the value tree. +// The first Path element is always an operation-less PathStep that exists +// simply to identify the initial type. +// +// When traversing structs with embedded structs, the embedded struct will +// always be accessed as a field before traversing the fields of the +// embedded struct themselves. That is, an exported field from the +// embedded struct will never be accessed directly from the parent struct. +type Path []PathStep + +// PathStep is a union-type for specific operations to traverse +// a value's tree structure. Users of this package never need to implement +// these types as values of this type will be returned by this package. +// +// Implementations of this interface are +// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform. +type PathStep interface { + String() string + + // Type is the resulting type after performing the path step. + Type() reflect.Type + + // Values is the resulting values after performing the path step. + // The type of each valid value is guaranteed to be identical to Type. + // + // In some cases, one or both may be invalid or have restrictions: + // • For StructField, both are not interface-able if the current field + // is unexported and the struct type is not explicitly permitted by + // an Exporter to traverse unexported fields. + // • For SliceIndex, one may be invalid if an element is missing from + // either the x or y slice. + // • For MapIndex, one may be invalid if an entry is missing from + // either the x or y map. + // + // The provided values must not be mutated. + Values() (vx, vy reflect.Value) +} + +var ( + _ PathStep = StructField{} + _ PathStep = SliceIndex{} + _ PathStep = MapIndex{} + _ PathStep = Indirect{} + _ PathStep = TypeAssertion{} + _ PathStep = Transform{} +) + +func (pa *Path) push(s PathStep) { + *pa = append(*pa, s) +} + +func (pa *Path) pop() { + *pa = (*pa)[:len(*pa)-1] +} + +// Last returns the last PathStep in the Path. +// If the path is empty, this returns a non-nil PathStep that reports a nil Type. +func (pa Path) Last() PathStep { + return pa.Index(-1) +} + +// Index returns the ith step in the Path and supports negative indexing. +// A negative index starts counting from the tail of the Path such that -1 +// refers to the last step, -2 refers to the second-to-last step, and so on. +// If index is invalid, this returns a non-nil PathStep that reports a nil Type. 
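+//
+// For example (an illustrative note, not upstream documentation),
+// pa.Index(-1) behaves like pa.Last(), while pa.Index(len(pa)) is out of
+// range and returns a pathStep whose Type method reports nil.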
+func (pa Path) Index(i int) PathStep { + if i < 0 { + i = len(pa) + i + } + if i < 0 || i >= len(pa) { + return pathStep{} + } + return pa[i] +} + +// String returns the simplified path to a node. +// The simplified path only contains struct field accesses. +// +// For example: +// MyMap.MySlices.MyField +func (pa Path) String() string { + var ss []string + for _, s := range pa { + if _, ok := s.(StructField); ok { + ss = append(ss, s.String()) + } + } + return strings.TrimPrefix(strings.Join(ss, ""), ".") +} + +// GoString returns the path to a specific node using Go syntax. +// +// For example: +// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField +func (pa Path) GoString() string { + var ssPre, ssPost []string + var numIndirect int + for i, s := range pa { + var nextStep PathStep + if i+1 < len(pa) { + nextStep = pa[i+1] + } + switch s := s.(type) { + case Indirect: + numIndirect++ + pPre, pPost := "(", ")" + switch nextStep.(type) { + case Indirect: + continue // Next step is indirection, so let them batch up + case StructField: + numIndirect-- // Automatic indirection on struct fields + case nil: + pPre, pPost = "", "" // Last step; no need for parentheses + } + if numIndirect > 0 { + ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect)) + ssPost = append(ssPost, pPost) + } + numIndirect = 0 + continue + case Transform: + ssPre = append(ssPre, s.trans.name+"(") + ssPost = append(ssPost, ")") + continue + } + ssPost = append(ssPost, s.String()) + } + for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 { + ssPre[i], ssPre[j] = ssPre[j], ssPre[i] + } + return strings.Join(ssPre, "") + strings.Join(ssPost, "") +} + +type pathStep struct { + typ reflect.Type + vx, vy reflect.Value +} + +func (ps pathStep) Type() reflect.Type { return ps.typ } +func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy } +func (ps pathStep) String() string { + if ps.typ == nil { + return "" + } + s := ps.typ.String() + if s == "" || strings.ContainsAny(s, "{}\n") { + return "root" // Type too simple or complex to print + } + return fmt.Sprintf("{%s}", s) +} + +// StructField represents a struct field access on a field called Name. +type StructField struct{ *structField } +type structField struct { + pathStep + name string + idx int + + // These fields are used for forcibly accessing an unexported field. + // pvx, pvy, and field are only valid if unexported is true. + unexported bool + mayForce bool // Forcibly allow visibility + paddr bool // Was parent addressable? + pvx, pvy reflect.Value // Parent values (always addressable) + field reflect.StructField // Field information +} + +func (sf StructField) Type() reflect.Type { return sf.typ } +func (sf StructField) Values() (vx, vy reflect.Value) { + if !sf.unexported { + return sf.vx, sf.vy // CanInterface reports true + } + + // Forcibly obtain read-write access to an unexported struct field. + if sf.mayForce { + vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr) + vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr) + return vx, vy // CanInterface reports true + } + return sf.vx, sf.vy // CanInterface reports false +} +func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) } + +// Name is the field name. +func (sf StructField) Name() string { return sf.name } + +// Index is the index of the field in the parent struct type. +// See reflect.Type.Field. +func (sf StructField) Index() int { return sf.idx } + +// SliceIndex is an index operation on a slice or array at some index Key.
+type SliceIndex struct{ *sliceIndex } +type sliceIndex struct { + pathStep + xkey, ykey int + isSlice bool // False for reflect.Array +} + +func (si SliceIndex) Type() reflect.Type { return si.typ } +func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy } +func (si SliceIndex) String() string { + switch { + case si.xkey == si.ykey: + return fmt.Sprintf("[%d]", si.xkey) + case si.ykey == -1: + // [5->?] means "I don't know where X[5] went" + return fmt.Sprintf("[%d->?]", si.xkey) + case si.xkey == -1: + // [?->3] means "I don't know where Y[3] came from" + return fmt.Sprintf("[?->%d]", si.ykey) + default: + // [5->3] means "X[5] moved to Y[3]" + return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey) + } +} + +// Key is the index key; it may return -1 if in a split state +func (si SliceIndex) Key() int { + if si.xkey != si.ykey { + return -1 + } + return si.xkey +} + +// SplitKeys are the indexes for indexing into slices in the +// x and y values, respectively. These indexes may differ due to the +// insertion or removal of an element in one of the slices, causing +// all of the indexes to be shifted. If an index is -1, then that +// indicates that the element does not exist in the associated slice. +// +// Key is guaranteed to return -1 if and only if the indexes returned +// by SplitKeys are not the same. SplitKeys will never return -1 for +// both indexes. +func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey } + +// MapIndex is an index operation on a map at some index Key. +type MapIndex struct{ *mapIndex } +type mapIndex struct { + pathStep + key reflect.Value +} + +func (mi MapIndex) Type() reflect.Type { return mi.typ } +func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy } +func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) } + +// Key is the value of the map key. +func (mi MapIndex) Key() reflect.Value { return mi.key } + +// Indirect represents pointer indirection on the parent type. +type Indirect struct{ *indirect } +type indirect struct { + pathStep +} + +func (in Indirect) Type() reflect.Type { return in.typ } +func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy } +func (in Indirect) String() string { return "*" } + +// TypeAssertion represents a type assertion on an interface. +type TypeAssertion struct{ *typeAssertion } +type typeAssertion struct { + pathStep +} + +func (ta TypeAssertion) Type() reflect.Type { return ta.typ } +func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } +func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) } + +// Transform is a transformation from the parent type to the current type. +type Transform struct{ *transform } +type transform struct { + pathStep + trans *transformer +} + +func (tf Transform) Type() reflect.Type { return tf.typ } +func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy } +func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } + +// Name is the name of the Transformer. +func (tf Transform) Name() string { return tf.trans.name } + +// Func is the function pointer to the transformer function. +func (tf Transform) Func() reflect.Value { return tf.trans.fnc } + +// Option returns the originally constructed Transformer option. +// The == operator can be used to detect the exact option used. 
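[Editor's note: a hedged sketch of how a Reporter might interpret these step types using only the exported accessors above; describeStep is an illustrative helper, not part of the library, and assumes the fmt and cmp imports.]

// describeStep renders a single PathStep, distinguishing the split-key
// states that SliceIndex can report via SplitKeys.
func describeStep(step cmp.PathStep) string {
	si, ok := step.(cmp.SliceIndex)
	if !ok {
		return step.String()
	}
	ix, iy := si.SplitKeys()
	switch {
	case ix == iy:
		return fmt.Sprintf("x[%d] vs y[%d]", ix, iy)
	case iy == -1:
		return fmt.Sprintf("x[%d] was removed", ix)
	case ix == -1:
		return fmt.Sprintf("y[%d] was inserted", iy)
	default:
		return fmt.Sprintf("x[%d] moved to y[%d]", ix, iy)
	}
}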
+func (tf Transform) Option() Option { return tf.trans } + +// pointerPath represents a dual-stack of pointers encountered when +// recursively traversing the x and y values. This data structure supports +// detection of cycles and determining whether the cycles are equal. +// In Go, cycles can occur via pointers, slices, and maps. +// +// The pointerPath uses a map to represent a stack, where descent into a +// pointer pushes the address onto the stack, and ascent from a pointer +// pops the address from the stack. Thus, when traversing into a pointer from +// reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles +// by checking whether the pointer has already been visited. The cycle detection +// uses a separate stack for the x and y values. +// +// If a cycle is detected, we need to determine whether the two pointers +// should be considered equal. The definition of equality chosen by Equal +// requires two graphs to have the same structure. To determine this, both the +// x and y values must have a cycle where the previous pointers were also +// encountered together as a pair. +// +// Semantically, this is equivalent to augmenting Indirect, SliceIndex, and +// MapIndex with pointer information for the x and y values. +// Suppose px and py are two pointers to compare; we then search the +// Path for whether px was ever encountered in the Path history of x, and +// similarly so with py. If either side has a cycle, the comparison is only +// equal if both px and py have a cycle resulting from the same PathStep. +// +// Using a map as a stack is more performant as we can perform cycle detection +// in O(1) instead of O(N) where N is len(Path). +type pointerPath struct { + // mx is keyed by x pointers, where the value is the associated y pointer. + mx map[value.Pointer]value.Pointer + // my is keyed by y pointers, where the value is the associated x pointer. + my map[value.Pointer]value.Pointer +} + +func (p *pointerPath) Init() { + p.mx = make(map[value.Pointer]value.Pointer) + p.my = make(map[value.Pointer]value.Pointer) +} + +// Push indicates intent to descend into pointers vx and vy where +// visited reports whether either has been seen before. If visited before, +// equal reports whether both pointers were encountered together. +// Pop must be called if and only if the pointers were never visited. +// +// The pointers vx and vy must be a reflect.Ptr, reflect.Slice, or reflect.Map +// and be non-nil. +func (p pointerPath) Push(vx, vy reflect.Value) (equal, visited bool) { + px := value.PointerOf(vx) + py := value.PointerOf(vy) + _, ok1 := p.mx[px] + _, ok2 := p.my[py] + if ok1 || ok2 { + equal = p.mx[px] == py && p.my[py] == px // Pointers paired together + return equal, true + } + p.mx[px] = py + p.my[py] = px + return false, false +} + +// Pop ascends from pointers vx and vy. +func (p pointerPath) Pop(vx, vy reflect.Value) { + delete(p.mx, value.PointerOf(vx)) + delete(p.my, value.PointerOf(vy)) +} + +// isExported reports whether the identifier is exported. +func isExported(id string) bool { + r, _ := utf8.DecodeRuneInString(id) + return unicode.IsUpper(r) +} diff --git a/extender/code/vendor/github.com/google/go-cmp/cmp/report.go b/extender/code/vendor/github.com/google/go-cmp/cmp/report.go new file mode 100644 index 000000000..aafcb3635 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-cmp/cmp/report.go @@ -0,0 +1,54 @@ +// Copyright 2017, The Go Authors. All rights reserved.
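[Editor's note: to make the cycle-equality rule above concrete, a small sketch relying only on the exported cmp API; the node type is illustrative. Two self-referential graphs with the same shape compare as equal, and traversal terminates instead of recursing forever.]

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type node struct {
	Value int
	Next  *node
}

func main() {
	x := &node{Value: 1}
	x.Next = x // one-node cycle
	y := &node{Value: 1}
	y.Next = y // one-node cycle with the same shape

	// Both cycles are introduced by the same PathStep, so Push reports
	// the pointers as paired and Equal treats the graphs as equal.
	fmt.Println(cmp.Equal(x, y)) // true
}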
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +// defaultReporter implements the reporter interface. +// +// As Equal serially calls the PushStep, Report, and PopStep methods, the +// defaultReporter constructs a tree-based representation of the compared value +// and the result of each comparison (see valueNode). +// +// When the String method is called, the FormatDiff method transforms the +// valueNode tree into a textNode tree, which is a tree-based representation +// of the textual output (see textNode). +// +// Lastly, the textNode.String method produces the final report as a string. +type defaultReporter struct { + root *valueNode + curr *valueNode +} + +func (r *defaultReporter) PushStep(ps PathStep) { + r.curr = r.curr.PushStep(ps) + if r.root == nil { + r.root = r.curr + } +} +func (r *defaultReporter) Report(rs Result) { + r.curr.Report(rs) +} +func (r *defaultReporter) PopStep() { + r.curr = r.curr.PopStep() +} + +// String provides a full report of the differences detected as a structured +// literal in pseudo-Go syntax. String may only be called after the entire tree +// has been traversed. +func (r *defaultReporter) String() string { + assert(r.root != nil && r.curr == nil) + if r.root.NumDiff == 0 { + return "" + } + ptrs := new(pointerReferences) + text := formatOptions{}.FormatDiff(r.root, ptrs) + resolveReferences(text) + return text.String() +} + +func assert(ok bool) { + if !ok { + panic("assertion failure") + } +} diff --git a/extender/code/vendor/github.com/google/go-cmp/cmp/report_compare.go b/extender/code/vendor/github.com/google/go-cmp/cmp/report_compare.go new file mode 100644 index 000000000..9e2180964 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -0,0 +1,432 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + + "github.com/google/go-cmp/cmp/internal/value" +) + +// numContextRecords is the number of surrounding equal records to print. +const numContextRecords = 2 + +type diffMode byte + +const ( + diffUnknown diffMode = 0 + diffIdentical diffMode = ' ' + diffRemoved diffMode = '-' + diffInserted diffMode = '+' +) + +type typeMode int + +const ( + // emitType always prints the type. + emitType typeMode = iota + // elideType never prints the type. + elideType + // autoType prints the type only for composite kinds + // (i.e., structs, slices, arrays, and maps). + autoType +) + +type formatOptions struct { + // DiffMode controls the output mode of FormatDiff. + // + // If diffUnknown, then produce a diff of the x and y values. + // If diffIdentical, then emit values as if they were equal. + // If diffRemoved, then only emit x values (ignoring y values). + // If diffInserted, then only emit y values (ignoring x values). + DiffMode diffMode + + // TypeMode controls whether to print the type for the current node. + // + // As a general rule of thumb, we always print the type of the next node + // after an interface, and always elide the type of the next node after + // a slice or map node. + TypeMode typeMode + + // formatValueOptions are options specific to printing reflect.Values. 
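[Editor's note: the observable end product of the defaultReporter pipeline described above is the string returned by cmp.Diff. A hedged illustration; the exact rendering is deliberately unstable (see report_text.go below), so the shape shown is indicative only.]

func exampleDiff() {
	type person struct {
		Name string
		Age  int
	}
	x := person{Name: "Alice", Age: 30}
	y := person{Name: "Alice", Age: 31}
	fmt.Print(cmp.Diff(x, y))
	// Output has roughly this shape:
	//   person{
	//         Name: "Alice",
	//   -     Age:  30,
	//   +     Age:  31,
	//   }
}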
+ formatValueOptions +} + +func (opts formatOptions) WithDiffMode(d diffMode) formatOptions { + opts.DiffMode = d + return opts +} +func (opts formatOptions) WithTypeMode(t typeMode) formatOptions { + opts.TypeMode = t + return opts +} +func (opts formatOptions) WithVerbosity(level int) formatOptions { + opts.VerbosityLevel = level + opts.LimitVerbosity = true + return opts +} +func (opts formatOptions) verbosity() uint { + switch { + case opts.VerbosityLevel < 0: + return 0 + case opts.VerbosityLevel > 16: + return 16 // some reasonable maximum to avoid shift overflow + default: + return uint(opts.VerbosityLevel) + } +} + +const maxVerbosityPreset = 3 + +// verbosityPreset modifies the verbosity settings given an index +// between 0 and maxVerbosityPreset, inclusive. +func verbosityPreset(opts formatOptions, i int) formatOptions { + opts.VerbosityLevel = int(opts.verbosity()) + 2*i + if i > 0 { + opts.AvoidStringer = true + } + if i >= maxVerbosityPreset { + opts.PrintAddresses = true + opts.QualifiedNames = true + } + return opts +} + +// FormatDiff converts a valueNode tree into a textNode tree, where the latter +// is a textual representation of the differences detected in the former. +func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) { + if opts.DiffMode == diffIdentical { + opts = opts.WithVerbosity(1) + } else { + opts = opts.WithVerbosity(3) + } + + // Check whether we have specialized formatting for this node. + // This is not necessary, but helpful for producing more readable outputs. + if opts.CanFormatDiffSlice(v) { + return opts.FormatDiffSlice(v) + } + + var parentKind reflect.Kind + if v.parent != nil && v.parent.TransformerName == "" { + parentKind = v.parent.Type.Kind() + } + + // For leaf nodes, format the value based on the reflect.Values alone. + if v.MaxDepth == 0 { + switch opts.DiffMode { + case diffUnknown, diffIdentical: + // Format Equal. + if v.NumDiff == 0 { + outx := opts.FormatValue(v.ValueX, parentKind, ptrs) + outy := opts.FormatValue(v.ValueY, parentKind, ptrs) + if v.NumIgnored > 0 && v.NumSame == 0 { + return textEllipsis + } else if outx.Len() < outy.Len() { + return outx + } else { + return outy + } + } + + // Format unequal. + assert(opts.DiffMode == diffUnknown) + var list textList + outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs) + outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs) + for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ { + opts2 := verbosityPreset(opts, i).WithTypeMode(elideType) + outx = opts2.FormatValue(v.ValueX, parentKind, ptrs) + outy = opts2.FormatValue(v.ValueY, parentKind, ptrs) + } + if outx != nil { + list = append(list, textRecord{Diff: '-', Value: outx}) + } + if outy != nil { + list = append(list, textRecord{Diff: '+', Value: outy}) + } + return opts.WithTypeMode(emitType).FormatType(v.Type, list) + case diffRemoved: + return opts.FormatValue(v.ValueX, parentKind, ptrs) + case diffInserted: + return opts.FormatValue(v.ValueY, parentKind, ptrs) + default: + panic("invalid diff mode") + } + } + + // Register slice element to support cycle detection. + if parentKind == reflect.Slice { + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true) + defer ptrs.Pop() + defer func() { out = wrapTrunkReferences(ptrRefs, out) }() + } + + // Descend into the child value node.
+ if v.TransformerName != "" { + out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs) + out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"} + return opts.FormatType(v.Type, out) + } else { + switch k := v.Type.Kind(); k { + case reflect.Struct, reflect.Array, reflect.Slice: + out = opts.formatDiffList(v.Records, k, ptrs) + out = opts.FormatType(v.Type, out) + case reflect.Map: + // Register map to support cycle detection. + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false) + defer ptrs.Pop() + + out = opts.formatDiffList(v.Records, k, ptrs) + out = wrapTrunkReferences(ptrRefs, out) + out = opts.FormatType(v.Type, out) + case reflect.Ptr: + // Register pointer to support cycle detection. + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false) + defer ptrs.Pop() + + out = opts.FormatDiff(v.Value, ptrs) + out = wrapTrunkReferences(ptrRefs, out) + out = &textWrap{Prefix: "&", Value: out} + case reflect.Interface: + out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs) + default: + panic(fmt.Sprintf("%v cannot have children", k)) + } + return out + } +} + +func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode { + // Derive record name based on the data structure kind. + var name string + var formatKey func(reflect.Value) string + switch k { + case reflect.Struct: + name = "field" + opts = opts.WithTypeMode(autoType) + formatKey = func(v reflect.Value) string { return v.String() } + case reflect.Slice, reflect.Array: + name = "element" + opts = opts.WithTypeMode(elideType) + formatKey = func(reflect.Value) string { return "" } + case reflect.Map: + name = "entry" + opts = opts.WithTypeMode(elideType) + formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) } + } + + maxLen := -1 + if opts.LimitVerbosity { + if opts.DiffMode == diffIdentical { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + } else { + maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc... + } + opts.VerbosityLevel-- + } + + // Handle unification. + switch opts.DiffMode { + case diffIdentical, diffRemoved, diffInserted: + var list textList + var deferredEllipsis bool // Add final "..." to indicate records were dropped + for _, r := range recs { + if len(list) == maxLen { + deferredEllipsis = true + break + } + + // Elide struct fields that are zero value. + if k == reflect.Struct { + var isZero bool + switch opts.DiffMode { + case diffIdentical: + isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY) + case diffRemoved: + isZero = value.IsZero(r.Value.ValueX) + case diffInserted: + isZero = value.IsZero(r.Value.ValueY) + } + if isZero { + continue + } + } + // Elide ignored nodes. + if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 { + deferredEllipsis = !(k == reflect.Slice || k == reflect.Array) + if !deferredEllipsis { + list.AppendEllipsis(diffStats{}) + } + continue + } + if out := opts.FormatDiff(r.Value, ptrs); out != nil { + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + } + if deferredEllipsis { + list.AppendEllipsis(diffStats{}) + } + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} + case diffUnknown: + default: + panic("invalid diff mode") + } + + // Handle differencing. 
+ var numDiffs int + var list textList + var keys []reflect.Value // invariant: len(list) == len(keys) + groups := coalesceAdjacentRecords(name, recs) + maxGroup := diffStats{Name: name} + for i, ds := range groups { + if maxLen >= 0 && numDiffs >= maxLen { + maxGroup = maxGroup.Append(ds) + continue + } + + // Handle equal records. + if ds.NumDiff() == 0 { + // Compute the number of leading and trailing records to print. + var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 { + if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { + break + } + numLo++ + } + for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { + break + } + numHi++ + } + if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 { + numHi++ // Avoid pointless coalescing of a single equal record + } + + // Format the equal values. + for _, r := range recs[:numLo] { + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) + } + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + for len(keys) < len(list) { + keys = append(keys, reflect.Value{}) + } + } + for _, r := range recs[numEqual-numHi : numEqual] { + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) + } + recs = recs[numEqual:] + continue + } + + // Handle unequal records. + for _, r := range recs[:ds.NumDiff()] { + switch { + case opts.CanFormatDiffSlice(r.Value): + out := opts.FormatDiffSlice(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) + case r.Value.NumChildren == r.Value.MaxDepth: + outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs) + outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs) + for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ { + opts2 := verbosityPreset(opts, i) + outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs) + outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs) + } + if outx != nil { + list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx}) + keys = append(keys, r.Key) + } + if outy != nil { + list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy}) + keys = append(keys, r.Key) + } + default: + out := opts.FormatDiff(r.Value, ptrs) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) + } + } + recs = recs[ds.NumDiff():] + numDiffs += ds.NumDiff() + } + if maxGroup.IsZero() { + assert(len(recs) == 0) + } else { + list.AppendEllipsis(maxGroup) + for len(keys) < len(list) { + keys = append(keys, reflect.Value{}) + } + } + assert(len(list) == len(keys)) + + // For maps, the default formatting logic uses fmt.Stringer which may + // produce ambiguous output. Avoid calling String to disambiguate. 
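[Editor's note: since numContextRecords is 2, a diff of a long sequence keeps about two equal records on each side of a change and elides the rest. A hedged sketch; the exact rendering is unspecified.]

func exampleContext() {
	x := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
	y := []int{0, 1, 2, 3, 4, 5, 60, 7, 8, 9}
	fmt.Print(cmp.Diff(x, y))
	// Roughly:
	//   []int{
	//         0, 1,
	//         ... // 2 identical elements
	//         4, 5,
	//   -     6,
	//   +     60,
	//         7, 8, 9,
	//   }
}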
+ if k == reflect.Map { + var ambiguous bool + seenKeys := map[string]reflect.Value{} + for i, currKey := range keys { + if currKey.IsValid() { + strKey := list[i].Key + prevKey, seen := seenKeys[strKey] + if seen && prevKey.CanInterface() && currKey.CanInterface() { + ambiguous = prevKey.Interface() != currKey.Interface() + if ambiguous { + break + } + } + seenKeys[strKey] = currKey + } + } + if ambiguous { + for i, k := range keys { + if k.IsValid() { + list[i].Key = formatMapKey(k, true, ptrs) + } + } + } + } + + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} +} + +// coalesceAdjacentRecords coalesces the list of records into groups of +// adjacent equal, or unequal counts. +func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) { + var prevCase int // Arbitrary index into which case last occurred + lastStats := func(i int) *diffStats { + if prevCase != i { + groups = append(groups, diffStats{Name: name}) + prevCase = i + } + return &groups[len(groups)-1] + } + for _, r := range recs { + switch rv := r.Value; { + case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0: + lastStats(1).NumIgnored++ + case rv.NumDiff == 0: + lastStats(1).NumIdentical++ + case rv.NumDiff > 0 && !rv.ValueY.IsValid(): + lastStats(2).NumRemoved++ + case rv.NumDiff > 0 && !rv.ValueX.IsValid(): + lastStats(2).NumInserted++ + default: + lastStats(2).NumModified++ + } + } + return groups +} diff --git a/extender/code/vendor/github.com/google/go-cmp/cmp/report_references.go b/extender/code/vendor/github.com/google/go-cmp/cmp/report_references.go new file mode 100644 index 000000000..d620c2c20 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-cmp/cmp/report_references.go @@ -0,0 +1,264 @@ +// Copyright 2020, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/value" +) + +const ( + pointerDelimPrefix = "⟪" + pointerDelimSuffix = "⟫" +) + +// formatPointer prints the address of the pointer. +func formatPointer(p value.Pointer, withDelims bool) string { + v := p.Uintptr() + if flags.Deterministic { + v = 0xdeadf00f // Only used for stable testing purposes + } + if withDelims { + return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix + } + return formatHex(uint64(v)) +} + +// pointerReferences is a stack of pointers visited so far. 
+type pointerReferences [][2]value.Pointer + +func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) { + if deref && vx.IsValid() { + vx = vx.Addr() + } + if deref && vy.IsValid() { + vy = vy.Addr() + } + switch d { + case diffUnknown, diffIdentical: + pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)} + case diffRemoved: + pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}} + case diffInserted: + pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)} + } + *ps = append(*ps, pp) + return pp +} + +func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) { + p = value.PointerOf(v) + for _, pp := range *ps { + if p == pp[0] || p == pp[1] { + return p, true + } + } + *ps = append(*ps, [2]value.Pointer{p, p}) + return p, false +} + +func (ps *pointerReferences) Pop() { + *ps = (*ps)[:len(*ps)-1] +} + +// trunkReferences is metadata for a textNode indicating that the sub-tree +// represents the value for either pointer in a pair of references. +type trunkReferences struct{ pp [2]value.Pointer } + +// trunkReference is metadata for a textNode indicating that the sub-tree +// represents the value for the given pointer reference. +type trunkReference struct{ p value.Pointer } + +// leafReference is metadata for a textNode indicating that the value is +// truncated as it refers to another part of the tree (i.e., a trunk). +type leafReference struct{ p value.Pointer } + +func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode { + switch { + case pp[0].IsNil(): + return &textWrap{Value: s, Metadata: trunkReference{pp[1]}} + case pp[1].IsNil(): + return &textWrap{Value: s, Metadata: trunkReference{pp[0]}} + case pp[0] == pp[1]: + return &textWrap{Value: s, Metadata: trunkReference{pp[0]}} + default: + return &textWrap{Value: s, Metadata: trunkReferences{pp}} + } +} +func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode { + var prefix string + if printAddress { + prefix = formatPointer(p, true) + } + return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}} +} +func makeLeafReference(p value.Pointer, printAddress bool) textNode { + out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"} + var prefix string + if printAddress { + prefix = formatPointer(p, true) + } + return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}} +} + +// resolveReferences walks the textNode tree searching for any leaf reference +// metadata and resolves each against the corresponding trunk references. +// Since pointer addresses in memory are not particularly readable to the user, +// it replaces each pointer value with an arbitrary and unique reference ID. +func resolveReferences(s textNode) { + var walkNodes func(textNode, func(textNode)) + walkNodes = func(s textNode, f func(textNode)) { + f(s) + switch s := s.(type) { + case *textWrap: + walkNodes(s.Value, f) + case textList: + for _, r := range s { + walkNodes(r.Value, f) + } + } + } + + // Collect all trunks and leaves with reference metadata. + var trunks, leaves []*textWrap + walkNodes(s, func(s textNode) { + if s, ok := s.(*textWrap); ok { + switch s.Metadata.(type) { + case leafReference: + leaves = append(leaves, s) + case trunkReference, trunkReferences: + trunks = append(trunks, s) + } + } + }) + + // No leaf references to resolve. + if len(leaves) == 0 { + return + } + + // Collect the set of all leaf references to resolve. 
+ leafPtrs := make(map[value.Pointer]bool) + for _, leaf := range leaves { + leafPtrs[leaf.Metadata.(leafReference).p] = true + } + + // Collect the set of trunk pointers that are always paired together. + // This allows us to assign a single ID to both pointers for brevity. + // If a pointer in a pair ever occurs by itself or as a different pair, + // then the pair is broken. + pairedTrunkPtrs := make(map[value.Pointer]value.Pointer) + unpair := func(p value.Pointer) { + if !pairedTrunkPtrs[p].IsNil() { + pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half + } + pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half + } + for _, trunk := range trunks { + switch p := trunk.Metadata.(type) { + case trunkReference: + unpair(p.p) // standalone pointer cannot be part of a pair + case trunkReferences: + p0, ok0 := pairedTrunkPtrs[p.pp[0]] + p1, ok1 := pairedTrunkPtrs[p.pp[1]] + switch { + case !ok0 && !ok1: + // Register the newly seen pair. + pairedTrunkPtrs[p.pp[0]] = p.pp[1] + pairedTrunkPtrs[p.pp[1]] = p.pp[0] + case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]: + // Exact pair already seen; do nothing. + default: + // Pair conflicts with some other pair; break all pairs. + unpair(p.pp[0]) + unpair(p.pp[1]) + } + } + } + + // Correlate each pointer referenced by leaves to a unique identifier, + // and print the IDs for each trunk that matches those pointers. + var nextID uint + ptrIDs := make(map[value.Pointer]uint) + newID := func() uint { + id := nextID + nextID++ + return id + } + for _, trunk := range trunks { + switch p := trunk.Metadata.(type) { + case trunkReference: + if print := leafPtrs[p.p]; print { + id, ok := ptrIDs[p.p] + if !ok { + id = newID() + ptrIDs[p.p] = id + } + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id)) + } + case trunkReferences: + print0 := leafPtrs[p.pp[0]] + print1 := leafPtrs[p.pp[1]] + if print0 || print1 { + id0, ok0 := ptrIDs[p.pp[0]] + id1, ok1 := ptrIDs[p.pp[1]] + isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0] + if isPair { + var id uint + assert(ok0 == ok1) // must be seen together or not at all + if ok0 { + assert(id0 == id1) // must have the same ID + id = id0 + } else { + id = newID() + ptrIDs[p.pp[0]] = id + ptrIDs[p.pp[1]] = id + } + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id)) + } else { + if print0 && !ok0 { + id0 = newID() + ptrIDs[p.pp[0]] = id0 + } + if print1 && !ok1 { + id1 = newID() + ptrIDs[p.pp[1]] = id1 + } + switch { + case print0 && print1: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1)) + case print0: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)) + case print1: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1)) + } + } + } + } + } + + // Update all leaf references with the unique identifier. 
+ for _, leaf := range leaves { + if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok { + leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id)) + } + } +} + +func formatReference(id uint) string { + return fmt.Sprintf("ref#%d", id) +} + +func updateReferencePrefix(prefix, ref string) string { + if prefix == "" { + return pointerDelimPrefix + ref + pointerDelimSuffix + } + suffix := strings.TrimPrefix(prefix, pointerDelimPrefix) + return pointerDelimPrefix + ref + ": " + suffix +} diff --git a/extender/code/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/extender/code/vendor/github.com/google/go-cmp/cmp/report_reflect.go new file mode 100644 index 000000000..786f67126 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -0,0 +1,400 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/value" +) + +type formatValueOptions struct { + // AvoidStringer controls whether to avoid calling custom stringer + // methods like error.Error or fmt.Stringer.String. + AvoidStringer bool + + // PrintAddresses controls whether to print the address of all pointers, + // slice elements, and maps. + PrintAddresses bool + + // QualifiedNames controls whether FormatType uses the fully qualified name + // (including the full package path as opposed to just the package name). + QualifiedNames bool + + // VerbosityLevel controls the amount of output to produce. + // A higher value produces more output. A value of zero or lower produces + // no output (represented using an ellipsis). + // If LimitVerbosity is false, then the level is treated as infinite. + VerbosityLevel int + + // LimitVerbosity specifies that formatting should respect VerbosityLevel. + LimitVerbosity bool +} + +// FormatType prints the type as if it were wrapping s. +// This may return s as-is depending on the current type and TypeMode mode. +func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { + // Check whether to emit the type or not. + switch opts.TypeMode { + case autoType: + switch t.Kind() { + case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map: + if s.Equal(textNil) { + return s + } + default: + return s + } + if opts.DiffMode == diffIdentical { + return s // elide type for identical nodes + } + case elideType: + return s + } + + // Determine the type label, applying special handling for unnamed types. + typeName := value.TypeString(t, opts.QualifiedNames) + if t.Name() == "" { + // According to Go grammar, certain type literals contain symbols that + // do not strongly bind to the next lexical token (e.g., *T). + switch t.Kind() { + case reflect.Chan, reflect.Func, reflect.Ptr: + typeName = "(" + typeName + ")" + } + } + return &textWrap{Prefix: typeName, Value: wrapParens(s)} +} + +// wrapParens wraps s with a set of parentheses, but avoids it if the +// wrapped node itself is already surrounded by a pair of parentheses or braces. +// It handles unwrapping one level of pointer-reference nodes. +func wrapParens(s textNode) textNode { + var refNode *textWrap + if s2, ok := s.(*textWrap); ok { + // Unwrap a single pointer reference node.
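[Editor's note: a worked example of the two helpers above; they are unexported, so the results are shown as comments rather than executed. formatReference produces the ID token and updateReferencePrefix splices it into an existing ⟪...⟫ prefix.]

// formatReference(0)                              // "ref#0"
// updateReferencePrefix("", "ref#0")              // "⟪ref#0⟫"
// updateReferencePrefix("⟪0xdeadf00f⟫", "ref#0")  // "⟪ref#0: 0xdeadf00f⟫"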
+ switch s2.Metadata.(type) { + case leafReference, trunkReference, trunkReferences: + refNode = s2 + if s3, ok := refNode.Value.(*textWrap); ok { + s2 = s3 + } + } + + // Already has delimiters that make parentheses unnecessary. + hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")") + hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}") + if hasParens || hasBraces { + return s + } + } + if refNode != nil { + refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"} + return s + } + return &textWrap{Prefix: "(", Value: s, Suffix: ")"} +} + +// FormatValue prints the reflect.Value, taking extra care to avoid descending +// into pointers already in ptrs. As pointers are visited, ptrs is also updated. +func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) { + if !v.IsValid() { + return nil + } + t := v.Type() + + // Check slice element for cycles. + if parentKind == reflect.Slice { + ptrRef, visited := ptrs.Push(v.Addr()) + if visited { + return makeLeafReference(ptrRef, false) + } + defer ptrs.Pop() + defer func() { out = wrapTrunkReference(ptrRef, false, out) }() + } + + // Check whether there is an Error or String method to call. + if !opts.AvoidStringer && v.CanInterface() { + // Avoid calling Error or String methods on nil receivers since many + // implementations crash when doing so. + if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() { + var prefix, strVal string + func() { + // Swallow and ignore any panics from String or Error. + defer func() { recover() }() + switch v := v.Interface().(type) { + case error: + strVal = v.Error() + prefix = "e" + case fmt.Stringer: + strVal = v.String() + prefix = "s" + } + }() + if prefix != "" { + return opts.formatString(prefix, strVal) + } + } + } + + // Check whether to explicitly wrap the result with the type. + var skipType bool + defer func() { + if !skipType { + out = opts.FormatType(t, out) + } + }() + + switch t.Kind() { + case reflect.Bool: + return textLine(fmt.Sprint(v.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return textLine(fmt.Sprint(v.Int())) + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return textLine(fmt.Sprint(v.Uint())) + case reflect.Uint8: + if parentKind == reflect.Slice || parentKind == reflect.Array { + return textLine(formatHex(v.Uint())) + } + return textLine(fmt.Sprint(v.Uint())) + case reflect.Uintptr: + return textLine(formatHex(v.Uint())) + case reflect.Float32, reflect.Float64: + return textLine(fmt.Sprint(v.Float())) + case reflect.Complex64, reflect.Complex128: + return textLine(fmt.Sprint(v.Complex())) + case reflect.String: + return opts.formatString("", v.String()) + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + return textLine(formatPointer(value.PointerOf(v), true)) + case reflect.Struct: + var list textList + v := makeAddressable(v) // needed for retrieveUnexportedField + maxLen := v.NumField() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
+ opts.VerbosityLevel-- + } + for i := 0; i < v.NumField(); i++ { + vv := v.Field(i) + if value.IsZero(vv) { + continue // Elide fields with zero values + } + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break + } + sf := t.Field(i) + if supportExporters && !isExported(sf.Name) { + vv = retrieveUnexportedField(v, sf, true) + } + s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs) + list = append(list, textRecord{Key: sf.Name, Value: s}) + } + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} + case reflect.Slice: + if v.IsNil() { + return textNil + } + + // Check whether this is a []byte of text data. + if t.Elem() == reflect.TypeOf(byte(0)) { + b := v.Bytes() + isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) } + if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 { + out = opts.formatString("", string(b)) + return opts.WithTypeMode(emitType).FormatType(t, out) + } + } + + fallthrough + case reflect.Array: + maxLen := v.Len() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + opts.VerbosityLevel-- + } + var list textList + for i := 0; i < v.Len(); i++ { + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break + } + s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs) + list = append(list, textRecord{Value: s}) + } + + out = &textWrap{Prefix: "{", Value: list, Suffix: "}"} + if t.Kind() == reflect.Slice && opts.PrintAddresses { + header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap()) + out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out} + } + return out + case reflect.Map: + if v.IsNil() { + return textNil + } + + // Check pointer for cycles. + ptrRef, visited := ptrs.Push(v) + if visited { + return makeLeafReference(ptrRef, opts.PrintAddresses) + } + defer ptrs.Pop() + + maxLen := v.Len() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + opts.VerbosityLevel-- + } + var list textList + for _, k := range value.SortKeys(v.MapKeys()) { + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break + } + sk := formatMapKey(k, false, ptrs) + sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs) + list = append(list, textRecord{Key: sk, Value: sv}) + } + + out = &textWrap{Prefix: "{", Value: list, Suffix: "}"} + out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) + return out + case reflect.Ptr: + if v.IsNil() { + return textNil + } + + // Check pointer for cycles. + ptrRef, visited := ptrs.Push(v) + if visited { + out = makeLeafReference(ptrRef, opts.PrintAddresses) + return &textWrap{Prefix: "&", Value: out} + } + defer ptrs.Pop() + + skipType = true // Let the underlying value print the type instead + out = opts.FormatValue(v.Elem(), t.Kind(), ptrs) + out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) + out = &textWrap{Prefix: "&", Value: out} + return out + case reflect.Interface: + if v.IsNil() { + return textNil + } + // Interfaces accept different concrete types, + // so configure the underlying value to explicitly print the type.
+ skipType = true // Print the concrete type instead + return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs) + default: + panic(fmt.Sprintf("%v kind not handled", v.Kind())) + } +} + +func (opts formatOptions) formatString(prefix, s string) textNode { + maxLen := len(s) + maxLines := strings.Count(s, "\n") + 1 + if opts.LimitVerbosity { + maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc... + maxLines = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc... + } + + // For multiline strings, use the triple-quote syntax, + // but only use it when printing removed or inserted nodes since + // we only want the extra verbosity for those cases. + lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n") + isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == '-' || opts.DiffMode == '+') + for i := 0; i < len(lines) && isTripleQuoted; i++ { + lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support + isPrintable := func(r rune) bool { + return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable + } + line := lines[i] + isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen + } + if isTripleQuoted { + var list textList + list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true}) + for i, line := range lines { + if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 { + comment := commentString(fmt.Sprintf("%d elided lines", numElided)) + list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment}) + break + } + list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true}) + } + list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true}) + return &textWrap{Prefix: "(", Value: list, Suffix: ")"} + } + + // Format the string as a single-line quoted string. + if len(s) > maxLen+len(textEllipsis) { + return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis)) + } + return textLine(prefix + formatString(s)) +} + +// formatMapKey formats v as if it were a map key. +// The result is guaranteed to be a single line. +func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string { + var opts formatOptions + opts.DiffMode = diffIdentical + opts.TypeMode = elideType + opts.PrintAddresses = disambiguate + opts.AvoidStringer = disambiguate + opts.QualifiedNames = disambiguate + s := opts.FormatValue(v, reflect.Map, ptrs).String() + return strings.TrimSpace(s) +} + +// formatString prints s as a double-quoted or backtick-quoted string. +func formatString(s string) string { + // Use quoted string if it is the same length as a raw string literal. + // Otherwise, attempt to use the raw string form. + qs := strconv.Quote(s) + if len(qs) == 1+len(s)+1 { + return qs + } + + // Disallow newlines to ensure output is a single line. + // Only allow printable runes for readability purposes. + rawInvalid := func(r rune) bool { + return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t') + } + if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 { + return "`" + s + "`" + } + return qs +} + +// formatHex prints u as a hexadecimal integer in Go notation.
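[Editor's note: a few worked examples of the string helpers above; they are unexported, so the results are shown as comments. The quoted form wins when quoting adds nothing beyond the surrounding quotes, the raw backtick form when the string needs escapes but contains no backticks or newlines, and the "e"/"s" prefixes mark values rendered through Error or String methods.]

// formatString("hello")     // `"hello"` — quoting adds only the two quotes
// formatString(`say "hi"`)  // raw form: a backtick-quoted `say "hi"` avoids escaping
// formatString("a\nb")      // `"a\nb"` — the newline forces the quoted form
//
// An error value renders as e"...", a fmt.Stringer as s"...", via
// formatString("e", v.Error()) and formatString("s", v.String()).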
+func formatHex(u uint64) string { + var f string + switch { + case u <= 0xff: + f = "0x%02x" + case u <= 0xffff: + f = "0x%04x" + case u <= 0xffffff: + f = "0x%06x" + case u <= 0xffffffff: + f = "0x%08x" + case u <= 0xffffffffff: + f = "0x%010x" + case u <= 0xffffffffffff: + f = "0x%012x" + case u <= 0xffffffffffffff: + f = "0x%014x" + case u <= 0xffffffffffffffff: + f = "0x%016x" + } + return fmt.Sprintf(f, u) +} diff --git a/extender/code/vendor/github.com/google/go-cmp/cmp/report_slices.go b/extender/code/vendor/github.com/google/go-cmp/cmp/report_slices.go new file mode 100644 index 000000000..35315dad3 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -0,0 +1,448 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/diff" +) + +// CanFormatDiffSlice reports whether we support custom formatting for nodes +// that are slices of primitive kinds or strings. +func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { + switch { + case opts.DiffMode != diffUnknown: + return false // Must be formatting in diff mode + case v.NumDiff == 0: + return false // No differences detected + case !v.ValueX.IsValid() || !v.ValueY.IsValid(): + return false // Both values must be valid + case v.Type.Kind() == reflect.Slice && (v.ValueX.Len() == 0 || v.ValueY.Len() == 0): + return false // Both slice values have to be non-empty + case v.NumIgnored > 0: + return false // Some ignore option was used + case v.NumTransformed > 0: + return false // Some transform option was used + case v.NumCompared > 1: + return false // More than one comparison was used + case v.NumCompared == 1 && v.Type.Name() != "": + // The need for cmp to check applicability of options on every element + // in a slice is a significant performance detriment for large []byte. + // The workaround is to specify Comparer(bytes.Equal), + // which enables cmp to compare []byte more efficiently. + // If they differ, we still want to provide batched diffing. + // The logic disallows named types since they tend to have their own + // String method, with nicer formatting than what this provides. + return false + } + + switch t := v.Type; t.Kind() { + case reflect.String: + case reflect.Array, reflect.Slice: + // Only slices of primitive types have specialized handling. + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + default: + return false + } + + // If a sufficient number of elements already differ, + // use specialized formatting even if length requirement is not met. + if v.NumDiff > v.NumSame { + return true + } + default: + return false + } + + // Use specialized string diffing for longer slices or strings. + const minLength = 64 + return v.ValueX.Len() >= minLength && v.ValueY.Len() >= minLength +} + +// FormatDiffSlice prints a diff for the slices (or strings) represented by v. +// This provides custom-tailored logic to make printing of differences in +// textual strings and slices of primitive kinds more readable. 
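[Editor's note: worked examples of formatHex's width selection; it pads to an even number of hex digits based on magnitude.]

// formatHex(0x1)      // "0x01"
// formatHex(0xabc)    // "0x0abc"
// formatHex(0x12345)  // "0x012345"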
+func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { + assert(opts.DiffMode == diffUnknown) + t, vx, vy := v.Type, v.ValueX, v.ValueY + + // Auto-detect the type of the data. + var isLinedText, isText, isBinary bool + var sx, sy string + switch { + case t.Kind() == reflect.String: + sx, sy = vx.String(), vy.String() + isText = true // Initial estimate, verify later + case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)): + sx, sy = string(vx.Bytes()), string(vy.Bytes()) + isBinary = true // Initial estimate, verify later + case t.Kind() == reflect.Array: + // Arrays need to be addressable for slice operations to work. + vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem() + vx2.Set(vx) + vy2.Set(vy) + vx, vy = vx2, vy2 + } + if isText || isBinary { + var numLines, lastLineIdx, maxLineLen int + isBinary = !utf8.ValidString(sx) || !utf8.ValidString(sy) + for i, r := range sx + sy { + if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError { + isBinary = true + break + } + if r == '\n' { + if maxLineLen < i-lastLineIdx { + maxLineLen = i - lastLineIdx + } + lastLineIdx = i + 1 + numLines++ + } + } + isText = !isBinary + isLinedText = isText && numLines >= 4 && maxLineLen <= 1024 + } + + // Format the string into printable records. + var list textList + var delim string + switch { + // If the text appears to be multi-lined text, + // then perform differencing across individual lines. + case isLinedText: + ssx := strings.Split(sx, "\n") + ssy := strings.Split(sy, "\n") + list = opts.formatDiffSlice( + reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line", + func(v reflect.Value, d diffMode) textRecord { + s := formatString(v.Index(0).String()) + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + delim = "\n" + + // If possible, use a custom triple-quote (""") syntax for printing + // differences in a string literal. This format is more readable, + // but has edge-cases where differences are visually indistinguishable. + // This format is avoided under the following conditions: + // • A line starts with `"""` + // • A line starts with "..." + // • A line contains non-printable characters + // • Adjacent different lines differ only by whitespace + // + // For example: + // """ + // ... 
// 3 identical lines + // foo + // bar + // - baz + // + BAZ + // """ + isTripleQuoted := true + prevRemoveLines := map[string]bool{} + prevInsertLines := map[string]bool{} + var list2 textList + list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true}) + for _, r := range list { + if !r.Value.Equal(textEllipsis) { + line, _ := strconv.Unquote(string(r.Value.(textLine))) + line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support + normLine := strings.Map(func(r rune) rune { + if unicode.IsSpace(r) { + return -1 // drop whitespace to avoid visually indistinguishable output + } + return r + }, line) + isPrintable := func(r rune) bool { + return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable + } + isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" + switch r.Diff { + case diffRemoved: + isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine] + prevRemoveLines[normLine] = true + case diffInserted: + isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine] + prevInsertLines[normLine] = true + } + if !isTripleQuoted { + break + } + r.Value = textLine(line) + r.ElideComma = true + } + if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group + prevRemoveLines = map[string]bool{} + prevInsertLines = map[string]bool{} + } + list2 = append(list2, r) + } + if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 { + list2 = list2[:len(list2)-1] // elide single empty line at the end + } + list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true}) + if isTripleQuoted { + var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"} + switch t.Kind() { + case reflect.String: + if t != reflect.TypeOf(string("")) { + out = opts.FormatType(t, out) + } + case reflect.Slice: + // Always emit type for slices since the triple-quote syntax + // looks like a string (not a slice). + opts = opts.WithTypeMode(emitType) + out = opts.FormatType(t, out) + } + return out + } + + // If the text appears to be single-lined text, + // then perform differencing in approximately fixed-sized chunks. + // The output is printed as quoted strings. + case isText: + list = opts.formatDiffSlice( + reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte", + func(v reflect.Value, d diffMode) textRecord { + s := formatString(v.String()) + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + delim = "" + + // If the text appears to be binary data, + // then perform differencing in approximately fixed-sized chunks. + // The output is inspired by hexdump. + case isBinary: + list = opts.formatDiffSlice( + reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte", + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + ss = append(ss, formatHex(v.Index(i).Uint())) + } + s := strings.Join(ss, ", ") + comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String()))) + return textRecord{Diff: d, Value: textLine(s), Comment: comment} + }, + ) + + // For all other slices of primitive types, + // then perform differencing in approximately fixed-sized chunks. + // The size of each chunk depends on the width of the element kind. 
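[Editor's note: a sketch of the triple-quote output this branch produces for multi-line strings; illustrative only, since the exact rendering is unspecified.]

func exampleMultiline() {
	x := "foo\nbar\nbaz\nqux\n"
	y := "foo\nbar\nBAZ\nqux\n"
	fmt.Print(cmp.Diff(x, y))
	// Roughly:
	//   (
	//   	"""
	//   	foo
	//   	bar
	//   - 	baz
	//   + 	BAZ
	//   	qux
	//   	"""
	//   )
}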
+ default: + var chunkSize int + if t.Elem().Kind() == reflect.Bool { + chunkSize = 16 + } else { + switch t.Elem().Bits() { + case 8: + chunkSize = 16 + case 16: + chunkSize = 12 + case 32: + chunkSize = 8 + default: + chunkSize = 8 + } + } + list = opts.formatDiffSlice( + vx, vy, chunkSize, t.Elem().Kind().String(), + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + ss = append(ss, fmt.Sprint(v.Index(i).Int())) + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + ss = append(ss, fmt.Sprint(v.Index(i).Uint())) + case reflect.Uint8, reflect.Uintptr: + ss = append(ss, formatHex(v.Index(i).Uint())) + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + ss = append(ss, fmt.Sprint(v.Index(i).Interface())) + } + } + s := strings.Join(ss, ", ") + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + } + + // Wrap the output with appropriate type information. + var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"} + if !isText { + // The "{...}" byte-sequence literal is not valid Go syntax for strings. + // Emit the type for extra clarity (e.g. "string{...}"). + if t.Kind() == reflect.String { + opts = opts.WithTypeMode(emitType) + } + return opts.FormatType(t, out) + } + switch t.Kind() { + case reflect.String: + out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} + if t != reflect.TypeOf(string("")) { + out = opts.FormatType(t, out) + } + case reflect.Slice: + out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} + if t != reflect.TypeOf([]byte(nil)) { + out = opts.FormatType(t, out) + } + } + return out +} + +// formatASCII formats s as an ASCII string. +// This is useful for printing binary strings in a semi-legible way. +func formatASCII(s string) string { + b := bytes.Repeat([]byte{'.'}, len(s)) + for i := 0; i < len(s); i++ { + if ' ' <= s[i] && s[i] <= '~' { + b[i] = s[i] + } + } + return string(b) +} + +func (opts formatOptions) formatDiffSlice( + vx, vy reflect.Value, chunkSize int, name string, + makeRec func(reflect.Value, diffMode) textRecord, +) (list textList) { + es := diff.Difference(vx.Len(), vy.Len(), func(ix int, iy int) diff.Result { + return diff.BoolResult(vx.Index(ix).Interface() == vy.Index(iy).Interface()) + }) + + appendChunks := func(v reflect.Value, d diffMode) int { + n0 := v.Len() + for v.Len() > 0 { + n := chunkSize + if n > v.Len() { + n = v.Len() + } + list = append(list, makeRec(v.Slice(0, n), d)) + v = v.Slice(n, v.Len()) + } + return n0 - v.Len() + } + + var numDiffs int + maxLen := -1 + if opts.LimitVerbosity { + maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc... + opts.VerbosityLevel-- + } + + groups := coalesceAdjacentEdits(name, es) + groups = coalesceInterveningIdentical(groups, chunkSize/4) + maxGroup := diffStats{Name: name} + for i, ds := range groups { + if maxLen >= 0 && numDiffs >= maxLen { + maxGroup = maxGroup.Append(ds) + continue + } + + // Print equal. + if ds.NumDiff() == 0 { + // Compute the number of leading and trailing equal bytes to print. 
+ var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 { + numLo++ + } + for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + numHi++ + } + if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 { + numHi = numEqual - numLo // Avoid pointless coalescing of single equal row + } + + // Print the equal bytes. + appendChunks(vx.Slice(0, numLo), diffIdentical) + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + } + appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical) + vx = vx.Slice(numEqual, vx.Len()) + vy = vy.Slice(numEqual, vy.Len()) + continue + } + + // Print unequal. + len0 := len(list) + nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved) + vx = vx.Slice(nx, vx.Len()) + ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted) + vy = vy.Slice(ny, vy.Len()) + numDiffs += len(list) - len0 + } + if maxGroup.IsZero() { + assert(vx.Len() == 0 && vy.Len() == 0) + } else { + list.AppendEllipsis(maxGroup) + } + return list +} + +// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent +// equal or unequal counts. +func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { + var prevCase int // Arbitrary index into which case last occurred + lastStats := func(i int) *diffStats { + if prevCase != i { + groups = append(groups, diffStats{Name: name}) + prevCase = i + } + return &groups[len(groups)-1] + } + for _, e := range es { + switch e { + case diff.Identity: + lastStats(1).NumIdentical++ + case diff.UniqueX: + lastStats(2).NumRemoved++ + case diff.UniqueY: + lastStats(2).NumInserted++ + case diff.Modified: + lastStats(2).NumModified++ + } + } + return groups +} + +// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize) +// equal groups into adjacent unequal groups that currently result in a +// dual inserted/removed printout. This acts as a high-pass filter to smooth +// out high-frequency changes within the windowSize. +func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { + groups, groupsOrig := groups[:0], groups + for i, ds := range groupsOrig { + if len(groups) >= 2 && ds.NumDiff() > 0 { + prev := &groups[len(groups)-2] // Unequal group + curr := &groups[len(groups)-1] // Equal group + next := &groupsOrig[i] // Unequal group + hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0 + hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0 + if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize { + *prev = prev.Append(*curr).Append(*next) + groups = groups[:len(groups)-1] // Truncate off equal group + continue + } + } + groups = append(groups, ds) + } + return groups +} diff --git a/extender/code/vendor/github.com/google/go-cmp/cmp/report_text.go b/extender/code/vendor/github.com/google/go-cmp/cmp/report_text.go new file mode 100644 index 000000000..8b12c05cd --- /dev/null +++ b/extender/code/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -0,0 +1,431 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
+ +package cmp + +import ( + "bytes" + "fmt" + "math/rand" + "strings" + "time" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/flags" +) + +var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 + +const maxColumnLength = 80 + +type indentMode int + +func (n indentMode) appendIndent(b []byte, d diffMode) []byte { + // The output of Diff is documented as being unstable to provide future + // flexibility in changing the output for more humanly readable reports. + // This logic intentionally introduces instability to the exact output + // so that users can detect accidental reliance on stability early on, + // rather than much later when an actual change to the format occurs. + if flags.Deterministic || randBool { + // Use regular spaces (U+0020). + switch d { + case diffUnknown, diffIdentical: + b = append(b, " "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) + } + } else { + // Use non-breaking spaces (U+00a0). + switch d { + case diffUnknown, diffIdentical: + b = append(b, "  "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) + } + } + return repeatCount(n).appendChar(b, '\t') +} + +type repeatCount int + +func (n repeatCount) appendChar(b []byte, c byte) []byte { + for ; n > 0; n-- { + b = append(b, c) + } + return b +} + +// textNode is a simplified tree-based representation of structured text. +// Possible node types are textWrap, textList, or textLine. +type textNode interface { + // Len reports the length in bytes of a single-line version of the tree. + // Nested textRecord.Diff and textRecord.Comment fields are ignored. + Len() int + // Equal reports whether the two trees are structurally identical. + // Nested textRecord.Diff and textRecord.Comment fields are compared. + Equal(textNode) bool + // String returns the string representation of the text tree. + // It is not guaranteed that len(x.String()) == x.Len(), + // nor that x.String() == y.String() implies that x.Equal(y). + String() string + + // formatCompactTo formats the contents of the tree as a single-line string + // to the provided buffer. Any nested textRecord.Diff and textRecord.Comment + // fields are ignored. + // + // However, not all nodes in the tree should be collapsed as a single-line. + // If a node can be collapsed as a single-line, it is replaced by a textLine + // node. Since the top-level node cannot replace itself, this also returns + // the current node itself. + // + // This does not mutate the receiver. + formatCompactTo([]byte, diffMode) ([]byte, textNode) + // formatExpandedTo formats the contents of the tree as a multi-line string + // to the provided buffer. In order for column alignment to operate well, + // formatCompactTo must be called before calling formatExpandedTo. + formatExpandedTo([]byte, diffMode, indentMode) []byte +} + +// textWrap is a wrapper that concatenates a prefix and/or a suffix +// to the underlying node. 
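+// For example, a formatted struct appears as its field list wrapped in a
+// "TypeName{" prefix and a "}" suffix.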
+type textWrap struct { + Prefix string // e.g., "bytes.Buffer{" + Value textNode // textWrap | textList | textLine + Suffix string // e.g., "}" + Metadata interface{} // arbitrary metadata; has no effect on formatting +} + +func (s *textWrap) Len() int { + return len(s.Prefix) + s.Value.Len() + len(s.Suffix) +} +func (s1 *textWrap) Equal(s2 textNode) bool { + if s2, ok := s2.(*textWrap); ok { + return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix + } + return false +} +func (s *textWrap) String() string { + var d diffMode + var n indentMode + _, s2 := s.formatCompactTo(nil, d) + b := n.appendIndent(nil, d) // Leading indent + b = s2.formatExpandedTo(b, d, n) // Main body + b = append(b, '\n') // Trailing newline + return string(b) +} +func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + n0 := len(b) // Original buffer length + b = append(b, s.Prefix...) + b, s.Value = s.Value.formatCompactTo(b, d) + b = append(b, s.Suffix...) + if _, ok := s.Value.(textLine); ok { + return b, textLine(b[n0:]) + } + return b, s +} +func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + b = append(b, s.Prefix...) + b = s.Value.formatExpandedTo(b, d, n) + b = append(b, s.Suffix...) + return b +} + +// textList is a comma-separated list of textWrap or textLine nodes. +// The list may be formatted as multi-lines or single-line at the discretion +// of the textList.formatCompactTo method. +type textList []textRecord +type textRecord struct { + Diff diffMode // e.g., 0 or '-' or '+' + Key string // e.g., "MyField" + Value textNode // textWrap | textLine + ElideComma bool // avoid trailing comma + Comment fmt.Stringer // e.g., "6 identical fields" +} + +// AppendEllipsis appends a new ellipsis node to the list if none already +// exists at the end. If cs is non-zero it coalesces the statistics with the +// previous diffStats. +func (s *textList) AppendEllipsis(ds diffStats) { + hasStats := !ds.IsZero() + if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) { + if hasStats { + *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds}) + } else { + *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true}) + } + return + } + if hasStats { + (*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds) + } +} + +func (s textList) Len() (n int) { + for i, r := range s { + n += len(r.Key) + if r.Key != "" { + n += len(": ") + } + n += r.Value.Len() + if i < len(s)-1 { + n += len(", ") + } + } + return n +} + +func (s1 textList) Equal(s2 textNode) bool { + if s2, ok := s2.(textList); ok { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + r1, r2 := s1[i], s2[i] + if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) { + return false + } + } + return true + } + return false +} + +func (s textList) String() string { + return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String() +} + +func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + s = append(textList(nil), s...) // Avoid mutating original + + // Determine whether we can collapse this list as a single line. + n0 := len(b) // Original buffer length + var multiLine bool + for i, r := range s { + if r.Diff == diffInserted || r.Diff == diffRemoved { + multiLine = true + } + b = append(b, r.Key...) + if r.Key != "" { + b = append(b, ": "...) 
+ } + b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff) + if _, ok := s[i].Value.(textLine); !ok { + multiLine = true + } + if r.Comment != nil { + multiLine = true + } + if i < len(s)-1 { + b = append(b, ", "...) + } + } + // Force multi-lined output when printing a removed/inserted node that + // is sufficiently long. + if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength { + multiLine = true + } + if !multiLine { + return b, textLine(b[n0:]) + } + return b, s +} + +func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + alignKeyLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return r.Key == "" || !isLine + }, + func(r textRecord) int { return utf8.RuneCountInString(r.Key) }, + ) + alignValueLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil + }, + func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) }, + ) + + // Format lists of simple lists in a batched form. + // If the list is sequence of only textLine values, + // then batch multiple values on a single line. + var isSimple bool + for _, r := range s { + _, isLine := r.Value.(textLine) + isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil + if !isSimple { + break + } + } + if isSimple { + n++ + var batch []byte + emitBatch := func() { + if len(batch) > 0 { + b = n.appendIndent(append(b, '\n'), d) + b = append(b, bytes.TrimRight(batch, " ")...) + batch = batch[:0] + } + } + for _, r := range s { + line := r.Value.(textLine) + if len(batch)+len(line)+len(", ") > maxColumnLength { + emitBatch() + } + batch = append(batch, line...) + batch = append(batch, ", "...) + } + emitBatch() + n-- + return n.appendIndent(append(b, '\n'), d) + } + + // Format the list as a multi-lined output. + n++ + for i, r := range s { + b = n.appendIndent(append(b, '\n'), d|r.Diff) + if r.Key != "" { + b = append(b, r.Key+": "...) + } + b = alignKeyLens[i].appendChar(b, ' ') + + b = r.Value.formatExpandedTo(b, d|r.Diff, n) + if !r.ElideComma { + b = append(b, ',') + } + b = alignValueLens[i].appendChar(b, ' ') + + if r.Comment != nil { + b = append(b, " // "+r.Comment.String()...) + } + } + n-- + + return n.appendIndent(append(b, '\n'), d) +} + +func (s textList) alignLens( + skipFunc func(textRecord) bool, + lenFunc func(textRecord) int, +) []repeatCount { + var startIdx, endIdx, maxLen int + lens := make([]repeatCount, len(s)) + for i, r := range s { + if skipFunc(r) { + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + startIdx, endIdx, maxLen = i+1, i+1, 0 + } else { + if maxLen < lenFunc(r) { + maxLen = lenFunc(r) + } + endIdx = i + 1 + } + } + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + return lens +} + +// textLine is a single-line segment of text and is always a leaf node +// in the textNode tree. 
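+// The textNil and textEllipsis values below are typical examples.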
+type textLine []byte + +var ( + textNil = textLine("nil") + textEllipsis = textLine("...") +) + +func (s textLine) Len() int { + return len(s) +} +func (s1 textLine) Equal(s2 textNode) bool { + if s2, ok := s2.(textLine); ok { + return bytes.Equal([]byte(s1), []byte(s2)) + } + return false +} +func (s textLine) String() string { + return string(s) +} +func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + return append(b, s...), s +} +func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte { + return append(b, s...) +} + +type diffStats struct { + Name string + NumIgnored int + NumIdentical int + NumRemoved int + NumInserted int + NumModified int +} + +func (s diffStats) IsZero() bool { + s.Name = "" + return s == diffStats{} +} + +func (s diffStats) NumDiff() int { + return s.NumRemoved + s.NumInserted + s.NumModified +} + +func (s diffStats) Append(ds diffStats) diffStats { + assert(s.Name == ds.Name) + s.NumIgnored += ds.NumIgnored + s.NumIdentical += ds.NumIdentical + s.NumRemoved += ds.NumRemoved + s.NumInserted += ds.NumInserted + s.NumModified += ds.NumModified + return s +} + +// String prints a humanly-readable summary of coalesced records. +// +// Example: +// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields" +func (s diffStats) String() string { + var ss []string + var sum int + labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"} + counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified} + for i, n := range counts { + if n > 0 { + ss = append(ss, fmt.Sprintf("%d %v", n, labels[i])) + } + sum += n + } + + // Pluralize the name (adjusting for some obscure English grammar rules). + name := s.Name + if sum > 1 { + name += "s" + if strings.HasSuffix(name, "ys") { + name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries" + } + } + + // Format the list according to English grammar (with Oxford comma). + switch n := len(ss); n { + case 0: + return "" + case 1, 2: + return strings.Join(ss, " and ") + " " + name + default: + return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name + } +} + +type commentString string + +func (s commentString) String() string { return string(s) } diff --git a/extender/code/vendor/github.com/google/go-cmp/cmp/report_value.go b/extender/code/vendor/github.com/google/go-cmp/cmp/report_value.go new file mode 100644 index 000000000..83031a7f5 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-cmp/cmp/report_value.go @@ -0,0 +1,121 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import "reflect" + +// valueNode represents a single node within a report, which is a +// structured representation of the value tree, containing information +// regarding which nodes are equal or not. +type valueNode struct { + parent *valueNode + + Type reflect.Type + ValueX reflect.Value + ValueY reflect.Value + + // NumSame is the number of leaf nodes that are equal. + // All descendants are equal only if NumDiff is 0. + NumSame int + // NumDiff is the number of leaf nodes that are not equal. + NumDiff int + // NumIgnored is the number of leaf nodes that are ignored. + NumIgnored int + // NumCompared is the number of leaf nodes that were compared + // using an Equal method or Comparer function. + NumCompared int + // NumTransformed is the number of non-leaf nodes that were transformed. 
+ NumTransformed int + // NumChildren is the number of transitive descendants of this node. + // This counts from zero; thus, leaf nodes have no descendants. + NumChildren int + // MaxDepth is the maximum depth of the tree. This counts from zero; + // thus, leaf nodes have a depth of zero. + MaxDepth int + + // Records is a list of struct fields, slice elements, or map entries. + Records []reportRecord // If populated, implies Value is not populated + + // Value is the result of a transformation, pointer indirect, of + // type assertion. + Value *valueNode // If populated, implies Records is not populated + + // TransformerName is the name of the transformer. + TransformerName string // If non-empty, implies Value is populated +} +type reportRecord struct { + Key reflect.Value // Invalid for slice element + Value *valueNode +} + +func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) { + vx, vy := ps.Values() + child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy} + switch s := ps.(type) { + case StructField: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child}) + case SliceIndex: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Value: child}) + case MapIndex: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child}) + case Indirect: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + case TypeAssertion: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + case Transform: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + parent.TransformerName = s.Name() + parent.NumTransformed++ + default: + assert(parent == nil) // Must be the root step + } + return child +} + +func (r *valueNode) Report(rs Result) { + assert(r.MaxDepth == 0) // May only be called on leaf nodes + + if rs.ByIgnore() { + r.NumIgnored++ + } else { + if rs.Equal() { + r.NumSame++ + } else { + r.NumDiff++ + } + } + assert(r.NumSame+r.NumDiff+r.NumIgnored == 1) + + if rs.ByMethod() { + r.NumCompared++ + } + if rs.ByFunc() { + r.NumCompared++ + } + assert(r.NumCompared <= 1) +} + +func (child *valueNode) PopStep() (parent *valueNode) { + if child.parent == nil { + return nil + } + parent = child.parent + parent.NumSame += child.NumSame + parent.NumDiff += child.NumDiff + parent.NumIgnored += child.NumIgnored + parent.NumCompared += child.NumCompared + parent.NumTransformed += child.NumTransformed + parent.NumChildren += child.NumChildren + 1 + if parent.MaxDepth < child.MaxDepth+1 { + parent.MaxDepth = child.MaxDepth + 1 + } + return parent +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/LICENSE b/extender/code/vendor/github.com/google/go-containerregistry/LICENSE new file mode 100644 index 000000000..7a4a3ea24 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/README.md b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/README.md new file mode 100644 index 000000000..1eb17c7ab --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/README.md @@ -0,0 +1,242 @@ +# `authn` + +[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/authn?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/authn) + +This README outlines how we acquire and use credentials when interacting with a registry. + +As much as possible, we attempt to emulate docker's authentication behavior and configuration so that this library "just works" if you've already configured credentials that work with docker; however, when things don't work, a basic understanding of what's going on can help with debugging. + +The official documentation for how docker authentication works is (reasonably) scattered across several different sites and GitHub repositories, so we've tried to summarize the relevant bits here. + +## tl;dr for consumers of this package + +By default, [`pkg/v1/remote`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote) uses [`Anonymous`](https://godoc.org/github.com/google/go-containerregistry/pkg/authn#Anonymous) credentials (i.e. _none_), which for most registries will only allow read access to public images. + +To use the credentials found in your docker config file, you can use the [`DefaultKeychain`](https://godoc.org/github.com/google/go-containerregistry/pkg/authn#DefaultKeychain), e.g.: + +```go +package main + +import ( + "fmt" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" +) + +func main() { + ref, err := name.ParseReference("registry.example.com/private/repo") + if err != nil { + panic(err) + } + + // Fetch the manifest using default credentials. + img, err := remote.Get(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain)) + if err != nil { + panic(err) + } + + // Prints the digest of registry.example.com/private/repo + fmt.Println(img.Digest) +} +``` + +(If you're only using [gcr.io](https://gcr.io), see the [`pkg/v1/google.Keychain`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/google#Keychain), which emulates [`docker-credential-gcr`](https://github.com/GoogleCloudPlatform/docker-credential-gcr).) + +## The Config File + +This file contains various configuration options for docker and is (by default) located at: +* `$HOME/.docker/config.json` (on linux and darwin), or +* `%USERPROFILE%\.docker\config.json` (on windows). + +You can override this location with the `DOCKER_CONFIG` environment variable. + +### Plaintext + +The config file is where your credentials are stored when you invoke `docker login`, e.g. the contents may look something like this: + +```json +{ + "auths": { + "registry.example.com": { + "auth": "QXp1cmVEaWFtb25kOmh1bnRlcjI=" + } + } +} +``` + +The `auths` map has an entry per registry, and the `auth` field contains your username and password encoded as [HTTP 'Basic' Auth](https://tools.ietf.org/html/rfc7617). 
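+
+As a quick sanity check, the `auth` value can be reproduced with a few lines
+of Go (a minimal sketch, not part of this library):
+
+```go
+package main
+
+import (
+ "encoding/base64"
+ "fmt"
+)
+
+func main() {
+ // The `auth` field is just base64(username + ":" + password).
+ fmt.Println(base64.StdEncoding.EncodeToString([]byte("AzureDiamond:hunter2")))
+ // Output: QXp1cmVEaWFtb25kOmh1bnRlcjI=
+}
+```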
+ +**NOTE**: This means that your credentials are stored _in plaintext_: + +```bash +$ echo "QXp1cmVEaWFtb25kOmh1bnRlcjI=" | base64 -d +AzureDiamond:hunter2 +``` + +For what it's worth, this config file is equivalent to: + +```json +{ + "auths": { + "registry.example.com": { + "username": "AzureDiamond", + "password": "hunter2" + } + } +} +``` + +... which is useful to know if e.g. your CI system provides you a registry username and password via environment variables and you want to populate this file manually without invoking `docker login`. + +### Helpers + +If you log in like this, docker will warn you that you should use a [credential helper](https://docs.docker.com/engine/reference/commandline/login/#credentials-store), and you should! + +To configure a global credential helper: +```json +{ + "credsStore": "osxkeychain" +} +``` + +To configure a per-registry credential helper: +```json +{ + "credHelpers": { + "gcr.io": "gcr" + } +} +``` + +We use [`github.com/docker/cli/cli/config.Load`](https://godoc.org/github.com/docker/cli/cli/config#Load) to parse the config file and invoke any necessary credential helpers. This handles the logic of taking a [`ConfigFile`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/configfile/file.go#L25-L54) + registry domain and producing an [`AuthConfig`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L3-L22), which determines how we authenticate to the registry. + +## Credential Helpers + +The [credential helper protocol](https://github.com/docker/docker-credential-helpers) allows you to configure a binary that supplies credentials for the registry, rather than hard-coding them in the config file. + +The protocol has several verbs, but the one we most care about is `get`. + +For example, using the following config file: +```json +{ + "credHelpers": { + "gcr.io": "gcr", + "eu.gcr.io": "gcr" + } +} +``` + +To acquire credentials for `gcr.io`, we look in the `credHelpers` map to find +the credential helper for `gcr.io` is `gcr`. By appending that value to +`docker-credential-`, we can get the name of the binary we need to use. + +For this example, that's `docker-credential-gcr`, which must be on our `$PATH`. +We'll then invoke that binary to get credentials: + +```bash +$ echo "gcr.io" | docker-credential-gcr get +{"Username":"_token","Secret":""} +``` + +You can configure the same credential helper for multiple registries, which is +why we need to pass the domain in via STDIN, e.g. if we were trying to access +`eu.gcr.io`, we'd do this instead: + +```bash +$ echo "eu.gcr.io" | docker-credential-gcr get +{"Username":"_token","Secret":""} +``` + +### Debugging credential helpers + +If a credential helper is configured but doesn't seem to be working, it can be +challenging to debug. Implementing a fake credential helper lets you poke around +to make it easier to see where the failure is happening. + +This "implements" a credential helper with hard-coded values: +``` +#!/usr/bin/env bash +echo '{"Username":"","Secret":"hunter2"}' +``` + + +This implements a credential helper that prints the output of +`docker-credential-gcr` to both stderr and whatever called it, which allows you +to snoop on another credential helper: +``` +#!/usr/bin/env bash +docker-credential-gcr $@ | tee >(cat 1>&2) +``` + +Put those files somewhere on your path, naming them e.g. 
+`docker-credential-hardcoded` and `docker-credential-tee`, then modify the +config file to use them: + +```json +{ + "credHelpers": { + "gcr.io": "tee", + "eu.gcr.io": "hardcoded" + } +} +``` + +The `docker-credential-tee` trick works with both `crane` and `docker`: + +```bash +$ crane manifest gcr.io/google-containers/pause > /dev/null +{"ServerURL":"","Username":"_dcgcr_1_5_0_token","Secret":""} + +$ docker pull gcr.io/google-containers/pause +Using default tag: latest +{"ServerURL":"","Username":"_dcgcr_1_5_0_token","Secret":""} +latest: Pulling from google-containers/pause +a3ed95caeb02: Pull complete +4964c72cd024: Pull complete +Digest: sha256:a78c2d6208eff9b672de43f880093100050983047b7b0afe0217d3656e1b0d5f +Status: Downloaded newer image for gcr.io/google-containers/pause:latest +gcr.io/google-containers/pause:latest +``` + +## The Registry + +There are two methods for authenticating against a registry: +[token](https://docs.docker.com/registry/spec/auth/token/) and +[oauth2](https://docs.docker.com/registry/spec/auth/oauth/). + +Both methods are used to acquire an opaque `Bearer` token (or +[RegistryToken](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L21)) +to use in the `Authorization` header. The registry will return a `401 +Unauthorized` during the [version +check](https://github.com/opencontainers/distribution-spec/blob/2c3975d1f03b67c9a0203199038adea0413f0573/spec.md#api-version-check) +(or during normal operations) with a +[Www-Authenticate](https://tools.ietf.org/html/rfc7235#section-4.1) challenge +indicating how to proceed. + +### Token + +If we get back an `AuthConfig` containing a [`Username/Password`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L5-L6) +or +[`Auth`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L7), +we'll use the token method for authentication: + +![basic](../../images/credhelper-basic.svg) + +### OAuth 2 + +If we get back an `AuthConfig` containing an [`IdentityToken`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L18) +we'll use the oauth2 method for authentication: + +![oauth](../../images/credhelper-oauth.svg) + +This happens when a credential helper returns a response with the +[`Username`](https://github.com/docker/docker-credential-helpers/blob/f78081d1f7fef6ad74ad6b79368de6348386e591/credentials/credentials.go#L16) +set to `<token>` (no, that's not a placeholder, the literal string `"<token>"`). +It is unclear why: [moby/moby#36926](https://github.com/moby/moby/issues/36926). + +We only support the oauth2 `grant_type` for `refresh_token` ([#629](https://github.com/google/go-containerregistry/issues/629)), +since it's impossible to determine from the registry response whether we should +use oauth, and the token method for authentication is widely implemented by +registries. diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/anon.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/anon.go new file mode 100644 index 000000000..83214957d --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/anon.go @@ -0,0 +1,26 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +// anonymous implements Authenticator for anonymous authentication. +type anonymous struct{} + +// Authorization implements Authenticator. +func (a *anonymous) Authorization() (*AuthConfig, error) { + return &AuthConfig{}, nil +} + +// Anonymous is a singleton Authenticator for providing anonymous auth. +var Anonymous Authenticator = &anonymous{} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/auth.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/auth.go new file mode 100644 index 000000000..0111f1ae7 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/auth.go @@ -0,0 +1,30 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +// auth is an Authenticator that simply returns the wrapped AuthConfig. +type auth struct { + config AuthConfig +} + +// FromConfig returns an Authenticator that just returns the given AuthConfig. +func FromConfig(cfg AuthConfig) Authenticator { + return &auth{cfg} +} + +// Authorization implements Authenticator. +func (a *auth) Authorization() (*AuthConfig, error) { + return &a.config, nil +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go new file mode 100644 index 000000000..690e81d05 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go @@ -0,0 +1,36 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +// AuthConfig contains authorization information for connecting to a Registry +// Inlined what we use from github.com/docker/cli/cli/config/types +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. 
+ IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} + +// Authenticator is used to authenticate Docker transports. +type Authenticator interface { + // Authorization returns the value to use in an http transport's Authorization header. + Authorization() (*AuthConfig, error) +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/basic.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/basic.go new file mode 100644 index 000000000..500cb6616 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/basic.go @@ -0,0 +1,29 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +// Basic implements Authenticator for basic authentication. +type Basic struct { + Username string + Password string +} + +// Authorization implements Authenticator. +func (b *Basic) Authorization() (*AuthConfig, error) { + return &AuthConfig{ + Username: b.Username, + Password: b.Password, + }, nil +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/bearer.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/bearer.go new file mode 100644 index 000000000..4cf86df92 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/bearer.go @@ -0,0 +1,27 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +// Bearer implements Authenticator for bearer authentication. +type Bearer struct { + Token string `json:"token"` +} + +// Authorization implements Authenticator. +func (b *Bearer) Authorization() (*AuthConfig, error) { + return &AuthConfig{ + RegistryToken: b.Token, + }, nil +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/doc.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/doc.go new file mode 100644 index 000000000..c2a5fc026 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/doc.go @@ -0,0 +1,17 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package authn defines different methods of authentication for +// talking to a container registry. +package authn diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/LICENSE b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/LICENSE new file mode 100644 index 000000000..7a4a3ea24 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/README.md b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/README.md new file mode 100644 index 000000000..c9ffdf677 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/README.md @@ -0,0 +1,45 @@ +# `k8schain` + +This is an implementation of the [github.com/google/go-containerregistry]( +https://github.com/google/go-containerregistry) library's [`authn.Keychain`]( +https://godoc.org/github.com/google/go-containerregistry/authn#Keychain) +interface based on the authentication semantics used by the Kubelet when +performing the pull of a Pod's images. + +## Usage + +### Creating a keychain + +A `k8schain` keychain can be built via one of: + +```go +// client is a kubernetes.Interface +kc, err := k8schain.New(ctx, client, k8schain.Options{}) +... + +// This method is suitable for use by controllers or other in-cluster processes. +kc, err := k8schain.NewInCluster(ctx, k8schain.Options{}) +... +``` + +### Using the keychain + +The `k8schain` keychain can be used directly as an `authn.Keychain`, e.g. + +```go + auth, err := kc.Resolve(registry) + if err != nil { + ... + } +``` + +Or, it can be used to override the default keychain used by this process, +which by default follows Docker's keychain semantics: + +```go +func init() { + // Override the default keychain used by this process to follow the + // Kubelet's keychain semantics. + authn.DefaultKeychain = kc +} +``` diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/doc.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/doc.go new file mode 100644 index 000000000..c9ae7f128 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/doc.go @@ -0,0 +1,18 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package k8schain exposes an implementation of the authn.Keychain interface +// based on the semantics the Kubelet follows when pulling the images for a +// Pod in Kubernetes. +package k8schain diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/go.mod b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/go.mod new file mode 100644 index 000000000..dbf78b715 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/go.mod @@ -0,0 +1,25 @@ +module github.com/google/go-containerregistry/pkg/authn/k8schain + +go 1.14 + +require ( + github.com/golang/protobuf v1.4.3 // indirect + github.com/google/go-containerregistry v0.4.1-0.20210128200529-19c2b639fab1 + github.com/google/uuid v1.1.2 // indirect + github.com/onsi/gomega v1.10.3 // indirect + github.com/vdemeester/k8s-pkg-credentialprovider v1.19.7 + golang.org/x/net v0.0.0-20210119194325-5f4716e94777 // indirect + golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c // indirect + golang.org/x/text v0.3.5 // indirect + google.golang.org/protobuf v1.25.0 // indirect + gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 // indirect + k8s.io/api v0.19.7 + k8s.io/apimachinery v0.19.7 + k8s.io/client-go v0.19.7 + k8s.io/klog/v2 v2.5.0 // indirect + k8s.io/kube-openapi v0.0.0-20210113233702-8566a335510f // indirect +) + +replace github.com/google/go-containerregistry => ../../.. 
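Editor's note on the vendored `k8schain` README above: the two snippets it shows (building a keychain, then resolving credentials) can be tied together in one minimal end-to-end sketch. This is illustrative only — the image reference and `Options` values below are hypothetical, and `remote.Get` / `remote.WithAuthFromKeychain` are assumed from the same go-containerregistry module being vendored here:

```go
package main

import (
	"context"
	"log"

	"github.com/google/go-containerregistry/pkg/authn/k8schain"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	ctx := context.Background()

	// Build a keychain from in-cluster credentials, as a controller would.
	// Namespace and ServiceAccountName are illustrative; k8schain.Options{} also works.
	kc, err := k8schain.NewInCluster(ctx, k8schain.Options{
		Namespace:          "default",
		ServiceAccountName: "default",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Resolve credentials for a (hypothetical) image and fetch its descriptor,
	// following the Kubelet's pull semantics described in the README.
	ref, err := name.ParseReference("registry.example.com/app:latest")
	if err != nil {
		log.Fatal(err)
	}
	desc, err := remote.Get(ref, remote.WithAuthFromKeychain(kc))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("resolved %s with digest %s", ref, desc.Digest)
}
```

Note also that the `replace github.com/google/go-containerregistry => ../../..` directive in the vendored `go.mod` above points the submodule back at the repository root, so the vendored copy builds against the in-tree go-containerregistry rather than a published release.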
diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/go.sum b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/go.sum new file mode 100644 index 000000000..1fc19ec8d --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/go.sum @@ -0,0 +1,753 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go v43.0.0+incompatible h1:/wSNCu0e6EsHFR4Qa3vBEBbicaprEHMyyga9g8RTULI= +github.com/Azure/azure-sdk-for-go v43.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.6 h1:5YWtOnckcudzIw8lPPBcWOnmIFWMtHci1ZWAZulMSx0= +github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= 
+github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= +github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8= +github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= +github.com/Azure/go-autorest/autorest/validation v0.1.0 h1:ISSNzGUh+ZSzizJWOWzs8bwpXIePbGLW4z/AmUFGH5A= +github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20200415212048-7901bc822317/go.mod h1:DF8FZRxMHMGv/vP2lQP6h+dYzzjpuRn24VeRiYn3qjQ= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc 
v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/aws/aws-sdk-go v1.28.2 h1:j5IXG9CdyLfcVfICqo1PXVv+rua+QQHbkXuvuU/JF+8= +github.com/aws/aws-sdk-go v1.28.2/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/stargz-snapshotter/estargz v0.0.0-20201223015020-a9a0c2d64694 h1:OVQ4FVXeE6OjzuUifzER+7EulqTqw/94oKSqnooEowQ= +github.com/containerd/stargz-snapshotter/estargz v0.0.0-20201223015020-a9a0c2d64694/go.mod h1:E9uVkkBKf0EaC39j2JVW9EzdNhYvpz6eQIjILHebruk= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd 
v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017 h1:2HQmlpI3yI9deH18Q6xiSOIjXD4sLI55Y/gfpa8/558= +github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7 h1:Cvj7S8I4Xpx78KAl6TwTmMHuHlZ/0SM60NUneGJQ7IE= +github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ= +github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= 
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= 
+github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod 
h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 
+github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0 
h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2 h1:g+4J5sZg6osfvEfkRZxJ1em0VT95/UOZgi/l7zi1/oE= +github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= 
+github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA= +github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod 
h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod 
h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/vdemeester/k8s-pkg-credentialprovider v1.19.7 h1:MJ5fV2Z0OyIuPvFVs0vi6VjTjxpdK1QT8oX/aWiUjYM= +github.com/vdemeester/k8s-pkg-credentialprovider v1.19.7/go.mod h1:K2nMO14cgZitdwBqdQps9tInJgcaXcU/7q5F59lpbNI= +github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp 
v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777 h1:003p0dJM77cxMSyCPFphvZf/Y5/NXf5fzg6ufd1/Oew= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e h1:hq86ru83GdWTlfQFZGO4nZJTU4Bs2wfHl8oFHRaXsfc= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time 
v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3 h1:DywqrEscRX7O2phNjkT0L6lhHKGBoMLCNX+XcAe7t6s= +golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.1-0.20200106000736-b8fc810ca6b5/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.1/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto 
v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod 
h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.19.7 h1:MpHhls03C2pyzoYcpbe4QqYiiZjdvW+tuWq6TbjV14Y= +k8s.io/api v0.19.7/go.mod h1:KTryDUT3l6Mtv7K2J2486PNL9DBns3wOYTkGR+iz63Y= +k8s.io/apimachinery v0.19.7 h1:nTaEnYVH+i//aPgMA0zTEV2lfVLCV9LextqVd67mulc= +k8s.io/apimachinery v0.19.7/go.mod h1:6sRbGRAVY5DOCuZwB5XkqguBqpqLU6q/kOaOdk29z6Q= +k8s.io/apiserver v0.19.7/go.mod h1:DmWVQggNePspa+vSsVytVbS3iBSDTXdJVt0akfHacKk= +k8s.io/client-go v0.19.7 h1:SoJ4mzZ9LyXBGDe8MmpMznw0CwQ1ITWgsmG7GixvhUU= +k8s.io/client-go v0.19.7/go.mod h1:iytGI7S3kmv6bWnn+bSQUE4VlrEi4YFssvVB7J7Hvqg= +k8s.io/cloud-provider v0.19.7/go.mod h1:aO/VpUwkG+JQN7ZXc5WBLZ5NBXuq/Y5B6vri6U94PZ8= +k8s.io/code-generator v0.19.7 h1:kM/68Y26Z/u//TFc1ggVVcg62te8A2yQh57jBfD0FWQ= +k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= +k8s.io/component-base v0.19.7 h1:ZXS2VRWOWBOc2fTd1zjzhi/b/mkqFT9FDqiNsn1cH30= +k8s.io/component-base v0.19.7/go.mod h1:YX8spPBgwl3I6UGcSdQiEMAqRMSUsGQOW7SEr4+Qa3U= +k8s.io/csi-translation-lib v0.19.7/go.mod h1:WghizPQuzuygr2WdpgN2EjcNpDD2V4EAbxFXsgHgSBk= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded h1:JApXBKYyB7l9xx+DK7/+mFjC7A9Bt5A93FPvFD0HIFE= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.5.0 h1:8mOnjf1RmUPW6KRqQCfYSZq/K20Unmp3IhuZUhxl8KI= +k8s.io/klog/v2 v2.5.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-openapi v0.0.0-20210113233702-8566a335510f 
h1:ZcWbnalfwIst131Zff7dGd1HQdt+NA9q7z9Fi0vbsHY= +k8s.io/kube-openapi v0.0.0-20210113233702-8566a335510f/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/legacy-cloud-providers v0.19.7 h1:YJ/l/8/Hn56I9m1cudK8aNypRA/NvI/hYhg8fo/CTus= +k8s.io/legacy-cloud-providers v0.19.7/go.mod h1:dsZk4gH9QIwAtHQ8CK0Ps257xlfgoXE3tMkMNhW2xDU= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/k8schain.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/k8schain.go new file mode 100644 index 000000000..00cec9008 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/k8schain.go @@ -0,0 +1,178 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package k8schain + +import ( + "context" + "fmt" + "sync" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + credentialprovider "github.com/vdemeester/k8s-pkg-credentialprovider" + credentialprovidersecrets "github.com/vdemeester/k8s-pkg-credentialprovider/secrets" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +// Options holds configuration data for guiding credential resolution. +type Options struct { + // Namespace holds the namespace inside of which we are resolving the + // image reference. If empty, "default" is assumed. + Namespace string + // ServiceAccountName holds the serviceaccount as which the container + // will run (scoped to Namespace). If empty, "default" is assumed. + ServiceAccountName string + // ImagePullSecrets holds the names of the Kubernetes secrets (scoped to + // Namespace) containing credential data to use for the image pull. 
+ ImagePullSecrets []string +} + +var ( + keyring credentialprovider.DockerKeyring + once sync.Once +) + +// New returns a new authn.Keychain suitable for resolving image references as +// scoped by the provided Options. It speaks to Kubernetes through the provided +// client interface. +func New(ctx context.Context, client kubernetes.Interface, opt Options) (authn.Keychain, error) { + if opt.Namespace == "" { + opt.Namespace = "default" + } + if opt.ServiceAccountName == "" { + opt.ServiceAccountName = "default" + } + + // Implement a Kubernetes-style authentication keychain. + // This needs to support roughly the following kinds of authentication: + // 1) The implicit authentication from k8s.io/kubernetes/pkg/credentialprovider + // 2) The explicit authentication from imagePullSecrets on Pod + // 3) The semi-implicit authentication where imagePullSecrets are on the + // Pod's service account. + + // First, fetch all of the explicitly declared pull secrets + var pullSecrets []v1.Secret + if client != nil { + for _, name := range opt.ImagePullSecrets { + ps, err := client.CoreV1().Secrets(opt.Namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + pullSecrets = append(pullSecrets, *ps) + } + + // Second, fetch all of the pull secrets attached to our service account. + sa, err := client.CoreV1().ServiceAccounts(opt.Namespace).Get(ctx, opt.ServiceAccountName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + for _, localObj := range sa.ImagePullSecrets { + ps, err := client.CoreV1().Secrets(opt.Namespace).Get(ctx, localObj.Name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + pullSecrets = append(pullSecrets, *ps) + } + } + + once.Do(func() { + keyring = credentialprovider.NewDockerKeyring() + }) + + // Third, extend the default keyring with the pull secrets. + kr, err := credentialprovidersecrets.MakeDockerKeyring(pullSecrets, keyring) + if err != nil { + return nil, err + } + return &keychain{ + keyring: kr, + }, nil +} + +// NewInCluster returns a new authn.Keychain suitable for resolving image references as +// scoped by the provided Options, constructing a kubernetes.Interface based on in-cluster +// authentication. +func NewInCluster(ctx context.Context, opt Options) (authn.Keychain, error) { + clusterConfig, err := rest.InClusterConfig() + if err != nil { + return nil, err + } + + client, err := kubernetes.NewForConfig(clusterConfig) + if err != nil { + return nil, err + } + return New(ctx, client, opt) +} + +// NewNoClient returns a new authn.Keychain that supports the portions of the K8s keychain +// that don't read ImagePullSecrets. This limits it to roughly the Node-identity-based +// authentication schemes in Kubernetes pkg/credentialprovider. This version of the +// k8schain drops the requirement that we run as a K8s serviceaccount with access to all +// of the on-cluster secrets. This drop in fidelity also diminishes its value as a stand-in +// for Kubernetes authentication, but this actually targets a different use-case. What +// remains is an interesting sweet spot: this variant can serve as a credential provider +// for all of the major public clouds, but in library form (vs. an executable you exec). +func NewNoClient(ctx context.Context) (authn.Keychain, error) { + return New(ctx, nil, Options{}) +} + +type lazyProvider struct { + kc *keychain + image string +} + +// Authorization implements Authenticator. 
+func (lp lazyProvider) Authorization() (*authn.AuthConfig, error) { + creds, found := lp.kc.keyring.Lookup(lp.image) + if !found || len(creds) < 1 { + return nil, fmt.Errorf("keychain returned no credentials for %q", lp.image) + } + authConfig := creds[0] + return &authn.AuthConfig{ + Username: authConfig.Username, + Password: authConfig.Password, + Auth: authConfig.Auth, + IdentityToken: authConfig.IdentityToken, + RegistryToken: authConfig.RegistryToken, + }, nil +} + +type keychain struct { + keyring credentialprovider.DockerKeyring +} + +// Resolve implements authn.Keychain +func (kc *keychain) Resolve(target authn.Resource) (authn.Authenticator, error) { + var image string + if repo, ok := target.(name.Repository); ok { + image = repo.String() + } else { + // Lookup expects an image reference and we only have a registry. + image = target.RegistryStr() + "/foo/bar" + } + + if creds, found := kc.keyring.Lookup(image); !found || len(creds) < 1 { + return authn.Anonymous, nil + } + // TODO(mattmoor): How to support multiple credentials? + return lazyProvider{ + kc: kc, + image: image, + }, nil +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/k8schain_aws.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/k8schain_aws.go new file mode 100644 index 000000000..4f245358a --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/k8schain_aws.go @@ -0,0 +1,21 @@ +// +build !disable_aws +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package k8schain + +import ( + // aws credential provider + _ "github.com/vdemeester/k8s-pkg-credentialprovider/aws" +) diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/k8schain_azure.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/k8schain_azure.go new file mode 100644 index 000000000..3ff3fff25 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/k8schain_azure.go @@ -0,0 +1,21 @@ +// +build !disable_azure +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
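// ----------------------------------------------------------------------------
// Editor's sketch (annotation, not part of the vendored sources): how the
// extender might use the k8schain package above to resolve registry
// credentials from inside a cluster. The namespace, service account, secret
// name, and image reference are hypothetical; the k8schain/authn/name APIs
// are the ones shown verbatim in this diff.
//
//	package main
//
//	import (
//		"context"
//		"log"
//
//		"github.com/google/go-containerregistry/pkg/authn/k8schain"
//		"github.com/google/go-containerregistry/pkg/name"
//	)
//
//	func main() {
//		ctx := context.Background()
//		// Build a keychain from node identity, the service account's
//		// imagePullSecrets, and the explicitly named pull secrets.
//		kc, err := k8schain.NewInCluster(ctx, k8schain.Options{
//			Namespace:          "build",             // hypothetical
//			ServiceAccountName: "extender",          // hypothetical
//			ImagePullSecrets:   []string{"regcred"}, // hypothetical
//		})
//		if err != nil {
//			log.Fatal(err)
//		}
//		ref, err := name.ParseReference("registry.example.com/app/run:latest")
//		if err != nil {
//			log.Fatal(err)
//		}
//		// Resolve falls back to authn.Anonymous when nothing matches.
//		auth, err := kc.Resolve(ref.Context())
//		if err != nil {
//			log.Fatal(err)
//		}
//		cfg, err := auth.Authorization()
//		if err != nil {
//			log.Fatal(err)
//		}
//		log.Printf("resolved credentials for user %q", cfg.Username)
//	}
// ----------------------------------------------------------------------------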
+ +package k8schain + +import ( + // azure credential provider + _ "github.com/vdemeester/k8s-pkg-credentialprovider/azure" +) diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/k8schain_gcp.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/k8schain_gcp.go new file mode 100644 index 000000000..67c96c300 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/k8schain_gcp.go @@ -0,0 +1,21 @@ +// +build !disable_gcp +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package k8schain + +import ( + // gcp credential provider + _ "github.com/vdemeester/k8s-pkg-credentialprovider/gcp" +) diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go new file mode 100644 index 000000000..60eebc759 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go @@ -0,0 +1,89 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +import ( + "os" + + "github.com/docker/cli/cli/config" + "github.com/docker/cli/cli/config/types" + "github.com/google/go-containerregistry/pkg/name" +) + +// Resource represents a registry or repository that can be authenticated against. +type Resource interface { + // String returns the full string representation of the target, e.g. + // gcr.io/my-project or just gcr.io. + String() string + + // RegistryStr returns just the registry portion of the target, e.g. for + // gcr.io/my-project, this should just return gcr.io. This is needed to + // pull out an appropriate hostname. + RegistryStr() string +} + +// Keychain is an interface for resolving an image reference to a credential. +type Keychain interface { + // Resolve looks up the most appropriate credential for the specified target. + Resolve(Resource) (Authenticator, error) +} + +// defaultKeychain implements Keychain with the semantics of the standard Docker +// credential keychain. +type defaultKeychain struct{} + +var ( + // DefaultKeychain implements Keychain by interpreting the docker config file. + DefaultKeychain Keychain = &defaultKeychain{} +) + +const ( + // DefaultAuthKey is the key used for dockerhub in config files, which + // is hardcoded for historical reasons. 
+ DefaultAuthKey = "https://" + name.DefaultRegistry + "/v1/" +) + +// Resolve implements Keychain. +func (dk *defaultKeychain) Resolve(target Resource) (Authenticator, error) { + cf, err := config.Load(os.Getenv("DOCKER_CONFIG")) + if err != nil { + return nil, err + } + + // See: + // https://github.com/google/ko/issues/90 + // https://github.com/moby/moby/blob/fc01c2b481097a6057bec3cd1ab2d7b4488c50c4/registry/config.go#L397-L404 + key := target.RegistryStr() + if key == name.DefaultRegistry { + key = DefaultAuthKey + } + + cfg, err := cf.GetAuthConfig(key) + if err != nil { + return nil, err + } + + empty := types.AuthConfig{} + if cfg == empty { + return Anonymous, nil + } + return FromConfig(AuthConfig{ + Username: cfg.Username, + Password: cfg.Password, + Auth: cfg.Auth, + IdentityToken: cfg.IdentityToken, + RegistryToken: cfg.RegistryToken, + }), nil +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go new file mode 100644 index 000000000..3b1804f5d --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go @@ -0,0 +1,41 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +type multiKeychain struct { + keychains []Keychain +} + +// Assert that our multi-keychain implements Keychain. +var _ (Keychain) = (*multiKeychain)(nil) + +// NewMultiKeychain composes a list of keychains into one new keychain. +func NewMultiKeychain(kcs ...Keychain) Keychain { + return &multiKeychain{keychains: kcs} +} + +// Resolve implements Keychain. +func (mk *multiKeychain) Resolve(target Resource) (Authenticator, error) { + for _, kc := range mk.keychains { + auth, err := kc.Resolve(target) + if err != nil { + return nil, err + } + if auth != Anonymous { + return auth, nil + } + } + return Anonymous, nil +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/internal/redact/redact.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/internal/redact/redact.go new file mode 100644 index 000000000..dc9c56b7f --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/internal/redact/redact.go @@ -0,0 +1,35 @@ +// Copyright 2020 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
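// ----------------------------------------------------------------------------
// Editor's sketch (annotation, not part of the vendored sources): composing
// the keychains above. DefaultKeychain reads the local Docker config file;
// NewMultiKeychain tries each keychain in order and yields Anonymous only if
// none of them holds a credential. The registry host is hypothetical.
//
//	package main
//
//	import (
//		"fmt"
//		"log"
//
//		"github.com/google/go-containerregistry/pkg/authn"
//		"github.com/google/go-containerregistry/pkg/name"
//	)
//
//	func main() {
//		// Docker config first; an in-cluster keychain could be appended here.
//		kc := authn.NewMultiKeychain(authn.DefaultKeychain)
//
//		reg, err := name.NewRegistry("registry.example.com") // hypothetical
//		if err != nil {
//			log.Fatal(err)
//		}
//		auth, err := kc.Resolve(reg)
//		if err != nil {
//			log.Fatal(err)
//		}
//		if auth == authn.Anonymous {
//			fmt.Println("no stored credentials; pulling anonymously")
//		}
//	}
// ----------------------------------------------------------------------------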
+ +// Package redact contains a simple context signal for redacting requests. +package redact + +import ( + "context" +) + +type contextKey string + +var redactKey = contextKey("redact") + +// NewContext creates a new ctx with the reason for redaction. +func NewContext(ctx context.Context, reason string) context.Context { + return context.WithValue(ctx, redactKey, reason) +} + +// FromContext returns the redaction reason, if any. +func FromContext(ctx context.Context) (bool, string) { + reason, ok := ctx.Value(redactKey).(string) + return ok, reason +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/internal/retry/retry.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/internal/retry/retry.go new file mode 100644 index 000000000..542ba49b4 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/internal/retry/retry.go @@ -0,0 +1,77 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package retry provides methods for retrying operations. It is a thin wrapper +// around k8s.io/apimachinery/pkg/util/wait to make certain operations easier. +package retry + +import ( + "context" + "fmt" + + "github.com/google/go-containerregistry/pkg/internal/retry/wait" +) + +// Backoff is an alias of our own wait.Backoff to avoid name conflicts with +// the kubernetes wait package. Typing retry.Backoff is easier than fixing +// the wrong import every time you use wait.Backoff. +type Backoff = wait.Backoff + +// This is implemented by several errors in the net package as well as our +// transport.Error. +type temporary interface { + Temporary() bool +} + +// IsTemporary returns true if err implements Temporary() and it returns true. +func IsTemporary(err error) bool { + if err == context.DeadlineExceeded { + return false + } + if te, ok := err.(temporary); ok && te.Temporary() { + return true + } + return false +} + +// IsNotNil returns true if err is not nil. +func IsNotNil(err error) bool { + return err != nil +} + +// Predicate determines whether an error should be retried. +type Predicate func(error) (retry bool) + +// Retry retries a given function, f, until a predicate is satisfied, using +// exponential backoff. If the predicate is never satisfied, it will return the +// last error returned by f.
+func Retry(f func() error, p Predicate, backoff wait.Backoff) (err error) { + if f == nil { + return fmt.Errorf("nil f passed to retry") + } + if p == nil { + return fmt.Errorf("nil p passed to retry") + } + + condition := func() (bool, error) { + err = f() + if p(err) { + return false, nil + } + return true, err + } + + wait.ExponentialBackoff(backoff, condition) + return +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/internal/retry/wait/kubernetes_apimachinery_wait.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/internal/retry/wait/kubernetes_apimachinery_wait.go new file mode 100644 index 000000000..ab06e5f10 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/internal/retry/wait/kubernetes_apimachinery_wait.go @@ -0,0 +1,123 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package wait is a subset of k8s.io/apimachinery to avoid conflicts +// in dependencies (specifically, logging). +package wait + +import ( + "errors" + "math/rand" + "time" +) + +// Jitter returns a time.Duration between duration and duration + maxFactor * +// duration. +// +// This allows clients to avoid converging on periodic behavior. If maxFactor +// is 0.0, a suggested default value will be chosen. +func Jitter(duration time.Duration, maxFactor float64) time.Duration { + if maxFactor <= 0.0 { + maxFactor = 1.0 + } + wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration)) + return wait +} + +// ErrWaitTimeout is returned when the condition exited without success. +var ErrWaitTimeout = errors.New("timed out waiting for the condition") + +// ConditionFunc returns true if the condition is satisfied, or an error +// if the loop should be aborted. +type ConditionFunc func() (done bool, err error) + +// Backoff holds parameters applied to a Backoff function. +type Backoff struct { + // The initial duration. + Duration time.Duration + // Duration is multiplied by factor each iteration, if factor is not zero + // and the limits imposed by Steps and Cap have not been reached. + // Should not be negative. + // The jitter does not contribute to the updates to the duration parameter. + Factor float64 + // The sleep at each iteration is the duration plus an additional + // amount chosen uniformly at random from the interval between + // zero and `jitter*duration`. + Jitter float64 + // The remaining number of iterations in which the duration + // parameter may change (but progress can be stopped earlier by + // hitting the cap). If not positive, the duration is not + // changed. Used for exponential backoff in combination with + // Factor and Cap. + Steps int + // A limit on revised values of the duration parameter. If a + // multiplication by the factor parameter would make the duration + // exceed the cap then the duration is set to the cap and the + // steps parameter is set to zero. 
+ Cap time.Duration +} + +// Step (1) returns an amount of time to sleep determined by the +// original Duration and Jitter and (2) mutates the provided Backoff +// to update its Steps and Duration. +func (b *Backoff) Step() time.Duration { + if b.Steps < 1 { + if b.Jitter > 0 { + return Jitter(b.Duration, b.Jitter) + } + return b.Duration + } + b.Steps-- + + duration := b.Duration + + // calculate the next step + if b.Factor != 0 { + b.Duration = time.Duration(float64(b.Duration) * b.Factor) + if b.Cap > 0 && b.Duration > b.Cap { + b.Duration = b.Cap + b.Steps = 0 + } + } + + if b.Jitter > 0 { + duration = Jitter(duration, b.Jitter) + } + return duration +} + +// ExponentialBackoff repeats a condition check with exponential backoff. +// +// It repeatedly checks the condition and then sleeps, using `backoff.Step()` +// to determine the length of the sleep and adjust Duration and Steps. +// Stops and returns as soon as: +// 1. the condition check returns true or an error, +// 2. `backoff.Steps` checks of the condition have been done, or +// 3. a sleep truncated by the cap on duration has been completed. +// In case (1) the returned error is what the condition function returned. +// In all other cases, ErrWaitTimeout is returned. +func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error { + for backoff.Steps > 0 { + if ok, err := condition(); err != nil || ok { + return err + } + if backoff.Steps == 1 { + break + } + time.Sleep(backoff.Step()) + } + return ErrWaitTimeout +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/logs/logs.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/logs/logs.go new file mode 100644 index 000000000..5d25d63d6 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/logs/logs.go @@ -0,0 +1,39 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package logs exposes the loggers used by this library. +package logs + +import ( + "io/ioutil" + "log" +) + +var ( + // Warn is used to log non-fatal errors. + Warn = log.New(ioutil.Discard, "", log.LstdFlags) + + // Progress is used to log notable, successful events. + Progress = log.New(ioutil.Discard, "", log.LstdFlags) + + // Debug is used to log information that is useful for debugging. + Debug = log.New(ioutil.Discard, "", log.LstdFlags) +) + +// Enabled checks to see if the logger's writer is set to something other +// than ioutil.Discard. This allows callers to avoid expensive operations +// that will end up in /dev/null anyway. 
+func Enabled(l *log.Logger) bool { + return l.Writer() != ioutil.Discard +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/README.md b/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/README.md new file mode 100644 index 000000000..4889b8446 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/README.md @@ -0,0 +1,3 @@ +# `name` + +[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/name?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/name) diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/check.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/check.go new file mode 100644 index 000000000..01b03e562 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/check.go @@ -0,0 +1,43 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + "strings" + "unicode/utf8" +) + +// stripRunesFn returns a function which returns -1 (i.e. a value which +// signals deletion in strings.Map) for runes in 'runes', and the rune otherwise. +func stripRunesFn(runes string) func(rune) rune { + return func(r rune) rune { + if strings.ContainsRune(runes, r) { + return -1 + } + return r + } +} + +// checkElement checks a given named element matches character and length restrictions. +// Returns true if the given element adheres to the given restrictions, false otherwise. +func checkElement(name, element, allowedRunes string, minRunes, maxRunes int) error { + numRunes := utf8.RuneCountInString(element) + if (numRunes < minRunes) || (maxRunes < numRunes) { + return NewErrBadName("%s must be between %d and %d runes in length: %s", name, minRunes, maxRunes, element) + } else if len(strings.Map(stripRunesFn(allowedRunes), element)) != 0 { + return NewErrBadName("%s can only contain the runes `%s`: %s", name, allowedRunes, element) + } + return nil +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/digest.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/digest.go new file mode 100644 index 000000000..120dd216a --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/digest.go @@ -0,0 +1,96 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
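// ----------------------------------------------------------------------------
// Editor's sketch (annotation, not part of the vendored sources): the call
// shape of the retry/wait and logs packages above. Note that pkg/internal/...
// is importable only from inside go-containerregistry itself, so this shows
// how the library uses its own helper rather than an external API. The
// durations and the fetch function are hypothetical.
//
//	package remote // i.e., somewhere inside go-containerregistry
//
//	import (
//		"os"
//		"time"
//
//		"github.com/google/go-containerregistry/pkg/internal/retry"
//		"github.com/google/go-containerregistry/pkg/logs"
//	)
//
//	func fetchWithRetry(fetch func() error) error {
//		// Surface the library's warnings while debugging (see logs.go above).
//		logs.Warn.SetOutput(os.Stderr)
//
//		backoff := retry.Backoff{
//			Duration: 100 * time.Millisecond, // first sleep
//			Factor:   2.0,                    // double the sleep each step
//			Jitter:   0.1,                    // add 0-10% random noise
//			Steps:    4,                      // at most 4 condition checks
//			Cap:      2 * time.Second,        // upper bound on a single sleep
//		}
//		// Retry only transient failures; any other error stops immediately.
//		return retry.Retry(fetch, retry.IsTemporary, backoff)
//	}
// ----------------------------------------------------------------------------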
+ +package name + +import ( + "strings" +) + +const ( + // These have the form: sha256: + // TODO(dekkagaijin): replace with opencontainers/go-digest or docker/distribution's validation. + digestChars = "sh:0123456789abcdef" + digestDelim = "@" +) + +// Digest stores a digest name in a structured form. +type Digest struct { + Repository + digest string + original string +} + +// Ensure Digest implements Reference +var _ Reference = (*Digest)(nil) + +// Context implements Reference. +func (d Digest) Context() Repository { + return d.Repository +} + +// Identifier implements Reference. +func (d Digest) Identifier() string { + return d.DigestStr() +} + +// DigestStr returns the digest component of the Digest. +func (d Digest) DigestStr() string { + return d.digest +} + +// Name returns the name from which the Digest was derived. +func (d Digest) Name() string { + return d.Repository.Name() + digestDelim + d.DigestStr() +} + +// String returns the original input string. +func (d Digest) String() string { + return d.original +} + +func checkDigest(name string) error { + return checkElement("digest", name, digestChars, 7+64, 7+64) +} + +// NewDigest returns a new Digest representing the given name. +func NewDigest(name string, opts ...Option) (Digest, error) { + // Split on "@" + parts := strings.Split(name, digestDelim) + if len(parts) != 2 { + return Digest{}, NewErrBadName("a digest must contain exactly one '@' separator (e.g. registry/repository@digest) saw: %s", name) + } + base := parts[0] + digest := parts[1] + + // Always check that the digest is valid. + if err := checkDigest(digest); err != nil { + return Digest{}, err + } + + tag, err := NewTag(base, opts...) + if err == nil { + base = tag.Repository.Name() + } + + repo, err := NewRepository(base, opts...) + if err != nil { + return Digest{}, err + } + return Digest{ + Repository: repo, + digest: digest, + original: name, + }, nil +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/doc.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/doc.go new file mode 100644 index 000000000..b294794dc --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/doc.go @@ -0,0 +1,42 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package name defines structured types for representing image references. +// +// What's in a name? For image references, not nearly enough! +// +// Image references look a lot like URLs, but they differ in that they don't +// contain the scheme (http or https), they can end with a :tag or a @digest +// (the latter being validated), and they perform defaulting for missing +// components. +// +// Since image references don't contain the scheme, we do our best to infer +// if we use http or https from the given hostname. 
We allow http fallback for +// any host that looks like localhost (localhost, 127.0.0.1, ::1), ends in +// ".local", or is in the "private" address space per RFC 1918. For everything +// else, we assume https only. To override this heuristic, use the Insecure +// option. +// +// Image references with a digest signal to us that we should verify the content +// of the image matches the digest. E.g. when pulling a Digest reference, we'll +// calculate the sha256 of the manifest returned by the registry and error out +// if it doesn't match what we asked for. +// +// For defaulting, we interpret "ubuntu" as +// "index.docker.io/library/ubuntu:latest" because we add the missing repo +// "library", the missing registry "index.docker.io", and the missing tag +// "latest". To disable this defaulting, use the StrictValidation option. This +// is useful e.g. to only allow image references that explicitly set a tag or +// digest, so that you don't accidentally pull "latest". +package name diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/errors.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/errors.go new file mode 100644 index 000000000..7847cc5d1 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/errors.go @@ -0,0 +1,37 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import "fmt" + +// ErrBadName is an error for when a bad docker name is supplied. +type ErrBadName struct { + info string +} + +func (e *ErrBadName) Error() string { + return e.info +} + +// NewErrBadName returns a ErrBadName which returns the given formatted string from Error(). +func NewErrBadName(fmtStr string, args ...interface{}) *ErrBadName { + return &ErrBadName{fmt.Sprintf(fmtStr, args...)} +} + +// IsErrBadName returns true if the given error is an ErrBadName. +func IsErrBadName(err error) bool { + _, ok := err.(*ErrBadName) + return ok +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/options.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/options.go new file mode 100644 index 000000000..d14fedcda --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/options.go @@ -0,0 +1,83 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
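// ----------------------------------------------------------------------------
// Editor's sketch (annotation, not part of the vendored sources): parsing a
// digest reference with the name package above. The repository is
// hypothetical; the digest is a dummy value of the required sha256:<64 hex>
// shape.
//
//	package main
//
//	import (
//		"fmt"
//		"log"
//
//		"github.com/google/go-containerregistry/pkg/name"
//	)
//
//	func main() {
//		d, err := name.NewDigest("gcr.io/example/app@sha256:" +
//			"0000000000000000000000000000000000000000000000000000000000000000")
//		if err != nil {
//			log.Fatal(err)
//		}
//		fmt.Println(d.Context().Name()) // gcr.io/example/app
//		fmt.Println(d.DigestStr())      // sha256:0000...
//	}
// ----------------------------------------------------------------------------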
+ +package name + +const ( + // DefaultRegistry is the registry name that will be used if no registry + // provided and the default is not overridden. + DefaultRegistry = "index.docker.io" + defaultRegistryAlias = "docker.io" + + // DefaultTag is the tag name that will be used if no tag provided and the + // default is not overridden. + DefaultTag = "latest" +) + +type options struct { + strict bool // weak by default + insecure bool // secure by default + defaultRegistry string + defaultTag string +} + +func makeOptions(opts ...Option) options { + opt := options{ + defaultRegistry: DefaultRegistry, + defaultTag: DefaultTag, + } + for _, o := range opts { + o(&opt) + } + return opt +} + +// Option is a functional option for name parsing. +type Option func(*options) + +// StrictValidation is an Option that requires image references to be fully +// specified; i.e. no defaulting for registry (dockerhub), repo (library), +// or tag (latest). +func StrictValidation(opts *options) { + opts.strict = true +} + +// WeakValidation is an Option that sets defaults when parsing names, see +// StrictValidation. +func WeakValidation(opts *options) { + opts.strict = false +} + +// Insecure is an Option that allows image references to be fetched without TLS. +func Insecure(opts *options) { + opts.insecure = true +} + +// OptionFn is a function that returns an option. +type OptionFn func() Option + +// WithDefaultRegistry sets the default registry that will be used if one is not +// provided. +func WithDefaultRegistry(r string) Option { + return func(opts *options) { + opts.defaultRegistry = r + } +} + +// WithDefaultTag sets the default tag that will be used if one is not provided. +func WithDefaultTag(t string) Option { + return func(opts *options) { + opts.defaultTag = t + } +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/ref.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/ref.go new file mode 100644 index 000000000..f9388253f --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/ref.go @@ -0,0 +1,49 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + "fmt" +) + +// Reference defines the interface that consumers use when they can +// take either a tag or a digest. +type Reference interface { + fmt.Stringer + + // Context accesses the Repository context of the reference. + Context() Repository + + // Identifier accesses the type-specific portion of the reference. + Identifier() string + + // Name is the fully-qualified reference name. + Name() string + + // Scope is the scope needed to access this reference. + Scope(string) string +} + +// ParseReference parses the string as a reference, either by tag or digest. 
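As a usage sketch for `ParseReference` (whose implementation follows) together with the functional options above: the returned `Reference` can be type-switched back into a concrete `Tag` or `Digest`. This is illustrative only; `registry.example.com` is a made-up registry name.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/google/go-containerregistry/pkg/name"
)

func describe(s string) {
	ref, err := name.ParseReference(s, name.WithDefaultRegistry("registry.example.com"))
	if err != nil {
		fmt.Println("bad reference:", err)
		return
	}
	switch r := ref.(type) {
	case name.Tag:
		fmt.Printf("tag %q in %s\n", r.TagStr(), r.Context().Name())
	case name.Digest:
		fmt.Printf("digest %q in %s\n", r.DigestStr(), r.Context().Name())
	}
}

func main() {
	describe("app:v1")                                // tag "v1" in registry.example.com/app
	describe("app@sha256:" + strings.Repeat("a", 64)) // digest "sha256:aaa..." in registry.example.com/app
}
```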
+func ParseReference(s string, opts ...Option) (Reference, error) { + if t, err := NewTag(s, opts...); err == nil { + return t, nil + } + if d, err := NewDigest(s, opts...); err == nil { + return d, nil + } + return nil, NewErrBadName("could not parse reference: " + s) + +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/registry.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/registry.go new file mode 100644 index 000000000..d4da7409e --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/registry.go @@ -0,0 +1,136 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + "net" + "net/url" + "regexp" + "strings" +) + +// Detect more complex forms of local references. +var reLocal = regexp.MustCompile(`.*\.local(?:host)?(?::\d{1,5})?$`) + +// Detect the loopback IP (127.0.0.1) +var reLoopback = regexp.MustCompile(regexp.QuoteMeta("127.0.0.1")) + +// Detect the loopback IPV6 (::1) +var reipv6Loopback = regexp.MustCompile(regexp.QuoteMeta("::1")) + +// Registry stores a docker registry name in a structured form. +type Registry struct { + insecure bool + registry string +} + +// RegistryStr returns the registry component of the Registry. +func (r Registry) RegistryStr() string { + return r.registry +} + +// Name returns the name from which the Registry was derived. +func (r Registry) Name() string { + return r.RegistryStr() +} + +func (r Registry) String() string { + return r.Name() +} + +// Scope returns the scope required to access the registry. +func (r Registry) Scope(string) string { + // The only resource under 'registry' is 'catalog'. http://goo.gl/N9cN9Z + return "registry:catalog:*" +} + +func (r Registry) isRFC1918() bool { + ipStr := strings.Split(r.Name(), ":")[0] + ip := net.ParseIP(ipStr) + if ip == nil { + return false + } + for _, cidr := range []string{"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"} { + _, block, _ := net.ParseCIDR(cidr) + if block.Contains(ip) { + return true + } + } + return false +} + +// Scheme returns https scheme for all the endpoints except localhost or when explicitly defined. +func (r Registry) Scheme() string { + if r.insecure { + return "http" + } + if r.isRFC1918() { + return "http" + } + if strings.HasPrefix(r.Name(), "localhost:") { + return "http" + } + if reLocal.MatchString(r.Name()) { + return "http" + } + if reLoopback.MatchString(r.Name()) { + return "http" + } + if reipv6Loopback.MatchString(r.Name()) { + return "http" + } + return "https" +} + +func checkRegistry(name string) error { + // Per RFC 3986, registries (authorities) are required to be prefixed with "//" + // url.Host == hostname[:port] == authority + if url, err := url.Parse("//" + name); err != nil || url.Host != name { + return NewErrBadName("registries must be valid RFC 3986 URI authorities: %s", name) + } + return nil +} + +// NewRegistry returns a Registry based on the given name. 
+// Strict validation requires explicit, valid RFC 3986 URI authorities to be given. +func NewRegistry(name string, opts ...Option) (Registry, error) { + opt := makeOptions(opts...) + if opt.strict && len(name) == 0 { + return Registry{}, NewErrBadName("strict validation requires the registry to be explicitly defined") + } + + if err := checkRegistry(name); err != nil { + return Registry{}, err + } + + if name == "" { + name = opt.defaultRegistry + } + // Rewrite "docker.io" to "index.docker.io". + // See: https://github.com/google/go-containerregistry/issues/68 + if name == defaultRegistryAlias { + name = DefaultRegistry + } + + return Registry{registry: name, insecure: opt.insecure}, nil +} + +// NewInsecureRegistry returns an Insecure Registry based on the given name. +// +// Deprecated: Use the Insecure Option with NewRegistry instead. +func NewInsecureRegistry(name string, opts ...Option) (Registry, error) { + opts = append(opts, Insecure) + return NewRegistry(name, opts...) +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/repository.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/repository.go new file mode 100644 index 000000000..54367a15c --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/repository.go @@ -0,0 +1,121 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + "fmt" + "strings" +) + +const ( + defaultNamespace = "library" + repositoryChars = "abcdefghijklmnopqrstuvwxyz0123456789_-./" + regRepoDelimiter = "/" +) + +// Repository stores a docker repository name in a structured form. +type Repository struct { + Registry + repository string +} + +// See https://docs.docker.com/docker-hub/official_repos +func hasImplicitNamespace(repo string, reg Registry) bool { + return !strings.ContainsRune(repo, '/') && reg.RegistryStr() == DefaultRegistry +} + +// RepositoryStr returns the repository component of the Repository. +func (r Repository) RepositoryStr() string { + if hasImplicitNamespace(r.repository, r.Registry) { + return fmt.Sprintf("%s/%s", defaultNamespace, r.repository) + } + return r.repository +} + +// Name returns the name from which the Repository was derived. +func (r Repository) Name() string { + regName := r.Registry.Name() + if regName != "" { + return regName + regRepoDelimiter + r.RepositoryStr() + } + // TODO: As far as I can tell, this is unreachable. + return r.RepositoryStr() +} + +func (r Repository) String() string { + return r.Name() +} + +// Scope returns the scope required to perform the given action on the registry. +// TODO(jonjohnsonjr): consider moving scopes to a separate package. 
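The `Scheme` heuristics above (insecure flag, RFC 1918 ranges, localhost, `.local`, loopback) can be exercised directly. A small sketch, again assuming the canonical import path:

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
)

func main() {
	for _, s := range []string{"gcr.io", "localhost:5000", "10.0.0.7:5000", "registry.local"} {
		reg, err := name.NewRegistry(s)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-15s -> %s\n", reg.Name(), reg.Scheme())
	}
	// Output:
	// gcr.io          -> https
	// localhost:5000  -> http  (localhost prefix)
	// 10.0.0.7:5000   -> http  (RFC 1918 address)
	// registry.local  -> http  (.local suffix)
}
```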
+func (r Repository) Scope(action string) string { + return fmt.Sprintf("repository:%s:%s", r.RepositoryStr(), action) +} + +func checkRepository(repository string) error { + return checkElement("repository", repository, repositoryChars, 2, 255) +} + +// NewRepository returns a new Repository representing the given name, according to the given strictness. +func NewRepository(name string, opts ...Option) (Repository, error) { + opt := makeOptions(opts...) + if len(name) == 0 { + return Repository{}, NewErrBadName("a repository name must be specified") + } + + var registry string + repo := name + parts := strings.SplitN(name, regRepoDelimiter, 2) + if len(parts) == 2 && (strings.ContainsRune(parts[0], '.') || strings.ContainsRune(parts[0], ':')) { + // The first part of the repository is treated as the registry domain + // iff it contains a '.' or ':' character, otherwise it is all repository + // and the domain defaults to Docker Hub. + registry = parts[0] + repo = parts[1] + } + + if err := checkRepository(repo); err != nil { + return Repository{}, err + } + + reg, err := NewRegistry(registry, opts...) + if err != nil { + return Repository{}, err + } + if hasImplicitNamespace(repo, reg) && opt.strict { + return Repository{}, NewErrBadName("strict validation requires the full repository path (missing 'library')") + } + return Repository{reg, repo}, nil +} + +// Tag returns a Tag in this Repository. +func (r Repository) Tag(identifier string) Tag { + t := Tag{ + tag: identifier, + Repository: r, + } + t.original = t.Name() + return t +} + +// Digest returns a Digest in this Repository. +func (r Repository) Digest(identifier string) Digest { + d := Digest{ + digest: identifier, + Repository: r, + } + d.original = d.Name() + return d +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/tag.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/tag.go new file mode 100644 index 000000000..66bd1bec3 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/name/tag.go @@ -0,0 +1,108 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + "strings" +) + +const ( + // TODO(dekkagaijin): use the docker/distribution regexes for validation. + tagChars = "abcdefghijklmnopqrstuvwxyz0123456789_-.ABCDEFGHIJKLMNOPQRSTUVWXYZ" + tagDelim = ":" +) + +// Tag stores a docker tag name in a structured form. +type Tag struct { + Repository + tag string + original string +} + +// Ensure Tag implements Reference +var _ Reference = (*Tag)(nil) + +// Context implements Reference. +func (t Tag) Context() Repository { + return t.Repository +} + +// Identifier implements Reference. +func (t Tag) Identifier() string { + return t.TagStr() +} + +// TagStr returns the tag component of the Tag. +func (t Tag) TagStr() string { + return t.tag +} + +// Name returns the name from which the Tag was derived. 
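The registry/repository split in `NewRepository` above hinges on whether the first path component contains a `.` or `:`. An illustrative sketch (hostnames are made up):

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
)

func main() {
	// "registry.example:443" contains '.' and ':', so it is taken as the registry.
	r1, _ := name.NewRepository("registry.example:443/team/app")
	fmt.Println(r1.RegistryStr(), "|", r1.RepositoryStr()) // registry.example:443 | team/app

	// "team" contains neither, so the whole string is the repository,
	// and the registry defaults to Docker Hub.
	r2, _ := name.NewRepository("team/app")
	fmt.Println(r2.RegistryStr(), "|", r2.RepositoryStr()) // index.docker.io | team/app
}
```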
+func (t Tag) Name() string { + return t.Repository.Name() + tagDelim + t.TagStr() +} + +// String returns the original input string. +func (t Tag) String() string { + return t.original +} + +// Scope returns the scope required to perform the given action on the tag. +func (t Tag) Scope(action string) string { + return t.Repository.Scope(action) +} + +func checkTag(name string) error { + return checkElement("tag", name, tagChars, 1, 128) +} + +// NewTag returns a new Tag representing the given name, according to the given strictness. +func NewTag(name string, opts ...Option) (Tag, error) { + opt := makeOptions(opts...) + base := name + tag := "" + + // Split on ":" + parts := strings.Split(name, tagDelim) + // Verify that we aren't confusing a tag for a hostname w/ port for the purposes of weak validation. + if len(parts) > 1 && !strings.Contains(parts[len(parts)-1], regRepoDelimiter) { + base = strings.Join(parts[:len(parts)-1], tagDelim) + tag = parts[len(parts)-1] + } + + // We don't require a tag, but if we get one check it's valid, + // even when not being strict. + // If we are being strict, we want to validate the tag regardless in case + // it's empty. + if tag != "" || opt.strict { + if err := checkTag(tag); err != nil { + return Tag{}, err + } + } + + if tag == "" { + tag = opt.defaultTag + } + + repo, err := NewRepository(base, opts...) + if err != nil { + return Tag{}, err + } + return Tag{ + Repository: repo, + tag: tag, + original: name, + }, nil +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/config.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/config.go new file mode 100644 index 000000000..a950b397c --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/config.go @@ -0,0 +1,133 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "encoding/json" + "io" + "time" +) + +// ConfigFile is the configuration file that holds the metadata describing +// how to launch a container. See: +// https://github.com/opencontainers/image-spec/blob/master/config.md +// +// docker_version and os.version are not part of the spec but included +// for backwards compatibility. +type ConfigFile struct { + Architecture string `json:"architecture"` + Author string `json:"author,omitempty"` + Container string `json:"container,omitempty"` + Created Time `json:"created,omitempty"` + DockerVersion string `json:"docker_version,omitempty"` + History []History `json:"history,omitempty"` + OS string `json:"os"` + RootFS RootFS `json:"rootfs"` + Config Config `json:"config"` + OSVersion string `json:"os.version,omitempty"` +} + +// History is one entry of a list recording how this container image was built. 
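Stepping back to the `name` package for a moment: the port-versus-tag disambiguation in `NewTag` above (a trailing `:` segment that contains a `/` must be a port) is easiest to see by example. An illustrative sketch:

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
)

func main() {
	// The trailing ":5000/app" contains a '/', so it is a port, not a tag;
	// the tag falls back to the default "latest".
	t1, _ := name.NewTag("localhost:5000/app")
	fmt.Println(t1.TagStr(), t1.Name()) // latest localhost:5000/app:latest

	// Here the part after the last ':' has no '/', so it is the tag.
	t2, _ := name.NewTag("localhost:5000/app:v1")
	fmt.Println(t2.TagStr()) // v1
}
```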
+type History struct { + Author string `json:"author,omitempty"` + Created Time `json:"created,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + Comment string `json:"comment,omitempty"` + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// Time is a wrapper around time.Time to help with deep copying +type Time struct { + time.Time +} + +// DeepCopyInto creates a deep-copy of the Time value. The underlying time.Time +// type is effectively immutable in the time API, so it is safe to +// copy-by-assign, despite the presence of (unexported) Pointer fields. +func (t *Time) DeepCopyInto(out *Time) { + *out = *t +} + +// RootFS holds the ordered list of file system deltas that comprise the +// container image's root filesystem. +type RootFS struct { + Type string `json:"type"` + DiffIDs []Hash `json:"diff_ids"` +} + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} + +// Config is a submessage of the config file described as: +// The execution parameters which SHOULD be used as a base when running +// a container using the image. +// The names of the fields in this message are chosen to reflect the JSON +// payload of the Config as defined here: +// https://git.io/vrAET +// and +// https://github.com/opencontainers/image-spec/blob/master/config.md +type Config struct { + AttachStderr bool `json:"AttachStderr,omitempty"` + AttachStdin bool `json:"AttachStdin,omitempty"` + AttachStdout bool `json:"AttachStdout,omitempty"` + Cmd []string `json:"Cmd,omitempty"` + Healthcheck *HealthConfig `json:"Healthcheck,omitempty"` + Domainname string `json:"Domainname,omitempty"` + Entrypoint []string `json:"Entrypoint,omitempty"` + Env []string `json:"Env,omitempty"` + Hostname string `json:"Hostname,omitempty"` + Image string `json:"Image,omitempty"` + Labels map[string]string `json:"Labels,omitempty"` + OnBuild []string `json:"OnBuild,omitempty"` + OpenStdin bool `json:"OpenStdin,omitempty"` + StdinOnce bool `json:"StdinOnce,omitempty"` + Tty bool `json:"Tty,omitempty"` + User string `json:"User,omitempty"` + Volumes map[string]struct{} `json:"Volumes,omitempty"` + WorkingDir string `json:"WorkingDir,omitempty"` + ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` + ArgsEscaped bool `json:"ArgsEscaped,omitempty"` + NetworkDisabled bool `json:"NetworkDisabled,omitempty"` + MacAddress string `json:"MacAddress,omitempty"` + StopSignal string `json:"StopSignal,omitempty"` + Shell []string `json:"Shell,omitempty"` +} + +// ParseConfigFile parses the io.Reader's contents into a ConfigFile. 
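A minimal sketch of decoding a config through `ParseConfigFile`, whose implementation follows; the JSON literal here is made up for illustration:

```go
package main

import (
	"fmt"
	"strings"

	v1 "github.com/google/go-containerregistry/pkg/v1"
)

func main() {
	raw := `{
	  "architecture": "amd64",
	  "os": "linux",
	  "config": {"Env": ["PATH=/usr/bin"], "Entrypoint": ["/app"]},
	  "rootfs": {"type": "layers", "diff_ids": []}
	}`
	cf, err := v1.ParseConfigFile(strings.NewReader(raw))
	if err != nil {
		panic(err)
	}
	fmt.Println(cf.OS+"/"+cf.Architecture, cf.Config.Entrypoint) // linux/amd64 [/app]
}
```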
+func ParseConfigFile(r io.Reader) (*ConfigFile, error) {
+	cf := ConfigFile{}
+	if err := json.NewDecoder(r).Decode(&cf); err != nil {
+		return nil, err
+	}
+	return &cf, nil
+}
diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go
new file mode 100644
index 000000000..7a84736be
--- /dev/null
+++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go
@@ -0,0 +1,18 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +k8s:deepcopy-gen=package
+
+// Package v1 defines structured types for OCI v1 images
+package v1
diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/empty/README.md b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/empty/README.md
new file mode 100644
index 000000000..8663a830f
--- /dev/null
+++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/empty/README.md
@@ -0,0 +1,8 @@
+# `empty`
+
+[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/empty?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/empty)
+
+The `empty` package provides an empty base for constructing a `v1.Image` or `v1.ImageIndex`.
+This is especially useful when paired with the [`mutate`](/pkg/v1/mutate) package,
+see [`mutate.Append`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/mutate#Append)
+and [`mutate.AppendManifests`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/mutate#AppendManifests).
diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/empty/doc.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/empty/doc.go
new file mode 100644
index 000000000..1a521e9a7
--- /dev/null
+++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/empty/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package empty provides an implementation of v1.Image equivalent to "FROM scratch".
+package empty diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/empty/image.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/empty/image.go new file mode 100644 index 000000000..c58a06ce0 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/empty/image.go @@ -0,0 +1,52 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package empty + +import ( + "fmt" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Image is a singleton empty image, think: FROM scratch. +var Image, _ = partial.UncompressedToImage(emptyImage{}) + +type emptyImage struct{} + +// MediaType implements partial.UncompressedImageCore. +func (i emptyImage) MediaType() (types.MediaType, error) { + return types.DockerManifestSchema2, nil +} + +// RawConfigFile implements partial.UncompressedImageCore. +func (i emptyImage) RawConfigFile() ([]byte, error) { + return partial.RawConfigFile(i) +} + +// ConfigFile implements v1.Image. +func (i emptyImage) ConfigFile() (*v1.ConfigFile, error) { + return &v1.ConfigFile{ + RootFS: v1.RootFS{ + // Some clients check this. + Type: "layers", + }, + }, nil +} + +func (i emptyImage) LayerByDiffID(h v1.Hash) (partial.UncompressedLayer, error) { + return nil, fmt.Errorf("LayerByDiffID(%s): empty image", h) +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go new file mode 100644 index 000000000..8edab24d4 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go @@ -0,0 +1,63 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package empty + +import ( + "encoding/json" + "errors" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Index is a singleton empty index, think: FROM scratch. 
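For a sense of how the `empty` package above is consumed, a small sketch; it relies on the `partial` helpers (vendored elsewhere in this diff) to derive the manifest and digest:

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/v1/empty"
)

func main() {
	// empty.Image behaves like a zero-layer "FROM scratch" image.
	layers, err := empty.Image.Layers()
	if err != nil {
		panic(err)
	}
	digest, err := empty.Image.Digest()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(layers), digest) // 0 sha256:...
}
```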
+var Index = emptyIndex{} + +type emptyIndex struct{} + +func (i emptyIndex) MediaType() (types.MediaType, error) { + return types.OCIImageIndex, nil +} + +func (i emptyIndex) Digest() (v1.Hash, error) { + return partial.Digest(i) +} + +func (i emptyIndex) Size() (int64, error) { + return partial.Size(i) +} + +func (i emptyIndex) IndexManifest() (*v1.IndexManifest, error) { + return base(), nil +} + +func (i emptyIndex) RawManifest() ([]byte, error) { + return json.Marshal(base()) +} + +func (i emptyIndex) Image(v1.Hash) (v1.Image, error) { + return nil, errors.New("empty index") +} + +func (i emptyIndex) ImageIndex(v1.Hash) (v1.ImageIndex, error) { + return nil, errors.New("empty index") +} + +func base() *v1.IndexManifest { + return &v1.IndexManifest{ + SchemaVersion: 2, + } +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go new file mode 100644 index 000000000..7fdf22e9e --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go @@ -0,0 +1,123 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "hash" + "io" + "strconv" + "strings" +) + +// Hash is an unqualified digest of some content, e.g. sha256:deadbeef +type Hash struct { + // Algorithm holds the algorithm used to compute the hash. + Algorithm string + + // Hex holds the hex portion of the content hash. + Hex string +} + +// String reverses NewHash returning the string-form of the hash. +func (h Hash) String() string { + return fmt.Sprintf("%s:%s", h.Algorithm, h.Hex) +} + +// NewHash validates the input string is a hash and returns a strongly type Hash object. +func NewHash(s string) (Hash, error) { + h := Hash{} + if err := h.parse(s); err != nil { + return Hash{}, err + } + return h, nil +} + +// MarshalJSON implements json.Marshaler +func (h Hash) MarshalJSON() ([]byte, error) { + return json.Marshal(h.String()) +} + +// UnmarshalJSON implements json.Unmarshaler +func (h *Hash) UnmarshalJSON(data []byte) error { + s, err := strconv.Unquote(string(data)) + if err != nil { + return err + } + return h.parse(s) +} + +// MarshalText implements encoding.TextMarshaler. This is required to use +// v1.Hash as a key in a map when marshalling JSON. +func (h Hash) MarshalText() (text []byte, err error) { + return []byte(h.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. This is required to use +// v1.Hash as a key in a map when unmarshalling JSON. +func (h *Hash) UnmarshalText(text []byte) error { + return h.parse(string(text)) +} + +// Hasher returns a hash.Hash for the named algorithm (e.g. 
"sha256") +func Hasher(name string) (hash.Hash, error) { + switch name { + case "sha256": + return sha256.New(), nil + default: + return nil, fmt.Errorf("unsupported hash: %q", name) + } +} + +func (h *Hash) parse(unquoted string) error { + parts := strings.Split(unquoted, ":") + if len(parts) != 2 { + return fmt.Errorf("too many parts in hash: %s", unquoted) + } + + rest := strings.TrimLeft(parts[1], "0123456789abcdef") + if len(rest) != 0 { + return fmt.Errorf("found non-hex character in hash: %c", rest[0]) + } + + hasher, err := Hasher(parts[0]) + if err != nil { + return err + } + // Compare the hex to the expected size (2 hex characters per byte) + if len(parts[1]) != hasher.Size()*2 { + return fmt.Errorf("wrong number of hex digits for %s: %s", parts[0], parts[1]) + } + + h.Algorithm = parts[0] + h.Hex = parts[1] + return nil +} + +// SHA256 computes the Hash of the provided io.Reader's content. +func SHA256(r io.Reader) (Hash, int64, error) { + hasher := sha256.New() + n, err := io.Copy(hasher, r) + if err != nil { + return Hash{}, 0, err + } + return Hash{ + Algorithm: "sha256", + Hex: hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))), + }, n, nil +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/image.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/image.go new file mode 100644 index 000000000..8de9e4764 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/image.go @@ -0,0 +1,59 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Image defines the interface for interacting with an OCI v1 image. +type Image interface { + // Layers returns the ordered collection of filesystem layers that comprise this image. + // The order of the list is oldest/base layer first, and most-recent/top layer last. + Layers() ([]Layer, error) + + // MediaType of this image's manifest. + MediaType() (types.MediaType, error) + + // Size returns the size of the manifest. + Size() (int64, error) + + // ConfigName returns the hash of the image's config file, also known as + // the Image ID. + ConfigName() (Hash, error) + + // ConfigFile returns this image's config file. + ConfigFile() (*ConfigFile, error) + + // RawConfigFile returns the serialized bytes of ConfigFile(). + RawConfigFile() ([]byte, error) + + // Digest returns the sha256 of this image's manifest. + Digest() (Hash, error) + + // Manifest returns this image's Manifest object. + Manifest() (*Manifest, error) + + // RawManifest returns the serialized bytes of Manifest() + RawManifest() ([]byte, error) + + // LayerByDigest returns a Layer for interacting with a particular layer of + // the image, looking it up by "digest" (the compressed hash). + LayerByDigest(Hash) (Layer, error) + + // LayerByDiffID is an analog to LayerByDigest, looking up by "diff id" + // (the uncompressed hash). 
+	LayerByDiffID(Hash) (Layer, error)
+}
diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/index.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/index.go
new file mode 100644
index 000000000..8e7bc8ebb
--- /dev/null
+++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/index.go
@@ -0,0 +1,43 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+	"github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// ImageIndex defines the interface for interacting with an OCI image index.
+type ImageIndex interface {
+	// MediaType of this image's manifest.
+	MediaType() (types.MediaType, error)
+
+	// Digest returns the sha256 of this index's manifest.
+	Digest() (Hash, error)
+
+	// Size returns the size of the manifest.
+	Size() (int64, error)
+
+	// IndexManifest returns this image index's manifest object.
+	IndexManifest() (*IndexManifest, error)
+
+	// RawManifest returns the serialized bytes of IndexManifest().
+	RawManifest() ([]byte, error)
+
+	// Image returns a v1.Image that this ImageIndex references.
+	Image(Hash) (Image, error)
+
+	// ImageIndex returns a v1.ImageIndex that this ImageIndex references.
+	ImageIndex(Hash) (ImageIndex, error)
+}
diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/internal/and/and_closer.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/internal/and/and_closer.go
new file mode 100644
index 000000000..52908e5cf
--- /dev/null
+++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/internal/and/and_closer.go
@@ -0,0 +1,47 @@
+// Copyright 2020 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package and
+
+import (
+	"io"
+)
+
+// ReadCloser implements io.ReadCloser by reading from a particular io.Reader
+// and then calling the provided "Close()" method.
+type ReadCloser struct {
+	io.Reader
+	CloseFunc func() error
+}
+
+var _ io.ReadCloser = (*ReadCloser)(nil)
+
+// Close implements io.ReadCloser
+func (rac *ReadCloser) Close() error {
+	return rac.CloseFunc()
+}
+
+// WriteCloser implements io.WriteCloser by writing to a particular io.Writer
+// and then calling the provided "Close()" method.
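A sketch of the `and.ReadCloser` composition above, which pairs any `io.Reader` with a custom close hook. Note that `internal/...` packages are importable only from within the go-containerregistry module itself; the import path here is illustrative:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	// Illustrative only: internal packages cannot be imported from outside the module.
	"github.com/google/go-containerregistry/pkg/v1/internal/and"
)

func main() {
	closed := false
	rc := &and.ReadCloser{
		Reader:    strings.NewReader("payload"),
		CloseFunc: func() error { closed = true; return nil },
	}
	b, _ := ioutil.ReadAll(rc)
	rc.Close()
	fmt.Println(string(b), closed) // payload true
}
```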
+type WriteCloser struct { + io.Writer + CloseFunc func() error +} + +var _ io.WriteCloser = (*WriteCloser)(nil) + +// Close implements io.WriteCloser +func (wac *WriteCloser) Close() error { + return wac.CloseFunc() +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/internal/estargz/estargz.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/internal/estargz/estargz.go new file mode 100644 index 000000000..6404d3d95 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/internal/estargz/estargz.go @@ -0,0 +1,54 @@ +// Copyright 2020 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package estargz + +import ( + "bytes" + "io" + "io/ioutil" + + "github.com/containerd/stargz-snapshotter/estargz" + v1 "github.com/google/go-containerregistry/pkg/v1" +) + +// Assert that what we're returning is an io.ReadCloser +var _ io.ReadCloser = (*estargz.Blob)(nil) + +// ReadCloser reads uncompressed tarball input from the io.ReadCloser and +// returns: +// * An io.ReadCloser from which compressed data may be read, and +// * A v1.Hash with the hash of the estargz table of contents, or +// * An error if the estargz processing encountered a problem. +// +// Refer to estargz for the options: +// https://pkg.go.dev/github.com/containerd/stargz-snapshotter@v0.2.0/estargz#Option +func ReadCloser(r io.ReadCloser, opts ...estargz.Option) (*estargz.Blob, v1.Hash, error) { + defer r.Close() + + // TODO(#876): Avoid buffering into memory. + bs, err := ioutil.ReadAll(r) + if err != nil { + return nil, v1.Hash{}, err + } + br := bytes.NewReader(bs) + + rc, err := estargz.Build(io.NewSectionReader(br, 0, int64(len(bs))), opts...) + if err != nil { + return nil, v1.Hash{}, err + } + + h, err := v1.NewHash(rc.TOCDigest().String()) + return rc, h, err +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/internal/gzip/zip.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/internal/gzip/zip.go new file mode 100644 index 000000000..6f7bfd931 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/internal/gzip/zip.go @@ -0,0 +1,116 @@ +// Copyright 2020 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package gzip
+
+import (
+	"bufio"
+	"bytes"
+	"compress/gzip"
+	"io"
+
+	"github.com/google/go-containerregistry/pkg/v1/internal/and"
+)
+
+var gzipMagicHeader = []byte{'\x1f', '\x8b'}
+
+// ReadCloser reads uncompressed input data from the io.ReadCloser and
+// returns an io.ReadCloser from which compressed data may be read.
+// This uses gzip.BestSpeed for the compression level.
+func ReadCloser(r io.ReadCloser) io.ReadCloser {
+	return ReadCloserLevel(r, gzip.BestSpeed)
+}
+
+// ReadCloserLevel reads uncompressed input data from the io.ReadCloser and
+// returns an io.ReadCloser from which compressed data may be read.
+// Refer to compress/gzip for the level:
+// https://golang.org/pkg/compress/gzip/#pkg-constants
+func ReadCloserLevel(r io.ReadCloser, level int) io.ReadCloser {
+	pr, pw := io.Pipe()
+
+	// For highly compressible layers, gzip.Writer will output a very small
+	// number of bytes per Write(). This is normally fine, but when pushing
+	// to a registry, we want to ensure that we're taking full advantage of
+	// the available bandwidth instead of sending tons of tiny writes over
+	// the wire.
+	// 128K (2 << 16) ought to be small enough for anybody.
+	bw := bufio.NewWriterSize(pw, 2<<16)
+
+	// Returns err so we can pw.CloseWithError(err)
+	go func() error {
+		// TODO(go1.14): Just defer {pw,gw,r}.Close like you'd expect.
+		// Context: https://golang.org/issue/24283
+		gw, err := gzip.NewWriterLevel(bw, level)
+		if err != nil {
+			return pw.CloseWithError(err)
+		}
+
+		if _, err := io.Copy(gw, r); err != nil {
+			defer r.Close()
+			defer gw.Close()
+			return pw.CloseWithError(err)
+		}
+
+		// Close gzip writer to Flush it and write gzip trailers.
+		if err := gw.Close(); err != nil {
+			return pw.CloseWithError(err)
+		}
+
+		// Flush bufio writer to ensure we write out everything.
+		if err := bw.Flush(); err != nil {
+			return pw.CloseWithError(err)
+		}
+
+		// We don't really care if these fail.
+		defer pw.Close()
+		defer r.Close()
+
+		return nil
+	}()
+
+	return pr
+}
+
+// UnzipReadCloser reads compressed input data from the io.ReadCloser and
+// returns an io.ReadCloser from which uncompressed data may be read.
+func UnzipReadCloser(r io.ReadCloser) (io.ReadCloser, error) {
+	gr, err := gzip.NewReader(r)
+	if err != nil {
+		return nil, err
+	}
+	return &and.ReadCloser{
+		Reader: gr,
+		CloseFunc: func() error {
+			// If the unzip fails, then this seems to return the same
+			// error as the read. We don't want this to interfere with
+			// us closing the main ReadCloser, since this could leave
+			// an open file descriptor (fails on Windows).
+			gr.Close()
+			return r.Close()
+		},
+	}, nil
+}
+
+// Is detects whether the input stream is compressed.
+func Is(r io.Reader) (bool, error) {
+	magicHeader := make([]byte, 2)
+	n, err := r.Read(magicHeader)
+	if n == 0 && err == io.EOF {
+		return false, nil
+	}
+	if err != nil {
+		return false, err
+	}
+	return bytes.Equal(magicHeader, gzipMagicHeader), nil
+}
diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/internal/verify/verify.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/internal/verify/verify.go
new file mode 100644
index 000000000..366b6eef4
--- /dev/null
+++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/internal/verify/verify.go
@@ -0,0 +1,62 @@
+// Copyright 2020 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
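Round-tripping data through the pipe-based compressor above is a one-liner on each side. As before, the `internal` import is illustrative only:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	// Illustrative only: internal packages cannot be imported from outside the module.
	"github.com/google/go-containerregistry/pkg/v1/internal/gzip"
)

func main() {
	src := ioutil.NopCloser(strings.NewReader("uncompressed layer bytes"))

	zipped := gzip.ReadCloser(src) // compresses lazily via an io.Pipe

	unzipped, err := gzip.UnzipReadCloser(zipped)
	if err != nil {
		panic(err)
	}
	defer unzipped.Close()

	b, _ := ioutil.ReadAll(unzipped)
	fmt.Println(string(b)) // uncompressed layer bytes
}
```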
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package verify + +import ( + "encoding/hex" + "fmt" + "hash" + "io" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/internal/and" +) + +type verifyReader struct { + inner io.Reader + hasher hash.Hash + expected v1.Hash +} + +// Read implements io.Reader +func (vc *verifyReader) Read(b []byte) (int, error) { + n, err := vc.inner.Read(b) + if err == io.EOF { + got := hex.EncodeToString(vc.hasher.Sum(make([]byte, 0, vc.hasher.Size()))) + if want := vc.expected.Hex; got != want { + return n, fmt.Errorf("error verifying %s checksum; got %q, want %q", + vc.expected.Algorithm, got, want) + } + } + return n, err +} + +// ReadCloser wraps the given io.ReadCloser to verify that its contents match +// the provided v1.Hash before io.EOF is returned. +func ReadCloser(r io.ReadCloser, h v1.Hash) (io.ReadCloser, error) { + w, err := v1.Hasher(h.Algorithm) + if err != nil { + return nil, err + } + r2 := io.TeeReader(r, w) + return &and.ReadCloser{ + Reader: &verifyReader{ + inner: r2, + hasher: w, + expected: h, + }, + CloseFunc: r.Close, + }, nil +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go new file mode 100644 index 000000000..57447d263 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go @@ -0,0 +1,42 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "io" + + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Layer is an interface for accessing the properties of a particular layer of a v1.Image +type Layer interface { + // Digest returns the Hash of the compressed layer. + Digest() (Hash, error) + + // DiffID returns the Hash of the uncompressed layer. + DiffID() (Hash, error) + + // Compressed returns an io.ReadCloser for the compressed layer contents. + Compressed() (io.ReadCloser, error) + + // Uncompressed returns an io.ReadCloser for the uncompressed layer contents. + Uncompressed() (io.ReadCloser, error) + + // Size returns the compressed size of the Layer. + Size() (int64, error) + + // MediaType returns the media type of the Layer. 
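A sketch of how `verify.ReadCloser` (defined just above) surfaces a checksum mismatch as a read error at EOF; the `internal` import is again illustrative:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	v1 "github.com/google/go-containerregistry/pkg/v1"
	// Illustrative only: internal packages cannot be imported from outside the module.
	"github.com/google/go-containerregistry/pkg/v1/internal/verify"
)

func main() {
	payload := "some blob"
	want, _, _ := v1.SHA256(strings.NewReader(payload))

	// Matching digest: reading through to EOF succeeds.
	rc, _ := verify.ReadCloser(ioutil.NopCloser(strings.NewReader(payload)), want)
	_, err := ioutil.ReadAll(rc)
	fmt.Println(err) // <nil>

	// Tampered content: the mismatch is reported when the reader hits EOF.
	rc, _ = verify.ReadCloser(ioutil.NopCloser(strings.NewReader(payload+"!")), want)
	_, err = ioutil.ReadAll(rc)
	fmt.Println(err != nil) // true
}
```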
+ MediaType() (types.MediaType, error) +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layout/README.md b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layout/README.md new file mode 100644 index 000000000..54bee6d9f --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layout/README.md @@ -0,0 +1,5 @@ +# `layout` + +[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/layout?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/layout) + +The `layout` package implements support for interacting with an [OCI Image Layout](https://github.com/opencontainers/image-spec/blob/master/image-layout.md). diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layout/blob.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layout/blob.go new file mode 100644 index 000000000..ba90d4cdb --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layout/blob.go @@ -0,0 +1,38 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package layout + +import ( + "io" + "io/ioutil" + "os" + + v1 "github.com/google/go-containerregistry/pkg/v1" +) + +// Blob returns a blob with the given hash from the Path. +func (l Path) Blob(h v1.Hash) (io.ReadCloser, error) { + return os.Open(l.blobPath(h)) +} + +// Bytes is a convenience function to return a blob from the Path as +// a byte slice. +func (l Path) Bytes(h v1.Hash) ([]byte, error) { + return ioutil.ReadFile(l.blobPath(h)) +} + +func (l Path) blobPath(h v1.Hash) string { + return l.path("blobs", h.Algorithm, h.Hex) +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layout/doc.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layout/doc.go new file mode 100644 index 000000000..d80d27363 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layout/doc.go @@ -0,0 +1,19 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package layout provides facilities for reading/writing artifacts from/to +// an OCI image layout on disk, see: +// +// https://github.com/opencontainers/image-spec/blob/master/image-layout.md +package layout diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layout/image.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layout/image.go new file mode 100644 index 000000000..7c76a10cb --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layout/image.go @@ -0,0 +1,131 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package layout + +import ( + "fmt" + "io" + "sync" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +type layoutImage struct { + path Path + desc v1.Descriptor + manifestLock sync.Mutex // Protects rawManifest + rawManifest []byte +} + +var _ partial.CompressedImageCore = (*layoutImage)(nil) + +// Image reads a v1.Image with digest h from the Path. +func (l Path) Image(h v1.Hash) (v1.Image, error) { + ii, err := l.ImageIndex() + if err != nil { + return nil, err + } + + return ii.Image(h) +} + +func (li *layoutImage) MediaType() (types.MediaType, error) { + return li.desc.MediaType, nil +} + +// Implements WithManifest for partial.Blobset. +func (li *layoutImage) Manifest() (*v1.Manifest, error) { + return partial.Manifest(li) +} + +func (li *layoutImage) RawManifest() ([]byte, error) { + li.manifestLock.Lock() + defer li.manifestLock.Unlock() + if li.rawManifest != nil { + return li.rawManifest, nil + } + + b, err := li.path.Bytes(li.desc.Digest) + if err != nil { + return nil, err + } + + li.rawManifest = b + return li.rawManifest, nil +} + +func (li *layoutImage) RawConfigFile() ([]byte, error) { + manifest, err := li.Manifest() + if err != nil { + return nil, err + } + + return li.path.Bytes(manifest.Config.Digest) +} + +func (li *layoutImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) { + manifest, err := li.Manifest() + if err != nil { + return nil, err + } + + if h == manifest.Config.Digest { + return partial.CompressedLayer(&compressedBlob{ + path: li.path, + desc: manifest.Config, + }), nil + } + + for _, desc := range manifest.Layers { + if h == desc.Digest { + switch desc.MediaType { + case types.OCILayer, types.DockerLayer: + return partial.CompressedToLayer(&compressedBlob{ + path: li.path, + desc: desc, + }) + default: + // TODO: We assume everything is a compressed blob, but that might not be true. + // TODO: Handle foreign layers. 
+ return nil, fmt.Errorf("unexpected media type: %v for layer: %v", desc.MediaType, desc.Digest) + } + } + } + + return nil, fmt.Errorf("could not find layer in image: %s", h) +} + +type compressedBlob struct { + path Path + desc v1.Descriptor +} + +func (b *compressedBlob) Digest() (v1.Hash, error) { + return b.desc.Digest, nil +} + +func (b *compressedBlob) Compressed() (io.ReadCloser, error) { + return b.path.Blob(b.desc.Digest) +} + +func (b *compressedBlob) Size() (int64, error) { + return b.desc.Size, nil +} + +func (b *compressedBlob) MediaType() (types.MediaType, error) { + return b.desc.MediaType, nil +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layout/index.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layout/index.go new file mode 100644 index 000000000..6b7da2c22 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layout/index.go @@ -0,0 +1,161 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package layout + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +var _ v1.ImageIndex = (*layoutIndex)(nil) + +type layoutIndex struct { + mediaType types.MediaType + path Path + rawIndex []byte +} + +// ImageIndexFromPath is a convenience function which constructs a Path and returns its v1.ImageIndex. +func ImageIndexFromPath(path string) (v1.ImageIndex, error) { + lp, err := FromPath(path) + if err != nil { + return nil, err + } + return lp.ImageIndex() +} + +// ImageIndex returns a v1.ImageIndex for the Path. +func (l Path) ImageIndex() (v1.ImageIndex, error) { + rawIndex, err := ioutil.ReadFile(l.path("index.json")) + if err != nil { + return nil, err + } + + idx := &layoutIndex{ + mediaType: types.OCIImageIndex, + path: l, + rawIndex: rawIndex, + } + + return idx, nil +} + +func (i *layoutIndex) MediaType() (types.MediaType, error) { + return i.mediaType, nil +} + +func (i *layoutIndex) Digest() (v1.Hash, error) { + return partial.Digest(i) +} + +func (i *layoutIndex) Size() (int64, error) { + return partial.Size(i) +} + +func (i *layoutIndex) IndexManifest() (*v1.IndexManifest, error) { + var index v1.IndexManifest + err := json.Unmarshal(i.rawIndex, &index) + return &index, err +} + +func (i *layoutIndex) RawManifest() ([]byte, error) { + return i.rawIndex, nil +} + +func (i *layoutIndex) Image(h v1.Hash) (v1.Image, error) { + // Look up the digest in our manifest first to return a better error. 
+ desc, err := i.findDescriptor(h) + if err != nil { + return nil, err + } + + if !isExpectedMediaType(desc.MediaType, types.OCIManifestSchema1, types.DockerManifestSchema2) { + return nil, fmt.Errorf("unexpected media type for %v: %s", h, desc.MediaType) + } + + img := &layoutImage{ + path: i.path, + desc: *desc, + } + return partial.CompressedToImage(img) +} + +func (i *layoutIndex) ImageIndex(h v1.Hash) (v1.ImageIndex, error) { + // Look up the digest in our manifest first to return a better error. + desc, err := i.findDescriptor(h) + if err != nil { + return nil, err + } + + if !isExpectedMediaType(desc.MediaType, types.OCIImageIndex, types.DockerManifestList) { + return nil, fmt.Errorf("unexpected media type for %v: %s", h, desc.MediaType) + } + + rawIndex, err := i.path.Bytes(h) + if err != nil { + return nil, err + } + + return &layoutIndex{ + mediaType: desc.MediaType, + path: i.path, + rawIndex: rawIndex, + }, nil +} + +func (i *layoutIndex) Blob(h v1.Hash) (io.ReadCloser, error) { + return i.path.Blob(h) +} + +func (i *layoutIndex) findDescriptor(h v1.Hash) (*v1.Descriptor, error) { + im, err := i.IndexManifest() + if err != nil { + return nil, err + } + + if h == (v1.Hash{}) { + if len(im.Manifests) != 1 { + return nil, errors.New("oci layout must contain only a single image to be used with layout.Image") + } + return &(im.Manifests)[0], nil + } + + for _, desc := range im.Manifests { + if desc.Digest == h { + return &desc, nil + } + } + + return nil, fmt.Errorf("could not find descriptor in index: %s", h) +} + +// TODO: Pull this out into methods on types.MediaType? e.g. instead, have: +// * mt.IsIndex() +// * mt.IsImage() +func isExpectedMediaType(mt types.MediaType, expected ...types.MediaType) bool { + for _, allowed := range expected { + if mt == allowed { + return true + } + } + return false +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layout/layoutpath.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layout/layoutpath.go new file mode 100644 index 000000000..a031ff5ae --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layout/layoutpath.go @@ -0,0 +1,25 @@ +// Copyright 2019 The original author or authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package layout + +import "path/filepath" + +// Path represents an OCI image layout rooted in a file system path +type Path string + +func (l Path) path(elem ...string) string { + complete := []string{string(l)} + return filepath.Join(append(complete, elem...)...) 
+} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layout/options.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layout/options.go new file mode 100644 index 000000000..5569e51de --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layout/options.go @@ -0,0 +1,42 @@ +package layout + +import v1 "github.com/google/go-containerregistry/pkg/v1" + +// Option is a functional option for Layout. +// +// TODO: We'll need to change this signature to support Sparse/Thin images. +// Or, alternatively, wrap it in a sparse.Image that returns an empty list for layers? +type Option func(*v1.Descriptor) error + +// WithAnnotations adds annotations to the artifact descriptor. +func WithAnnotations(annotations map[string]string) Option { + return func(desc *v1.Descriptor) error { + if desc.Annotations == nil { + desc.Annotations = make(map[string]string) + } + for k, v := range annotations { + desc.Annotations[k] = v + } + + return nil + } +} + +// WithURLs adds urls to the artifact descriptor. +func WithURLs(urls []string) Option { + return func(desc *v1.Descriptor) error { + if desc.URLs == nil { + desc.URLs = []string{} + } + desc.URLs = append(desc.URLs, urls...) + return nil + } +} + +// WithPlatform sets the platform of the artifact descriptor. +func WithPlatform(platform v1.Platform) Option { + return func(desc *v1.Descriptor) error { + desc.Platform = &platform + return nil + } +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layout/read.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layout/read.go new file mode 100644 index 000000000..796abc7dd --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layout/read.go @@ -0,0 +1,32 @@ +// Copyright 2019 The original author or authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package layout + +import ( + "os" + "path/filepath" +) + +// FromPath reads an OCI image layout at path and constructs a layout.Path. +func FromPath(path string) (Path, error) { + // TODO: check oci-layout exists + + _, err := os.Stat(filepath.Join(path, "index.json")) + if err != nil { + return "", err + } + + return Path(path), nil +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layout/write.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layout/write.go new file mode 100644 index 000000000..2f0a162cd --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/layout/write.go @@ -0,0 +1,400 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package layout + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "os" + "path/filepath" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/match" + "github.com/google/go-containerregistry/pkg/v1/mutate" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/types" + "golang.org/x/sync/errgroup" +) + +var layoutFile = `{ + "imageLayoutVersion": "1.0.0" +}` + +// AppendImage writes a v1.Image to the Path and updates +// the index.json to reference it. +func (l Path) AppendImage(img v1.Image, options ...Option) error { + if err := l.WriteImage(img); err != nil { + return err + } + + mt, err := img.MediaType() + if err != nil { + return err + } + + d, err := img.Digest() + if err != nil { + return err + } + + manifest, err := img.RawManifest() + if err != nil { + return err + } + + desc := v1.Descriptor{ + MediaType: mt, + Size: int64(len(manifest)), + Digest: d, + } + + for _, opt := range options { + if err := opt(&desc); err != nil { + return err + } + } + + return l.AppendDescriptor(desc) +} + +// AppendIndex writes a v1.ImageIndex to the Path and updates +// the index.json to reference it. +func (l Path) AppendIndex(ii v1.ImageIndex, options ...Option) error { + if err := l.WriteIndex(ii); err != nil { + return err + } + + mt, err := ii.MediaType() + if err != nil { + return err + } + + d, err := ii.Digest() + if err != nil { + return err + } + + manifest, err := ii.RawManifest() + if err != nil { + return err + } + + desc := v1.Descriptor{ + MediaType: mt, + Size: int64(len(manifest)), + Digest: d, + } + + for _, opt := range options { + if err := opt(&desc); err != nil { + return err + } + } + + return l.AppendDescriptor(desc) +} + +// AppendDescriptor adds a descriptor to the index.json of the Path. +func (l Path) AppendDescriptor(desc v1.Descriptor) error { + ii, err := l.ImageIndex() + if err != nil { + return err + } + + index, err := ii.IndexManifest() + if err != nil { + return err + } + + index.Manifests = append(index.Manifests, desc) + + rawIndex, err := json.MarshalIndent(index, "", " ") + if err != nil { + return err + } + + return l.WriteFile("index.json", rawIndex, os.ModePerm) +} + +// ReplaceImage writes a v1.Image to the Path and updates +// the index.json to reference it, replacing any existing one that matches matcher, if found. +func (l Path) ReplaceImage(img v1.Image, matcher match.Matcher, options ...Option) error { + if err := l.WriteImage(img); err != nil { + return err + } + + return l.replaceDescriptor(img, matcher, options...) +} + +// ReplaceIndex writes a v1.ImageIndex to the Path and updates +// the index.json to reference it, replacing any existing one that matches matcher, if found. +func (l Path) ReplaceIndex(ii v1.ImageIndex, matcher match.Matcher, options ...Option) error { + if err := l.WriteIndex(ii); err != nil { + return err + } + + return l.replaceDescriptor(ii, matcher, options...) +} + +// replaceDescriptor adds a descriptor to the index.json of the Path, replacing +// any one matching matcher, if found. 
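+//
+// Callers normally reach this through ReplaceImage or ReplaceIndex. A sketch
+// of typical use, assuming img and a layout path l already exist, and using
+// a hypothetical ref name annotation "my/app":
+//
+//	err := l.ReplaceImage(img, match.Name("my/app"))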
+func (l Path) replaceDescriptor(append mutate.Appendable, matcher match.Matcher, options ...Option) error { + ii, err := l.ImageIndex() + if err != nil { + return err + } + + desc, err := partial.Descriptor(append) + if err != nil { + return err + } + + for _, opt := range options { + if err := opt(desc); err != nil { + return err + } + } + + add := mutate.IndexAddendum{ + Add: append, + Descriptor: *desc, + } + ii = mutate.AppendManifests(mutate.RemoveManifests(ii, matcher), add) + + index, err := ii.IndexManifest() + if err != nil { + return err + } + + rawIndex, err := json.MarshalIndent(index, "", " ") + if err != nil { + return err + } + + return l.WriteFile("index.json", rawIndex, os.ModePerm) +} + +// RemoveDescriptors removes any descriptors that match the match.Matcher from the index.json of the Path. +func (l Path) RemoveDescriptors(matcher match.Matcher) error { + ii, err := l.ImageIndex() + if err != nil { + return err + } + ii = mutate.RemoveManifests(ii, matcher) + + index, err := ii.IndexManifest() + if err != nil { + return err + } + + rawIndex, err := json.MarshalIndent(index, "", " ") + if err != nil { + return err + } + + return l.WriteFile("index.json", rawIndex, os.ModePerm) +} + +// WriteFile write a file with arbitrary data at an arbitrary location in a v1 +// layout. Used mostly internally to write files like "oci-layout" and +// "index.json", also can be used to write other arbitrary files. Do *not* use +// this to write blobs. Use only WriteBlob() for that. +func (l Path) WriteFile(name string, data []byte, perm os.FileMode) error { + if err := os.MkdirAll(l.path(), os.ModePerm); err != nil && !os.IsExist(err) { + return err + } + + return ioutil.WriteFile(l.path(name), data, perm) + +} + +// WriteBlob copies a file to the blobs/ directory in the Path from the given ReadCloser at +// blobs/{hash.Algorithm}/{hash.Hex}. +func (l Path) WriteBlob(hash v1.Hash, r io.ReadCloser) error { + dir := l.path("blobs", hash.Algorithm) + if err := os.MkdirAll(dir, os.ModePerm); err != nil && !os.IsExist(err) { + return err + } + + file := filepath.Join(dir, hash.Hex) + if _, err := os.Stat(file); err == nil { + // Blob already exists, that's fine. + return nil + } + w, err := os.Create(file) + if err != nil { + return err + } + defer w.Close() + + _, err = io.Copy(w, r) + return err +} + +// TODO: A streaming version of WriteBlob so we don't have to know the hash +// before we write it. + +// TODO: For streaming layers we should write to a tmp file then Rename to the +// final digest. +func (l Path) writeLayer(layer v1.Layer) error { + d, err := layer.Digest() + if err != nil { + return err + } + + r, err := layer.Compressed() + if err != nil { + return err + } + + return l.WriteBlob(d, r) +} + +// WriteImage writes an image, including its manifest, config and all of its +// layers, to the blobs directory. If any blob already exists, as determined by +// the hash filename, does not write it. +// This function does *not* update the `index.json` file. If you want to write the +// image and also update the `index.json`, call AppendImage(), which wraps this +// and also updates the `index.json`. +func (l Path) WriteImage(img v1.Image) error { + layers, err := img.Layers() + if err != nil { + return err + } + + // Write the layers concurrently. + var g errgroup.Group + for _, layer := range layers { + layer := layer + g.Go(func() error { + return l.writeLayer(layer) + }) + } + if err := g.Wait(); err != nil { + return err + } + + // Write the config. 
+ cfgName, err := img.ConfigName() + if err != nil { + return err + } + cfgBlob, err := img.RawConfigFile() + if err != nil { + return err + } + if err := l.WriteBlob(cfgName, ioutil.NopCloser(bytes.NewReader(cfgBlob))); err != nil { + return err + } + + // Write the img manifest. + d, err := img.Digest() + if err != nil { + return err + } + manifest, err := img.RawManifest() + if err != nil { + return err + } + + return l.WriteBlob(d, ioutil.NopCloser(bytes.NewReader(manifest))) +} + +func (l Path) writeIndexToFile(indexFile string, ii v1.ImageIndex) error { + index, err := ii.IndexManifest() + if err != nil { + return err + } + + // Walk the descriptors and write any v1.Image or v1.ImageIndex that we find. + // If we come across something we don't expect, just write it as a blob. + for _, desc := range index.Manifests { + switch desc.MediaType { + case types.OCIImageIndex, types.DockerManifestList: + ii, err := ii.ImageIndex(desc.Digest) + if err != nil { + return err + } + if err := l.WriteIndex(ii); err != nil { + return err + } + case types.OCIManifestSchema1, types.DockerManifestSchema2: + img, err := ii.Image(desc.Digest) + if err != nil { + return err + } + if err := l.WriteImage(img); err != nil { + return err + } + default: + // TODO: The layout could reference arbitrary things, which we should + // probably just pass through. + } + } + + rawIndex, err := ii.RawManifest() + if err != nil { + return err + } + + return l.WriteFile(indexFile, rawIndex, os.ModePerm) +} + +// WriteIndex writes an index to the blobs directory. Walks down the children, +// including its children manifests and/or indexes, and down the tree until all of +// config and all layers, have been written. If any blob already exists, as determined by +// the hash filename, does not write it. +// This function does *not* update the `index.json` file. If you want to write the +// index and also update the `index.json`, call AppendIndex(), which wraps this +// and also updates the `index.json`. +func (l Path) WriteIndex(ii v1.ImageIndex) error { + // Always just write oci-layout file, since it's small. + if err := l.WriteFile("oci-layout", []byte(layoutFile), os.ModePerm); err != nil { + return err + } + + h, err := ii.Digest() + if err != nil { + return err + } + + indexFile := filepath.Join("blobs", h.Algorithm, h.Hex) + return l.writeIndexToFile(indexFile, ii) + +} + +// Write constructs a Path at path from an ImageIndex. +// +// The contents are written in the following format: +// At the top level, there is: +// One oci-layout file containing the version of this image-layout. +// One index.json file listing descriptors for the contained images. +// Under blobs/, there is, for each image: +// One file for each layer, named after the layer's SHA. +// One file for each config blob, named after its SHA. +// One file for each manifest blob, named after its SHA. +func Write(path string, ii v1.ImageIndex) (Path, error) { + lp := Path(path) + // Always just write oci-layout file, since it's small. 
+ if err := lp.WriteFile("oci-layout", []byte(layoutFile), os.ModePerm); err != nil { + return "", err + } + + // TODO create blobs/ in case there is a blobs file which would prevent the directory from being created + + return lp, lp.writeIndexToFile("index.json", ii) +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go new file mode 100644 index 000000000..36c341df8 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go @@ -0,0 +1,67 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "encoding/json" + "io" + + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Manifest represents the OCI image manifest in a structured way. +type Manifest struct { + SchemaVersion int64 `json:"schemaVersion,omitempty"` + MediaType types.MediaType `json:"mediaType"` + Config Descriptor `json:"config"` + Layers []Descriptor `json:"layers"` + Annotations map[string]string `json:"annotations,omitempty"` +} + +// IndexManifest represents an OCI image index in a structured way. +type IndexManifest struct { + SchemaVersion int64 `json:"schemaVersion"` + MediaType types.MediaType `json:"mediaType,omitempty"` + Manifests []Descriptor `json:"manifests"` + Annotations map[string]string `json:"annotations,omitempty"` +} + +// Descriptor holds a reference from the manifest to one of its constituent elements. +type Descriptor struct { + MediaType types.MediaType `json:"mediaType"` + Size int64 `json:"size"` + Digest Hash `json:"digest"` + URLs []string `json:"urls,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` + Platform *Platform `json:"platform,omitempty"` +} + +// ParseManifest parses the io.Reader's contents into a Manifest. +func ParseManifest(r io.Reader) (*Manifest, error) { + m := Manifest{} + if err := json.NewDecoder(r).Decode(&m); err != nil { + return nil, err + } + return &m, nil +} + +// ParseIndexManifest parses the io.Reader's contents into an IndexManifest. +func ParseIndexManifest(r io.Reader) (*IndexManifest, error) { + im := IndexManifest{} + if err := json.NewDecoder(r).Decode(&im); err != nil { + return nil, err + } + return &im, nil +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/match/match.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/match/match.go new file mode 100644 index 000000000..0f886667a --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/match/match.go @@ -0,0 +1,90 @@ +// Copyright 2020 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package match provides functionality for conveniently matching a v1.Descriptor. +package match + +import ( + v1 "github.com/google/go-containerregistry/pkg/v1" + imagespec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Matcher function that is given a v1.Descriptor, and returns whether or +// not it matches a given rule. Can match on anything it wants in the Descriptor. +type Matcher func(desc v1.Descriptor) bool + +// Name returns a match.Matcher that matches based on the value of the +// "org.opencontainers.image.ref.name" annotation: +// github.com/opencontainers/image-spec/blob/v1.0.1/annotations.md#pre-defined-annotation-keys +func Name(name string) Matcher { + return Annotation(imagespec.AnnotationRefName, name) +} + +// Annotation returns a match.Matcher that matches based on the provided annotation. +func Annotation(key, value string) Matcher { + return func(desc v1.Descriptor) bool { + if desc.Annotations == nil { + return false + } + if aValue, ok := desc.Annotations[key]; ok && aValue == value { + return true + } + return false + } +} + +// Platforms returns a match.Matcher that matches on any one of the provided platforms. +// Ignores any descriptors that do not have a platform. +func Platforms(platforms ...v1.Platform) Matcher { + return func(desc v1.Descriptor) bool { + if desc.Platform == nil { + return false + } + for _, platform := range platforms { + if desc.Platform.Equals(platform) { + return true + } + } + return false + } +} + +// MediaTypes returns a match.Matcher that matches at least one of the provided media types. +func MediaTypes(mediaTypes ...string) Matcher { + mts := map[string]bool{} + for _, media := range mediaTypes { + mts[media] = true + } + return func(desc v1.Descriptor) bool { + if desc.MediaType == "" { + return false + } + if _, ok := mts[string(desc.MediaType)]; ok { + return true + } + return false + } +} + +// Digests returns a match.Matcher that matches at least one of the provided Digests +func Digests(digests ...v1.Hash) Matcher { + digs := map[v1.Hash]bool{} + for _, digest := range digests { + digs[digest] = true + } + return func(desc v1.Descriptor) bool { + _, ok := digs[desc.Digest] + return ok + } +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/README.md b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/README.md new file mode 100644 index 000000000..19e161243 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/README.md @@ -0,0 +1,56 @@ +# `mutate` + +[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/mutate?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/mutate) + +The `v1.Image`, `v1.ImageIndex`, and `v1.Layer` interfaces provide only +accessor methods, so they are essentially immutable. If you want to change +something about them, you need to produce a new instance of that interface. + +A common use case for this library is to read an image from somewhere (a source), +change something about it, and write the image somewhere else (a sink). 
+
+Graphically, this looks something like:
+
+*(diagram omitted: a source image is read, mutated, and written to a sink)*
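+
+In code, a minimal sketch of that flow might look like the following (error
+handling elided; `src`, `dst`, `digest`, and the extra layer `l` are
+hypothetical placeholders, and `empty.Index` just provides a fresh index to
+write into):
+
+```go
+lp, _ := layout.FromPath(src)        // source: an OCI image layout on disk
+img, _ := lp.Image(digest)           // read a v1.Image out of it
+img, _ = mutate.AppendLayers(img, l) // change something about it
+dstPath, _ := layout.Write(dst, empty.Index)
+_ = dstPath.AppendImage(img)         // sink: another layout on disk
+```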
+ +## Mutations + +This is obviously not a comprehensive set of useful transformations (PRs welcome!), +but a rough summary of what the `mutate` package currently does: + +### `Config` and `ConfigFile` + +These allow you to change the [image configuration](https://github.com/opencontainers/image-spec/blob/master/config.md#properties), +e.g. to change the entrypoint, environment, author, etc. + +### `Time`, `Canonical`, and `CreatedAt` + +These are useful in the context of [reproducible builds](https://reproducible-builds.org/), +where you may want to strip timestamps and other non-reproducible information. + +### `Append`, `AppendLayers`, and `AppendManifests` + +These functions allow the extension of a `v1.Image` or `v1.ImageIndex` with +new layers or manifests. + +For constructing an image `FROM scratch`, see the [`empty`](/pkg/v1/empty) package. + +### `MediaType` and `IndexMediaType` + +Sometimes, it is necessary to change the media type of an image or index, +e.g. to appease a registry with strict validation of images (_looking at you, GCR_). + +### `Rebase` + +Rebase has [its own README](/cmd/crane/rebase.md). + +This is the underlying implementation of [`crane rebase`](https://github.com/google/go-containerregistry/blob/main/cmd/crane/doc/crane_rebase.md). + +### `Extract` + +Extract will flatten an image filesystem into a single tar stream, +respecting whiteout files. + +This is the underlying implementation of [`crane export`](https://github.com/google/go-containerregistry/blob/main/cmd/crane/doc/crane_export.md). diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/doc.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/doc.go new file mode 100644 index 000000000..dfbd9951e --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/doc.go @@ -0,0 +1,16 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package mutate provides facilities for mutating v1.Images of any kind. +package mutate diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/image.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/image.go new file mode 100644 index 000000000..14b9c70a5 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/image.go @@ -0,0 +1,270 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package mutate + +import ( + "bytes" + "encoding/json" + "errors" + "strings" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/stream" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +type image struct { + base v1.Image + adds []Addendum + + computed bool + configFile *v1.ConfigFile + manifest *v1.Manifest + mediaType *types.MediaType + diffIDMap map[v1.Hash]v1.Layer + digestMap map[v1.Hash]v1.Layer +} + +var _ v1.Image = (*image)(nil) + +func (i *image) MediaType() (types.MediaType, error) { + if i.mediaType != nil { + return *i.mediaType, nil + } + return i.base.MediaType() +} + +func (i *image) compute() error { + // Don't re-compute if already computed. + if i.computed { + return nil + } + var configFile *v1.ConfigFile + if i.configFile != nil { + configFile = i.configFile + } else { + cf, err := i.base.ConfigFile() + if err != nil { + return err + } + configFile = cf.DeepCopy() + } + diffIDs := configFile.RootFS.DiffIDs + history := configFile.History + + diffIDMap := make(map[v1.Hash]v1.Layer) + digestMap := make(map[v1.Hash]v1.Layer) + + for _, add := range i.adds { + history = append(history, add.History) + if add.Layer != nil { + diffID, err := add.Layer.DiffID() + if err != nil { + return err + } + diffIDs = append(diffIDs, diffID) + diffIDMap[diffID] = add.Layer + } + } + + m, err := i.base.Manifest() + if err != nil { + return err + } + manifest := m.DeepCopy() + manifestLayers := manifest.Layers + for _, add := range i.adds { + if add.Layer == nil { + // Empty layers include only history in manifest. + continue + } + + desc, err := partial.Descriptor(add.Layer) + if err != nil { + return err + } + + // Fields in the addendum override the original descriptor. + if len(add.Annotations) != 0 { + desc.Annotations = add.Annotations + } + if len(add.URLs) != 0 { + desc.URLs = add.URLs + } + + if add.MediaType != "" { + desc.MediaType = add.MediaType + } + + manifestLayers = append(manifestLayers, *desc) + digestMap[desc.Digest] = add.Layer + } + + configFile.RootFS.DiffIDs = diffIDs + configFile.History = history + + manifest.Layers = manifestLayers + + rcfg, err := json.Marshal(configFile) + if err != nil { + return err + } + d, sz, err := v1.SHA256(bytes.NewBuffer(rcfg)) + if err != nil { + return err + } + manifest.Config.Digest = d + manifest.Config.Size = sz + + // With OCI media types, this should not be set, see discussion: + // https://github.com/opencontainers/image-spec/pull/795 + if i.mediaType != nil { + if strings.Contains(string(*i.mediaType), types.OCIVendorPrefix) { + manifest.MediaType = "" + } else if strings.Contains(string(*i.mediaType), types.DockerVendorPrefix) { + manifest.MediaType = *i.mediaType + } + } + + i.configFile = configFile + i.manifest = manifest + i.diffIDMap = diffIDMap + i.digestMap = digestMap + i.computed = true + return nil +} + +// Layers returns the ordered collection of filesystem layers that comprise this image. +// The order of the list is oldest/base layer first, and most-recent/top layer last. +func (i *image) Layers() ([]v1.Layer, error) { + if err := i.compute(); err == stream.ErrNotComputed { + // Image contains a streamable layer which has not yet been + // consumed. Just return the layers we have in case the caller + // is going to consume the layers. 
+ layers, err := i.base.Layers() + if err != nil { + return nil, err + } + for _, add := range i.adds { + layers = append(layers, add.Layer) + } + return layers, nil + } else if err != nil { + return nil, err + } + + diffIDs, err := partial.DiffIDs(i) + if err != nil { + return nil, err + } + ls := make([]v1.Layer, 0, len(diffIDs)) + for _, h := range diffIDs { + l, err := i.LayerByDiffID(h) + if err != nil { + return nil, err + } + ls = append(ls, l) + } + return ls, nil +} + +// ConfigName returns the hash of the image's config file. +func (i *image) ConfigName() (v1.Hash, error) { + if err := i.compute(); err != nil { + return v1.Hash{}, err + } + return partial.ConfigName(i) +} + +// ConfigFile returns this image's config file. +func (i *image) ConfigFile() (*v1.ConfigFile, error) { + if err := i.compute(); err != nil { + return nil, err + } + return i.configFile, nil +} + +// RawConfigFile returns the serialized bytes of ConfigFile() +func (i *image) RawConfigFile() ([]byte, error) { + if err := i.compute(); err != nil { + return nil, err + } + return json.Marshal(i.configFile) +} + +// Digest returns the sha256 of this image's manifest. +func (i *image) Digest() (v1.Hash, error) { + if err := i.compute(); err != nil { + return v1.Hash{}, err + } + return partial.Digest(i) +} + +// Size implements v1.Image. +func (i *image) Size() (int64, error) { + if err := i.compute(); err != nil { + return -1, err + } + return partial.Size(i) +} + +// Manifest returns this image's Manifest object. +func (i *image) Manifest() (*v1.Manifest, error) { + if err := i.compute(); err != nil { + return nil, err + } + return i.manifest, nil +} + +// RawManifest returns the serialized bytes of Manifest() +func (i *image) RawManifest() ([]byte, error) { + if err := i.compute(); err != nil { + return nil, err + } + return json.Marshal(i.manifest) +} + +// LayerByDigest returns a Layer for interacting with a particular layer of +// the image, looking it up by "digest" (the compressed hash). +func (i *image) LayerByDigest(h v1.Hash) (v1.Layer, error) { + if cn, err := i.ConfigName(); err != nil { + return nil, err + } else if h == cn { + return partial.ConfigLayer(i) + } + if layer, ok := i.digestMap[h]; ok { + return layer, nil + } + return i.base.LayerByDigest(h) +} + +// LayerByDiffID is an analog to LayerByDigest, looking up by "diff id" +// (the uncompressed hash). +func (i *image) LayerByDiffID(h v1.Hash) (v1.Layer, error) { + if layer, ok := i.diffIDMap[h]; ok { + return layer, nil + } + return i.base.LayerByDiffID(h) +} + +func validate(adds []Addendum) error { + for _, add := range adds { + if add.Layer == nil && !add.History.EmptyLayer { + return errors.New("unable to add a nil layer to the image") + } + } + return nil +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/index.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/index.go new file mode 100644 index 000000000..140d99aa1 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/index.go @@ -0,0 +1,176 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mutate + +import ( + "encoding/json" + "strings" + + "github.com/google/go-containerregistry/pkg/logs" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/match" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +func computeDescriptor(ia IndexAddendum) (*v1.Descriptor, error) { + desc, err := partial.Descriptor(ia.Add) + if err != nil { + return nil, err + } + + // The IndexAddendum allows overriding Descriptor values. + if ia.Descriptor.Size != 0 { + desc.Size = ia.Descriptor.Size + } + if string(ia.Descriptor.MediaType) != "" { + desc.MediaType = ia.Descriptor.MediaType + } + if ia.Descriptor.Digest != (v1.Hash{}) { + desc.Digest = ia.Descriptor.Digest + } + if ia.Descriptor.Platform != nil { + desc.Platform = ia.Descriptor.Platform + } + if len(ia.Descriptor.URLs) != 0 { + desc.URLs = ia.Descriptor.URLs + } + if len(ia.Descriptor.Annotations) != 0 { + desc.Annotations = ia.Descriptor.Annotations + } + + return desc, nil +} + +type index struct { + base v1.ImageIndex + adds []IndexAddendum + // remove is removed before adds + remove match.Matcher + + computed bool + manifest *v1.IndexManifest + mediaType *types.MediaType + imageMap map[v1.Hash]v1.Image + indexMap map[v1.Hash]v1.ImageIndex +} + +var _ v1.ImageIndex = (*index)(nil) + +func (i *index) MediaType() (types.MediaType, error) { + if i.mediaType != nil { + return *i.mediaType, nil + } + return i.base.MediaType() +} + +func (i *index) Size() (int64, error) { return partial.Size(i) } + +func (i *index) compute() error { + // Don't re-compute if already computed. 
+ if i.computed { + return nil + } + + i.imageMap = make(map[v1.Hash]v1.Image) + i.indexMap = make(map[v1.Hash]v1.ImageIndex) + + m, err := i.base.IndexManifest() + if err != nil { + return err + } + manifest := m.DeepCopy() + manifests := manifest.Manifests + + if i.remove != nil { + var cleanedManifests []v1.Descriptor + for _, m := range manifests { + if !i.remove(m) { + cleanedManifests = append(cleanedManifests, m) + } + } + manifests = cleanedManifests + } + + for _, add := range i.adds { + desc, err := computeDescriptor(add) + if err != nil { + return err + } + + manifests = append(manifests, *desc) + if idx, ok := add.Add.(v1.ImageIndex); ok { + i.indexMap[desc.Digest] = idx + } else if img, ok := add.Add.(v1.Image); ok { + i.imageMap[desc.Digest] = img + } else { + logs.Warn.Printf("Unexpected index addendum: %T", add.Add) + } + } + + manifest.Manifests = manifests + + // With OCI media types, this should not be set, see discussion: + // https://github.com/opencontainers/image-spec/pull/795 + if i.mediaType != nil { + if strings.Contains(string(*i.mediaType), types.OCIVendorPrefix) { + manifest.MediaType = "" + } else if strings.Contains(string(*i.mediaType), types.DockerVendorPrefix) { + manifest.MediaType = *i.mediaType + } + } + + i.manifest = manifest + i.computed = true + return nil +} + +func (i *index) Image(h v1.Hash) (v1.Image, error) { + if img, ok := i.imageMap[h]; ok { + return img, nil + } + return i.base.Image(h) +} + +func (i *index) ImageIndex(h v1.Hash) (v1.ImageIndex, error) { + if idx, ok := i.indexMap[h]; ok { + return idx, nil + } + return i.base.ImageIndex(h) +} + +// Digest returns the sha256 of this image's manifest. +func (i *index) Digest() (v1.Hash, error) { + if err := i.compute(); err != nil { + return v1.Hash{}, err + } + return partial.Digest(i) +} + +// Manifest returns this image's Manifest object. +func (i *index) IndexManifest() (*v1.IndexManifest, error) { + if err := i.compute(); err != nil { + return nil, err + } + return i.manifest, nil +} + +// RawManifest returns the serialized bytes of Manifest() +func (i *index) RawManifest() ([]byte, error) { + if err := i.compute(); err != nil { + return nil, err + } + return json.Marshal(i.manifest) +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go new file mode 100644 index 000000000..78b768fcc --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go @@ -0,0 +1,396 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mutate + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "path/filepath" + "strings" + "time" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/empty" + "github.com/google/go-containerregistry/pkg/v1/internal/gzip" + "github.com/google/go-containerregistry/pkg/v1/match" + "github.com/google/go-containerregistry/pkg/v1/tarball" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +const whiteoutPrefix = ".wh." + +// Addendum contains layers and history to be appended +// to a base image +type Addendum struct { + Layer v1.Layer + History v1.History + URLs []string + Annotations map[string]string + MediaType types.MediaType +} + +// AppendLayers applies layers to a base image. +func AppendLayers(base v1.Image, layers ...v1.Layer) (v1.Image, error) { + additions := make([]Addendum, 0, len(layers)) + for _, layer := range layers { + additions = append(additions, Addendum{Layer: layer}) + } + + return Append(base, additions...) +} + +// Append will apply the list of addendums to the base image +func Append(base v1.Image, adds ...Addendum) (v1.Image, error) { + if len(adds) == 0 { + return base, nil + } + if err := validate(adds); err != nil { + return nil, err + } + + return &image{ + base: base, + adds: adds, + }, nil +} + +// Appendable is an interface that represents something that can be appended +// to an ImageIndex. We need to be able to construct a v1.Descriptor in order +// to append something, and this is the minimum required information for that. +type Appendable interface { + MediaType() (types.MediaType, error) + Digest() (v1.Hash, error) + Size() (int64, error) +} + +// IndexAddendum represents an appendable thing and all the properties that +// we may want to override in the resulting v1.Descriptor. +type IndexAddendum struct { + Add Appendable + v1.Descriptor +} + +// AppendManifests appends a manifest to the ImageIndex. +func AppendManifests(base v1.ImageIndex, adds ...IndexAddendum) v1.ImageIndex { + return &index{ + base: base, + adds: adds, + } +} + +// RemoveManifests removes any descriptors that match the match.Matcher. +func RemoveManifests(base v1.ImageIndex, matcher match.Matcher) v1.ImageIndex { + return &index{ + base: base, + remove: matcher, + } +} + +// Config mutates the provided v1.Image to have the provided v1.Config +func Config(base v1.Image, cfg v1.Config) (v1.Image, error) { + cf, err := base.ConfigFile() + if err != nil { + return nil, err + } + + cf.Config = cfg + + return ConfigFile(base, cf) +} + +// ConfigFile mutates the provided v1.Image to have the provided v1.ConfigFile +func ConfigFile(base v1.Image, cfg *v1.ConfigFile) (v1.Image, error) { + m, err := base.Manifest() + if err != nil { + return nil, err + } + + image := &image{ + base: base, + manifest: m.DeepCopy(), + configFile: cfg, + } + + return image, nil +} + +// CreatedAt mutates the provided v1.Image to have the provided v1.Time +func CreatedAt(base v1.Image, created v1.Time) (v1.Image, error) { + cf, err := base.ConfigFile() + if err != nil { + return nil, err + } + + cfg := cf.DeepCopy() + cfg.Created = created + + return ConfigFile(base, cfg) +} + +// Extract takes an image and returns an io.ReadCloser containing the image's +// flattened filesystem. +// +// Callers can read the filesystem contents by passing the reader to +// tar.NewReader, or io.Copy it directly to some output. +// +// If a caller doesn't read the full contents, they should Close it to free up +// resources used during extraction. 
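+//
+// A sketch of typical use (img is assumed to be obtained elsewhere):
+//
+//	rc := mutate.Extract(img)
+//	defer rc.Close()
+//	tr := tar.NewReader(rc)
+//	for {
+//		hdr, err := tr.Next()
+//		if err == io.EOF {
+//			break
+//		} else if err != nil {
+//			return err
+//		}
+//		// inspect hdr, or read the entry's contents from tr
+//	}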
+func Extract(img v1.Image) io.ReadCloser { + pr, pw := io.Pipe() + + go func() { + // Close the writer with any errors encountered during + // extraction. These errors will be returned by the reader end + // on subsequent reads. If err == nil, the reader will return + // EOF. + pw.CloseWithError(extract(img, pw)) + }() + + return pr +} + +// Adapted from https://github.com/google/containerregistry/blob/da03b395ccdc4e149e34fbb540483efce962dc64/client/v2_2/docker_image_.py#L816 +func extract(img v1.Image, w io.Writer) error { + tarWriter := tar.NewWriter(w) + defer tarWriter.Close() + + fileMap := map[string]bool{} + + layers, err := img.Layers() + if err != nil { + return fmt.Errorf("retrieving image layers: %v", err) + } + // we iterate through the layers in reverse order because it makes handling + // whiteout layers more efficient, since we can just keep track of the removed + // files as we see .wh. layers and ignore those in previous layers. + for i := len(layers) - 1; i >= 0; i-- { + layer := layers[i] + layerReader, err := layer.Uncompressed() + if err != nil { + return fmt.Errorf("reading layer contents: %v", err) + } + defer layerReader.Close() + tarReader := tar.NewReader(layerReader) + for { + header, err := tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + return fmt.Errorf("reading tar: %v", err) + } + + basename := filepath.Base(header.Name) + dirname := filepath.Dir(header.Name) + tombstone := strings.HasPrefix(basename, whiteoutPrefix) + if tombstone { + basename = basename[len(whiteoutPrefix):] + } + + // check if we have seen value before + // if we're checking a directory, don't filepath.Join names + var name string + if header.Typeflag == tar.TypeDir { + name = header.Name + } else { + name = filepath.Join(dirname, basename) + } + + if _, ok := fileMap[name]; ok { + continue + } + + // check for a whited out parent directory + if inWhiteoutDir(fileMap, name) { + continue + } + + // mark file as handled. non-directory implicitly tombstones + // any entries with a matching (or child) name + fileMap[name] = tombstone || !(header.Typeflag == tar.TypeDir) + if !tombstone { + tarWriter.WriteHeader(header) + if header.Size > 0 { + if _, err := io.Copy(tarWriter, tarReader); err != nil { + return err + } + } + } + } + } + return nil +} + +func inWhiteoutDir(fileMap map[string]bool, file string) bool { + for { + if file == "" { + break + } + dirname := filepath.Dir(file) + if file == dirname { + break + } + if val, ok := fileMap[dirname]; ok && val { + return true + } + file = dirname + } + return false +} + +// Time sets all timestamps in an image to the given timestamp. +func Time(img v1.Image, t time.Time) (v1.Image, error) { + newImage := empty.Image + + layers, err := img.Layers() + if err != nil { + return nil, fmt.Errorf("getting image layers: %v", err) + } + + // Strip away all timestamps from layers + var newLayers []v1.Layer + for _, layer := range layers { + newLayer, err := layerTime(layer, t) + if err != nil { + return nil, fmt.Errorf("setting layer times: %v", err) + } + newLayers = append(newLayers, newLayer) + } + + newImage, err = AppendLayers(newImage, newLayers...) 
+ if err != nil { + return nil, fmt.Errorf("appending layers: %v", err) + } + + ocf, err := img.ConfigFile() + if err != nil { + return nil, fmt.Errorf("getting original config file: %v", err) + } + + cf, err := newImage.ConfigFile() + if err != nil { + return nil, fmt.Errorf("setting config file: %v", err) + } + + cfg := cf.DeepCopy() + + // Copy basic config over + cfg.Architecture = ocf.Architecture + cfg.OS = ocf.OS + cfg.OSVersion = ocf.OSVersion + cfg.Config = ocf.Config + + // Strip away timestamps from the config file + cfg.Created = v1.Time{Time: t} + + for _, h := range cfg.History { + h.Created = v1.Time{Time: t} + } + + return ConfigFile(newImage, cfg) +} + +func layerTime(layer v1.Layer, t time.Time) (v1.Layer, error) { + layerReader, err := layer.Uncompressed() + if err != nil { + return nil, fmt.Errorf("getting layer: %v", err) + } + defer layerReader.Close() + w := new(bytes.Buffer) + tarWriter := tar.NewWriter(w) + defer tarWriter.Close() + + tarReader := tar.NewReader(layerReader) + for { + header, err := tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, fmt.Errorf("reading layer: %v", err) + } + + header.ModTime = t + if err := tarWriter.WriteHeader(header); err != nil { + return nil, fmt.Errorf("writing tar header: %v", err) + } + + if header.Typeflag == tar.TypeReg { + if _, err = io.Copy(tarWriter, tarReader); err != nil { + return nil, fmt.Errorf("writing layer file: %v", err) + } + } + } + + if err := tarWriter.Close(); err != nil { + return nil, err + } + + b := w.Bytes() + // gzip the contents, then create the layer + opener := func() (io.ReadCloser, error) { + return gzip.ReadCloser(ioutil.NopCloser(bytes.NewReader(b))), nil + } + layer, err = tarball.LayerFromOpener(opener) + if err != nil { + return nil, fmt.Errorf("creating layer: %v", err) + } + + return layer, nil +} + +// Canonical is a helper function to combine Time and configFile +// to remove any randomness during a docker build. +func Canonical(img v1.Image) (v1.Image, error) { + // Set all timestamps to 0 + created := time.Time{} + img, err := Time(img, created) + if err != nil { + return nil, err + } + + cf, err := img.ConfigFile() + if err != nil { + return nil, err + } + + // Get rid of host-dependent random config + cfg := cf.DeepCopy() + + cfg.Container = "" + cfg.Config.Hostname = "" + cfg.DockerVersion = "" + + return ConfigFile(img, cfg) +} + +// MediaType modifies the MediaType() of the given image. +func MediaType(img v1.Image, mt types.MediaType) v1.Image { + return &image{ + base: img, + mediaType: &mt, + } +} + +// IndexMediaType modifies the MediaType() of the given index. +func IndexMediaType(idx v1.ImageIndex, mt types.MediaType) v1.ImageIndex { + return &index{ + base: idx, + mediaType: &mt, + } +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/rebase.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/rebase.go new file mode 100644 index 000000000..a86a65243 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/rebase.go @@ -0,0 +1,144 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mutate + +import ( + "fmt" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/empty" +) + +// Rebase returns a new v1.Image where the oldBase in orig is replaced by newBase. +func Rebase(orig, oldBase, newBase v1.Image) (v1.Image, error) { + // Verify that oldBase's layers are present in orig, otherwise orig is + // not based on oldBase at all. + origLayers, err := orig.Layers() + if err != nil { + return nil, fmt.Errorf("failed to get layers for original: %v", err) + } + oldBaseLayers, err := oldBase.Layers() + if err != nil { + return nil, err + } + if len(oldBaseLayers) > len(origLayers) { + return nil, fmt.Errorf("image %q is not based on %q (too few layers)", orig, oldBase) + } + for i, l := range oldBaseLayers { + oldLayerDigest, err := l.Digest() + if err != nil { + return nil, fmt.Errorf("failed to get digest of layer %d of %q: %v", i, oldBase, err) + } + origLayerDigest, err := origLayers[i].Digest() + if err != nil { + return nil, fmt.Errorf("failed to get digest of layer %d of %q: %v", i, orig, err) + } + if oldLayerDigest != origLayerDigest { + return nil, fmt.Errorf("image %q is not based on %q (layer %d mismatch)", orig, oldBase, i) + } + } + + oldConfig, err := oldBase.ConfigFile() + if err != nil { + return nil, fmt.Errorf("failed to get config for old base: %v", err) + } + + origConfig, err := orig.ConfigFile() + if err != nil { + return nil, fmt.Errorf("failed to get config for original: %v", err) + } + + newConfig, err := newBase.ConfigFile() + if err != nil { + return nil, fmt.Errorf("could not get config for new base: %v", err) + } + + // Stitch together an image that contains: + // - original image's config + // - new base image's os/arch properties + // - new base image's layers + top of original image's layers + // - new base image's history + top of original image's history + rebasedImage, err := Config(empty.Image, *origConfig.Config.DeepCopy()) + if err != nil { + return nil, fmt.Errorf("failed to create empty image with original config: %v", err) + } + + // Add new config properties from existing images. + rebasedConfig, err := rebasedImage.ConfigFile() + if err != nil { + return nil, fmt.Errorf("could not get config for rebased image: %v", err) + } + // OS/Arch properties from new base + rebasedConfig.Architecture = newConfig.Architecture + rebasedConfig.OS = newConfig.OS + rebasedConfig.OSVersion = newConfig.OSVersion + + // Apply config properties to rebased. + rebasedImage, err = ConfigFile(rebasedImage, rebasedConfig) + if err != nil { + return nil, fmt.Errorf("failed to replace config for rebased image: %v", err) + } + + // Get new base layers and config for history. + newBaseLayers, err := newBase.Layers() + if err != nil { + return nil, fmt.Errorf("could not get new base layers for new base: %v", err) + } + // Add new base layers. + rebasedImage, err = Append(rebasedImage, createAddendums(0, 0, newConfig.History, newBaseLayers)...) + if err != nil { + return nil, fmt.Errorf("failed to append new base image: %v", err) + } + + // Add original layers above the old base. 
+	rebasedImage, err = Append(rebasedImage, createAddendums(len(oldConfig.History), len(oldBaseLayers)+1, origConfig.History, origLayers)...)
+	if err != nil {
+		return nil, fmt.Errorf("failed to append original image: %v", err)
+	}
+
+	return rebasedImage, nil
+}
+
+// createAddendums makes a list of addendums from a history and layers starting from a specific history and layer
+// indexes.
+func createAddendums(startHistory, startLayer int, history []v1.History, layers []v1.Layer) []Addendum {
+	var adds []Addendum
+	// History should be a superset of layers; empty layers (e.g. ENV statements) only exist in history.
+	// They cannot be iterated identically but must be walked independently, only advancing the iterator for layers
+	// when a history entry for a non-empty layer is seen.
+	layerIndex := 0
+	for historyIndex := range history {
+		var layer v1.Layer
+		emptyLayer := history[historyIndex].EmptyLayer
+		if !emptyLayer {
+			layer = layers[layerIndex]
+			layerIndex++
+		}
+		if historyIndex >= startHistory || layerIndex >= startLayer {
+			adds = append(adds, Addendum{
+				Layer:   layer,
+				History: history[historyIndex],
+			})
+		}
+	}
+	// In the event history was malformed or non-existent, append the remaining layers.
+	for i := layerIndex; i < len(layers); i++ {
+		if i >= startLayer {
+			adds = append(adds, Addendum{Layer: layers[i]})
+		}
+	}
+
+	return adds
+}
diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/partial/README.md b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/partial/README.md
new file mode 100644
index 000000000..009214a82
--- /dev/null
+++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/partial/README.md
@@ -0,0 +1,66 @@
+# `partial`
+
+[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial)
+
+## Partial Implementations
+
+There are roughly two kinds of image representations: compressed and uncompressed.
+
+The implementations for these kinds of images are almost identical, with the only
+major difference being how blobs (config and layers) are fetched. This common
+code lives in this package, where you provide a _partial_ implementation of a
+compressed or uncompressed image, and you get back a full `v1.Image` implementation.
+
+### Examples
+
+In a registry, blobs are compressed, so it's easiest to implement a `v1.Image` in terms
+of compressed layers. `remote.remoteImage` does this by implementing `CompressedImageCore`:
+
+```go
+type CompressedImageCore interface {
+	RawConfigFile() ([]byte, error)
+	MediaType() (types.MediaType, error)
+	RawManifest() ([]byte, error)
+	LayerByDigest(v1.Hash) (CompressedLayer, error)
+}
+```
+
+In a tarball, blobs are (often) uncompressed, so it's easiest to implement a `v1.Image` in terms
+of uncompressed layers. `tarball.uncompressedImage` does this by implementing `UncompressedImageCore`:
+
+```go
+type UncompressedImageCore interface {
+	RawConfigFile() ([]byte, error)
+	MediaType() (types.MediaType, error)
+	LayerByDiffID(v1.Hash) (UncompressedLayer, error)
+}
+```
+
+## Optional Methods
+
+Where possible, we access some information via optional methods as an optimization.
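+
+For example, a natively compressed layer that already knows its uncompressed
+digest can expose it through the optional `WithDiffID` interface, so that
+`partial.CompressedToLayer` never has to decompress the blob just to hash it.
+A sketch (the `myLayer` type and its `knownDiffID` field are hypothetical):
+
+```go
+// DiffID satisfies partial.WithDiffID, letting partial skip decompression.
+func (l *myLayer) DiffID() (v1.Hash, error) {
+	return l.knownDiffID, nil
+}
+```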
+
+### [`partial.Descriptor`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial#Descriptor)
+
+There are some properties of a [`Descriptor`](https://github.com/opencontainers/image-spec/blob/master/descriptor.md#properties) that aren't derivable from just image data:
+
+* `MediaType`
+* `Platform`
+* `URLs`
+* `Annotations`
+
+For example, in a `tarball.Image`, there is a `LayerSources` field that contains
+an entire layer descriptor with `URLs` information for foreign layers. This
+information can be passed through to callers by implementing this optional
+`Descriptor` method.
+
+See [`#654`](https://github.com/google/go-containerregistry/pull/654).
+
+### [`partial.UncompressedSize`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial#UncompressedSize)
+
+Usually, you don't need to know the uncompressed size of a layer, since that
+information isn't stored in a config file (just the sha256 is needed); however,
+there are cases where it is very helpful to know the layer size, e.g. when
+writing the uncompressed layer into a tarball.
+
+See [`#655`](https://github.com/google/go-containerregistry/pull/655).
diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go
new file mode 100644
index 000000000..0a2295579
--- /dev/null
+++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go
@@ -0,0 +1,163 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package partial
+
+import (
+	"io"
+
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/internal/gzip"
+	"github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// CompressedLayer represents the bare minimum interface a natively
+// compressed layer must implement for us to produce a v1.Layer
+type CompressedLayer interface {
+	// Digest returns the Hash of the compressed layer.
+	Digest() (v1.Hash, error)
+
+	// Compressed returns an io.ReadCloser for the compressed layer contents.
+	Compressed() (io.ReadCloser, error)
+
+	// Size returns the compressed size of the Layer.
+	Size() (int64, error)
+
+	// Returns the mediaType for the compressed Layer
+	MediaType() (types.MediaType, error)
+}
+
+// compressedLayerExtender implements v1.Layer using the compressed base properties.
+type compressedLayerExtender struct {
+	CompressedLayer
+}
+
+// Uncompressed implements v1.Layer
+func (cle *compressedLayerExtender) Uncompressed() (io.ReadCloser, error) {
+	r, err := cle.Compressed()
+	if err != nil {
+		return nil, err
+	}
+	return gzip.UnzipReadCloser(r)
+}
+
+// DiffID implements v1.Layer
+func (cle *compressedLayerExtender) DiffID() (v1.Hash, error) {
+	// If our nested CompressedLayer implements DiffID,
+	// then delegate to it instead.
+ if wdi, ok := cle.CompressedLayer.(WithDiffID); ok {
+ return wdi.DiffID()
+ }
+ r, err := cle.Uncompressed()
+ if err != nil {
+ return v1.Hash{}, err
+ }
+ defer r.Close()
+ h, _, err := v1.SHA256(r)
+ return h, err
+}
+
+// CompressedToLayer fills in the missing methods from a CompressedLayer so that it implements v1.Layer
+func CompressedToLayer(cl CompressedLayer) (v1.Layer, error) {
+ return &compressedLayerExtender{cl}, nil
+}
+
+// CompressedImageCore represents the bare minimum interface a natively
+// compressed image must implement for us to produce a v1.Image.
+type CompressedImageCore interface {
+ ImageCore
+
+ // RawManifest returns the serialized bytes of the manifest.
+ RawManifest() ([]byte, error)
+
+ // LayerByDigest is a variation on the v1.Image method, which returns
+ // a CompressedLayer instead.
+ LayerByDigest(v1.Hash) (CompressedLayer, error)
+}
+
+// compressedImageExtender implements v1.Image by extending CompressedImageCore with the
+// appropriate methods computed from the minimal core.
+type compressedImageExtender struct {
+ CompressedImageCore
+}
+
+// Assert that our extender type completes the v1.Image interface
+var _ v1.Image = (*compressedImageExtender)(nil)
+
+// Digest implements v1.Image
+func (i *compressedImageExtender) Digest() (v1.Hash, error) {
+ return Digest(i)
+}
+
+// ConfigName implements v1.Image
+func (i *compressedImageExtender) ConfigName() (v1.Hash, error) {
+ return ConfigName(i)
+}
+
+// Layers implements v1.Image
+func (i *compressedImageExtender) Layers() ([]v1.Layer, error) {
+ hs, err := FSLayers(i)
+ if err != nil {
+ return nil, err
+ }
+ ls := make([]v1.Layer, 0, len(hs))
+ for _, h := range hs {
+ l, err := i.LayerByDigest(h)
+ if err != nil {
+ return nil, err
+ }
+ ls = append(ls, l)
+ }
+ return ls, nil
+}
+
+// LayerByDigest implements v1.Image
+func (i *compressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) {
+ cl, err := i.CompressedImageCore.LayerByDigest(h)
+ if err != nil {
+ return nil, err
+ }
+ return CompressedToLayer(cl)
+}
+
+// LayerByDiffID implements v1.Image
+func (i *compressedImageExtender) LayerByDiffID(h v1.Hash) (v1.Layer, error) {
+ h, err := DiffIDToBlob(i, h)
+ if err != nil {
+ return nil, err
+ }
+ return i.LayerByDigest(h)
+}
+
+// ConfigFile implements v1.Image
+func (i *compressedImageExtender) ConfigFile() (*v1.ConfigFile, error) {
+ return ConfigFile(i)
+}
+
+// Manifest implements v1.Image
+func (i *compressedImageExtender) Manifest() (*v1.Manifest, error) {
+ return Manifest(i)
+}
+
+// Size implements v1.Image
+func (i *compressedImageExtender) Size() (int64, error) {
+ return Size(i)
+}
+
+// CompressedToImage fills in the missing methods from a CompressedImageCore so that it implements v1.Image
+func CompressedToImage(cic CompressedImageCore) (v1.Image, error) {
+ return &compressedImageExtender{
+ CompressedImageCore: cic,
+ }, nil
+}
diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/partial/doc.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/partial/doc.go
new file mode 100644
index 000000000..153dfe4d5
--- /dev/null
+++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/partial/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package partial defines methods for building up a v1.Image from +// minimal subsets that are sufficient for defining a v1.Image. +package partial diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/partial/image.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/partial/image.go new file mode 100644 index 000000000..c65f45e0d --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/partial/image.go @@ -0,0 +1,28 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package partial + +import ( + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// ImageCore is the core set of properties without which we cannot build a v1.Image +type ImageCore interface { + // RawConfigFile returns the serialized bytes of this image's config file. + RawConfigFile() ([]byte, error) + + // MediaType of this image's manifest. + MediaType() (types.MediaType, error) +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/partial/index.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/partial/index.go new file mode 100644 index 000000000..9c7a92485 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/partial/index.go @@ -0,0 +1,85 @@ +// Copyright 2020 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package partial + +import ( + "fmt" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/match" +) + +// FindManifests given a v1.ImageIndex, find the manifests that fit the matcher. 
+func FindManifests(index v1.ImageIndex, matcher match.Matcher) ([]v1.Descriptor, error) {
+ // get the actual manifest list
+ indexManifest, err := index.IndexManifest()
+ if err != nil {
+ return nil, fmt.Errorf("unable to get raw index: %v", err)
+ }
+ manifests := []v1.Descriptor{}
+ // try to get the root of our image
+ for _, manifest := range indexManifest.Manifests {
+ if matcher(manifest) {
+ manifests = append(manifests, manifest)
+ }
+ }
+ return manifests, nil
+}
+
+// FindImages given a v1.ImageIndex, find the images that fit the matcher. If a Descriptor
+// matches the provided Matcher, but the referenced item is not an Image, it is ignored.
+// Only returns those that match the Matcher and are images.
+func FindImages(index v1.ImageIndex, matcher match.Matcher) ([]v1.Image, error) {
+ matches := []v1.Image{}
+ manifests, err := FindManifests(index, matcher)
+ if err != nil {
+ return nil, err
+ }
+ for _, desc := range manifests {
+ // if it is not an image, ignore it
+ if !desc.MediaType.IsImage() {
+ continue
+ }
+ img, err := index.Image(desc.Digest)
+ if err != nil {
+ return nil, err
+ }
+ matches = append(matches, img)
+ }
+ return matches, nil
+}
+
+// FindIndexes given a v1.ImageIndex, find the indexes that fit the matcher. If a Descriptor
+// matches the provided Matcher, but the referenced item is not an Index, it is ignored.
+// Only returns those that match the Matcher and are indexes.
+func FindIndexes(index v1.ImageIndex, matcher match.Matcher) ([]v1.ImageIndex, error) {
+ matches := []v1.ImageIndex{}
+ manifests, err := FindManifests(index, matcher)
+ if err != nil {
+ return nil, err
+ }
+ for _, desc := range manifests {
+ // if it is not an index, ignore it
+ if !desc.MediaType.IsIndex() {
+ continue
+ }
+ idx, err := index.ImageIndex(desc.Digest)
+ if err != nil {
+ return nil, err
+ }
+ matches = append(matches, idx)
+ }
+ return matches, nil
+}
diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go
new file mode 100644
index 000000000..69cea61c0
--- /dev/null
+++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go
@@ -0,0 +1,223 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package partial
+
+import (
+ "bytes"
+ "io"
+ "sync"
+
+ v1 "github.com/google/go-containerregistry/pkg/v1"
+ "github.com/google/go-containerregistry/pkg/v1/internal/gzip"
+ "github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// UncompressedLayer represents the bare minimum interface a natively
+// uncompressed layer must implement for us to produce a v1.Layer
+type UncompressedLayer interface {
+ // DiffID returns the Hash of the uncompressed layer.
+ DiffID() (v1.Hash, error)
+
+ // Uncompressed returns an io.ReadCloser for the uncompressed layer contents.
+ Uncompressed() (io.ReadCloser, error)
+
+ // Returns the mediaType for the uncompressed Layer
+ MediaType() (types.MediaType, error)
+}
+
+// uncompressedLayerExtender implements v1.Layer using the uncompressed base properties.
+type uncompressedLayerExtender struct {
+ UncompressedLayer
+ // Memoize size/hash so that the methods aren't twice as
+ // expensive as doing this manually.
+ hash v1.Hash
+ size int64
+ hashSizeError error
+ once sync.Once
+}
+
+// Compressed implements v1.Layer
+func (ule *uncompressedLayerExtender) Compressed() (io.ReadCloser, error) {
+ u, err := ule.Uncompressed()
+ if err != nil {
+ return nil, err
+ }
+ return gzip.ReadCloser(u), nil
+}
+
+// Digest implements v1.Layer
+func (ule *uncompressedLayerExtender) Digest() (v1.Hash, error) {
+ ule.calcSizeHash()
+ return ule.hash, ule.hashSizeError
+}
+
+// Size implements v1.Layer
+func (ule *uncompressedLayerExtender) Size() (int64, error) {
+ ule.calcSizeHash()
+ return ule.size, ule.hashSizeError
+}
+
+func (ule *uncompressedLayerExtender) calcSizeHash() {
+ ule.once.Do(func() {
+ var r io.ReadCloser
+ r, ule.hashSizeError = ule.Compressed()
+ if ule.hashSizeError != nil {
+ return
+ }
+ defer r.Close()
+ ule.hash, ule.size, ule.hashSizeError = v1.SHA256(r)
+ })
+}
+
+// UncompressedToLayer fills in the missing methods from an UncompressedLayer so that it implements v1.Layer
+func UncompressedToLayer(ul UncompressedLayer) (v1.Layer, error) {
+ return &uncompressedLayerExtender{UncompressedLayer: ul}, nil
+}
+
+// UncompressedImageCore represents the bare minimum interface a natively
+// uncompressed image must implement for us to produce a v1.Image
+type UncompressedImageCore interface {
+ ImageCore
+
+ // LayerByDiffID is a variation on the v1.Image method, which returns
+ // an UncompressedLayer instead.
+ LayerByDiffID(v1.Hash) (UncompressedLayer, error)
+}
+
+// UncompressedToImage fills in the missing methods from an UncompressedImageCore so that it implements v1.Image.
+func UncompressedToImage(uic UncompressedImageCore) (v1.Image, error) {
+ return &uncompressedImageExtender{
+ UncompressedImageCore: uic,
+ }, nil
+}
+
+// uncompressedImageExtender implements v1.Image by extending UncompressedImageCore with the
+// appropriate methods computed from the minimal core.
+type uncompressedImageExtender struct { + UncompressedImageCore + + lock sync.Mutex + manifest *v1.Manifest +} + +// Assert that our extender type completes the v1.Image interface +var _ v1.Image = (*uncompressedImageExtender)(nil) + +// Digest implements v1.Image +func (i *uncompressedImageExtender) Digest() (v1.Hash, error) { + return Digest(i) +} + +// Manifest implements v1.Image +func (i *uncompressedImageExtender) Manifest() (*v1.Manifest, error) { + i.lock.Lock() + defer i.lock.Unlock() + if i.manifest != nil { + return i.manifest, nil + } + + b, err := i.RawConfigFile() + if err != nil { + return nil, err + } + + cfgHash, cfgSize, err := v1.SHA256(bytes.NewReader(b)) + if err != nil { + return nil, err + } + + m := &v1.Manifest{ + SchemaVersion: 2, + MediaType: types.DockerManifestSchema2, + Config: v1.Descriptor{ + MediaType: types.DockerConfigJSON, + Size: cfgSize, + Digest: cfgHash, + }, + } + + ls, err := i.Layers() + if err != nil { + return nil, err + } + + m.Layers = make([]v1.Descriptor, len(ls)) + for i, l := range ls { + desc, err := Descriptor(l) + if err != nil { + return nil, err + } + + m.Layers[i] = *desc + } + + i.manifest = m + return i.manifest, nil +} + +// RawManifest implements v1.Image +func (i *uncompressedImageExtender) RawManifest() ([]byte, error) { + return RawManifest(i) +} + +// Size implements v1.Image +func (i *uncompressedImageExtender) Size() (int64, error) { + return Size(i) +} + +// ConfigName implements v1.Image +func (i *uncompressedImageExtender) ConfigName() (v1.Hash, error) { + return ConfigName(i) +} + +// ConfigFile implements v1.Image +func (i *uncompressedImageExtender) ConfigFile() (*v1.ConfigFile, error) { + return ConfigFile(i) +} + +// Layers implements v1.Image +func (i *uncompressedImageExtender) Layers() ([]v1.Layer, error) { + diffIDs, err := DiffIDs(i) + if err != nil { + return nil, err + } + ls := make([]v1.Layer, 0, len(diffIDs)) + for _, h := range diffIDs { + l, err := i.LayerByDiffID(h) + if err != nil { + return nil, err + } + ls = append(ls, l) + } + return ls, nil +} + +// LayerByDiffID implements v1.Image +func (i *uncompressedImageExtender) LayerByDiffID(diffID v1.Hash) (v1.Layer, error) { + ul, err := i.UncompressedImageCore.LayerByDiffID(diffID) + if err != nil { + return nil, err + } + return UncompressedToLayer(ul) +} + +// LayerByDigest implements v1.Image +func (i *uncompressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) { + diffID, err := BlobToDiffID(i, h) + if err != nil { + return nil, err + } + return i.LayerByDiffID(diffID) +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go new file mode 100644 index 000000000..b24f31c7b --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go @@ -0,0 +1,382 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package partial
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+
+ v1 "github.com/google/go-containerregistry/pkg/v1"
+ "github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// WithRawConfigFile defines the subset of v1.Image used by these helper methods
+type WithRawConfigFile interface {
+ // RawConfigFile returns the serialized bytes of this image's config file.
+ RawConfigFile() ([]byte, error)
+}
+
+// ConfigFile is a helper for implementing v1.Image
+func ConfigFile(i WithRawConfigFile) (*v1.ConfigFile, error) {
+ b, err := i.RawConfigFile()
+ if err != nil {
+ return nil, err
+ }
+ return v1.ParseConfigFile(bytes.NewReader(b))
+}
+
+// ConfigName is a helper for implementing v1.Image
+func ConfigName(i WithRawConfigFile) (v1.Hash, error) {
+ b, err := i.RawConfigFile()
+ if err != nil {
+ return v1.Hash{}, err
+ }
+ h, _, err := v1.SHA256(bytes.NewReader(b))
+ return h, err
+}
+
+type configLayer struct {
+ hash v1.Hash
+ content []byte
+}
+
+// Digest implements v1.Layer
+func (cl *configLayer) Digest() (v1.Hash, error) {
+ return cl.hash, nil
+}
+
+// DiffID implements v1.Layer
+func (cl *configLayer) DiffID() (v1.Hash, error) {
+ return cl.hash, nil
+}
+
+// Uncompressed implements v1.Layer
+func (cl *configLayer) Uncompressed() (io.ReadCloser, error) {
+ return ioutil.NopCloser(bytes.NewBuffer(cl.content)), nil
+}
+
+// Compressed implements v1.Layer
+func (cl *configLayer) Compressed() (io.ReadCloser, error) {
+ return ioutil.NopCloser(bytes.NewBuffer(cl.content)), nil
+}
+
+// Size implements v1.Layer
+func (cl *configLayer) Size() (int64, error) {
+ return int64(len(cl.content)), nil
+}
+
+func (cl *configLayer) MediaType() (types.MediaType, error) {
+ // Defaulting this to OCIConfigJSON as it should remain
+ // backwards compatible with DockerConfigJSON
+ return types.OCIConfigJSON, nil
+}
+
+var _ v1.Layer = (*configLayer)(nil)
+
+// ConfigLayer implements v1.Layer from the raw config bytes.
+// This is so that clients (e.g. remote) can access the config as a blob.
+func ConfigLayer(i WithRawConfigFile) (v1.Layer, error) {
+ h, err := ConfigName(i)
+ if err != nil {
+ return nil, err
+ }
+ rcfg, err := i.RawConfigFile()
+ if err != nil {
+ return nil, err
+ }
+ return &configLayer{
+ hash: h,
+ content: rcfg,
+ }, nil
+}
+
+// WithConfigFile defines the subset of v1.Image used by these helper methods
+type WithConfigFile interface {
+ // ConfigFile returns this image's config file.
+ ConfigFile() (*v1.ConfigFile, error)
+}
+
+// DiffIDs is a helper for implementing v1.Image
+func DiffIDs(i WithConfigFile) ([]v1.Hash, error) {
+ cfg, err := i.ConfigFile()
+ if err != nil {
+ return nil, err
+ }
+ return cfg.RootFS.DiffIDs, nil
+}
+
+// RawConfigFile is a helper for implementing v1.Image
+func RawConfigFile(i WithConfigFile) ([]byte, error) {
+ cfg, err := i.ConfigFile()
+ if err != nil {
+ return nil, err
+ }
+ return json.Marshal(cfg)
+}
+
+// WithRawManifest defines the subset of v1.Image used by these helper methods
+type WithRawManifest interface {
+ // RawManifest returns the serialized bytes of this image's manifest.
+ RawManifest() ([]byte, error) +} + +// Digest is a helper for implementing v1.Image +func Digest(i WithRawManifest) (v1.Hash, error) { + mb, err := i.RawManifest() + if err != nil { + return v1.Hash{}, err + } + digest, _, err := v1.SHA256(bytes.NewReader(mb)) + return digest, err +} + +// Manifest is a helper for implementing v1.Image +func Manifest(i WithRawManifest) (*v1.Manifest, error) { + b, err := i.RawManifest() + if err != nil { + return nil, err + } + return v1.ParseManifest(bytes.NewReader(b)) +} + +// WithManifest defines the subset of v1.Image used by these helper methods +type WithManifest interface { + // Manifest returns this image's Manifest object. + Manifest() (*v1.Manifest, error) +} + +// RawManifest is a helper for implementing v1.Image +func RawManifest(i WithManifest) ([]byte, error) { + m, err := i.Manifest() + if err != nil { + return nil, err + } + return json.Marshal(m) +} + +// Size is a helper for implementing v1.Image +func Size(i WithRawManifest) (int64, error) { + b, err := i.RawManifest() + if err != nil { + return -1, err + } + return int64(len(b)), nil +} + +// FSLayers is a helper for implementing v1.Image +func FSLayers(i WithManifest) ([]v1.Hash, error) { + m, err := i.Manifest() + if err != nil { + return nil, err + } + fsl := make([]v1.Hash, len(m.Layers)) + for i, l := range m.Layers { + fsl[i] = l.Digest + } + return fsl, nil +} + +// BlobSize is a helper for implementing v1.Image +func BlobSize(i WithManifest, h v1.Hash) (int64, error) { + d, err := BlobDescriptor(i, h) + if err != nil { + return -1, err + } + return d.Size, nil +} + +// BlobDescriptor is a helper for implementing v1.Image +func BlobDescriptor(i WithManifest, h v1.Hash) (*v1.Descriptor, error) { + m, err := i.Manifest() + if err != nil { + return nil, err + } + + if m.Config.Digest == h { + return &m.Config, nil + } + + for _, l := range m.Layers { + if l.Digest == h { + return &l, nil + } + } + return nil, fmt.Errorf("blob %v not found", h) +} + +// WithManifestAndConfigFile defines the subset of v1.Image used by these helper methods +type WithManifestAndConfigFile interface { + WithConfigFile + + // Manifest returns this image's Manifest object. + Manifest() (*v1.Manifest, error) +} + +// BlobToDiffID is a helper for mapping between compressed +// and uncompressed blob hashes. +func BlobToDiffID(i WithManifestAndConfigFile, h v1.Hash) (v1.Hash, error) { + blobs, err := FSLayers(i) + if err != nil { + return v1.Hash{}, err + } + diffIDs, err := DiffIDs(i) + if err != nil { + return v1.Hash{}, err + } + if len(blobs) != len(diffIDs) { + return v1.Hash{}, fmt.Errorf("mismatched fs layers (%d) and diff ids (%d)", len(blobs), len(diffIDs)) + } + for i, blob := range blobs { + if blob == h { + return diffIDs[i], nil + } + } + return v1.Hash{}, fmt.Errorf("unknown blob %v", h) +} + +// DiffIDToBlob is a helper for mapping between uncompressed +// and compressed blob hashes. +func DiffIDToBlob(wm WithManifestAndConfigFile, h v1.Hash) (v1.Hash, error) { + blobs, err := FSLayers(wm) + if err != nil { + return v1.Hash{}, err + } + diffIDs, err := DiffIDs(wm) + if err != nil { + return v1.Hash{}, err + } + if len(blobs) != len(diffIDs) { + return v1.Hash{}, fmt.Errorf("mismatched fs layers (%d) and diff ids (%d)", len(blobs), len(diffIDs)) + } + for i, diffID := range diffIDs { + if diffID == h { + return blobs[i], nil + } + } + return v1.Hash{}, fmt.Errorf("unknown diffID %v", h) +} + +// WithDiffID defines the subset of v1.Layer for exposing the DiffID method. 
+type WithDiffID interface { + DiffID() (v1.Hash, error) +} + +// withDescriptor allows partial layer implementations to provide a layer +// descriptor to the partial image manifest builder. This allows partial +// uncompressed layers to provide foreign layer metadata like URLs to the +// uncompressed image manifest. +type withDescriptor interface { + Descriptor() (*v1.Descriptor, error) +} + +// Describable represents something for which we can produce a v1.Descriptor. +type Describable interface { + Digest() (v1.Hash, error) + MediaType() (types.MediaType, error) + Size() (int64, error) +} + +// Descriptor returns a v1.Descriptor given a Describable. It also encodes +// some logic for unwrapping things that have been wrapped by +// CompressedToLayer, UncompressedToLayer, CompressedToImage, or +// UncompressedToImage. +func Descriptor(d Describable) (*v1.Descriptor, error) { + // If Describable implements Descriptor itself, return that. + if wd, ok := d.(withDescriptor); ok { + return wd.Descriptor() + } + + // Otherwise, try to unwrap any partial implementations to see + // if the wrapped struct implements Descriptor. + if ule, ok := d.(*uncompressedLayerExtender); ok { + if wd, ok := ule.UncompressedLayer.(withDescriptor); ok { + return wd.Descriptor() + } + } + if cle, ok := d.(*compressedLayerExtender); ok { + if wd, ok := cle.CompressedLayer.(withDescriptor); ok { + return wd.Descriptor() + } + } + if uie, ok := d.(*uncompressedImageExtender); ok { + if wd, ok := uie.UncompressedImageCore.(withDescriptor); ok { + return wd.Descriptor() + } + } + if cie, ok := d.(*compressedImageExtender); ok { + if wd, ok := cie.CompressedImageCore.(withDescriptor); ok { + return wd.Descriptor() + } + } + + // If all else fails, compute the descriptor from the individual methods. + var ( + desc v1.Descriptor + err error + ) + + if desc.Size, err = d.Size(); err != nil { + return nil, err + } + if desc.Digest, err = d.Digest(); err != nil { + return nil, err + } + if desc.MediaType, err = d.MediaType(); err != nil { + return nil, err + } + + return &desc, nil +} + +type withUncompressedSize interface { + UncompressedSize() (int64, error) +} + +// UncompressedSize returns the size of the Uncompressed layer. If the +// underlying implementation doesn't implement UncompressedSize directly, +// this will compute the uncompressedSize by reading everything returned +// by Compressed(). This is potentially expensive and may consume the contents +// for streaming layers. +func UncompressedSize(l v1.Layer) (int64, error) { + // If the layer implements UncompressedSize itself, return that. + if wus, ok := l.(withUncompressedSize); ok { + return wus.UncompressedSize() + } + + // Otherwise, try to unwrap any partial implementations to see + // if the wrapped struct implements UncompressedSize. + if ule, ok := l.(*uncompressedLayerExtender); ok { + if wus, ok := ule.UncompressedLayer.(withUncompressedSize); ok { + return wus.UncompressedSize() + } + } + if cle, ok := l.(*compressedLayerExtender); ok { + if wus, ok := cle.CompressedLayer.(withUncompressedSize); ok { + return wus.UncompressedSize() + } + } + + // The layer doesn't implement UncompressedSize, we need to compute it. 
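+ // Reading the stream is the only option left: io.Copy into ioutil.Discard
+ // counts the uncompressed bytes without buffering them. Note this drains rc,
+ // so for non-replayable (streaming) layers this is a one-shot operation.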
+ rc, err := l.Uncompressed()
+ if err != nil {
+ return -1, err
+ }
+ defer rc.Close()
+
+ return io.Copy(ioutil.Discard, rc)
+}
diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go
new file mode 100644
index 000000000..a586ab367
--- /dev/null
+++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go
@@ -0,0 +1,59 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "sort"
+)
+
+// Platform represents the target os/arch for an image.
+type Platform struct {
+ Architecture string `json:"architecture"`
+ OS string `json:"os"`
+ OSVersion string `json:"os.version,omitempty"`
+ OSFeatures []string `json:"os.features,omitempty"`
+ Variant string `json:"variant,omitempty"`
+ Features []string `json:"features,omitempty"`
+}
+
+// Equals returns true if the given platform is semantically equivalent to this one.
+// The order of Features and OSFeatures is not important.
+func (p Platform) Equals(o Platform) bool {
+ return p.OS == o.OS && p.Architecture == o.Architecture && p.Variant == o.Variant && p.OSVersion == o.OSVersion &&
+ stringSliceEqualIgnoreOrder(p.OSFeatures, o.OSFeatures) && stringSliceEqualIgnoreOrder(p.Features, o.Features)
+}
+
+// stringSliceEqual compares 2 string slices and returns whether their contents are identical.
+func stringSliceEqual(a, b []string) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i, elm := range a {
+ if elm != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// stringSliceEqualIgnoreOrder compares 2 string slices and returns whether their contents are identical, ignoring order.
+// The inputs are copied before sorting so that the caller's slices are not mutated.
+func stringSliceEqualIgnoreOrder(a, b []string) bool {
+ a1 := append([]string(nil), a...)
+ b1 := append([]string(nil), b...)
+ sort.Strings(a1)
+ sort.Strings(b1)
+ return stringSliceEqual(a1, b1)
+}
diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/progress.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/progress.go
new file mode 100644
index 000000000..844f04d93
--- /dev/null
+++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/progress.go
@@ -0,0 +1,25 @@
+// Copyright 2020 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+// Update is a representation of an update of transfer progress.
Some functions +// in this module can take a channel to which updates will be sent while a +// transfer is in progress. +// +k8s:deepcopy-gen=false +type Update struct { + Total int64 + Complete int64 + Error error +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/README.md b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/README.md new file mode 100644 index 000000000..c1e81b310 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/README.md @@ -0,0 +1,117 @@ +# `remote` + +[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote) + +The `remote` package implements a client for accessing a registry, +per the [OCI distribution spec](https://github.com/opencontainers/distribution-spec/blob/master/spec.md). + +It leans heavily on the lower level [`transport`](/pkg/v1/remote/transport) package, which handles the +authentication handshake and structured errors. + +## Usage + +```go +package main + +import ( + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" +) + +func main() { + ref, err := name.ParseReference("gcr.io/google-containers/pause") + if err != nil { + panic(err) + } + + img, err := remote.Image(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain)) + if err != nil { + panic(err) + } + + // do stuff with img +} +``` + +## Structure + +
+
+
+## Background
+
+There are a lot of confusingly similar terms that come up when talking about images in registries.
+
+### Anatomy of an image
+
+In general...
+
+* A tag refers to an image manifest.
+* An image manifest references a config file and an ordered list of _compressed_ layers by sha256 digest.
+* A config file references an ordered list of _uncompressed_ layers by sha256 digest and contains runtime configuration.
+* The sha256 digest of the config file is the [image id](https://github.com/opencontainers/image-spec/blob/master/config.md#imageid) for the image.
+
+For example, an image with two layers would look something like this:
+
+![image anatomy](/images/image-anatomy.dot.svg)
+
+### Anatomy of an index
+
+In the normal case, an [index](https://github.com/opencontainers/image-spec/blob/master/image-index.md) is used to represent a multi-platform image.
+This was the original use case for a [manifest
+list](https://docs.docker.com/registry/spec/manifest-v2-2/#manifest-list).
+
+![image index anatomy](/images/index-anatomy.dot.svg)
+
+It is possible for an index to reference another index, per the OCI
+[image-spec](https://github.com/opencontainers/image-spec/blob/master/media-types.md#compatibility-matrix).
+In theory, both an image and an image index can reference arbitrary things via
+[descriptors](https://github.com/opencontainers/image-spec/blob/master/descriptor.md),
+e.g. see the [image layout
+example](https://github.com/opencontainers/image-spec/blob/master/image-layout.md#index-example),
+which references an application/xml file from an image index.
+
+That could look something like this:
+
+![strange image index anatomy](/images/index-anatomy-strange.dot.svg)
+
+Using a recursive index like this might not be possible with all registries,
+but this flexibility allows for some interesting applications, e.g. the
+[OCI Artifacts](https://github.com/opencontainers/artifacts) effort.
+
+### Anatomy of an image upload
+
+The structure of an image requires a delicate ordering when uploading an image to a registry.
+Below is a (slightly simplified) figure that describes how an image is prepared for upload
+to a registry and how the data flows between various artifacts:
+
+![upload](/images/upload.dot.svg)
+
+Note that:
+
+* A config file references the uncompressed layer contents by sha256.
+* A manifest references the compressed layer contents by sha256 and the size of the layer.
+* A manifest references the config file contents by sha256 and the size of the file.
+
+It follows that during an upload, we need to upload layers before the config file,
+and we need to upload the config file before the manifest.
+
+Sometimes, we know all of this information ahead of time (e.g. when copying from remote.Image),
+so the ordering is less important.
+
+In other cases, e.g. when using a [`stream.Layer`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/stream#Layer),
+we can't compute anything until we have already uploaded the layer, so we need to be careful about ordering.
+
+## Caveats
+
+### schema 1
+
+This package does not support schema 1 images; see [`#377`](https://github.com/google/go-containerregistry/issues/377).
+However, it's possible to do _something_ useful with them via [`remote.Get`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote#Get),
+which doesn't try to interpret what is returned by the registry.
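+
+For instance, a minimal sketch (reusing `ref` from the Usage example above) of
+inspecting such an artifact without interpreting it:
+
+```go
+desc, err := remote.Get(ref)
+if err != nil {
+ panic(err)
+}
+// The raw manifest bytes and the registry-reported media type are available
+// even for artifacts this package won't parse (e.g. schema 1 manifests).
+fmt.Println(desc.MediaType, len(desc.Manifest))
+```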
+ +[`crane.Copy`](https://godoc.org/github.com/google/go-containerregistry/pkg/crane#Copy) takes advantage of this to implement support for copying schema 1 images, +see [here](https://github.com/google/go-containerregistry/blob/main/pkg/internal/legacy/copy.go). diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/catalog.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/catalog.go new file mode 100644 index 000000000..21b5dbbaa --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/catalog.go @@ -0,0 +1,151 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" +) + +type catalog struct { + Repos []string `json:"repositories"` +} + +// CatalogPage calls /_catalog, returning the list of repositories on the registry. +func CatalogPage(target name.Registry, last string, n int, options ...Option) ([]string, error) { + o, err := makeOptions(target, options...) + if err != nil { + return nil, err + } + + scopes := []string{target.Scope(transport.PullScope)} + tr, err := transport.NewWithContext(o.context, target, o.auth, o.transport, scopes) + if err != nil { + return nil, err + } + + query := fmt.Sprintf("last=%s&n=%d", url.QueryEscape(last), n) + + uri := url.URL{ + Scheme: target.Scheme(), + Host: target.RegistryStr(), + Path: "/v2/_catalog", + RawQuery: query, + } + + client := http.Client{Transport: tr} + req, err := http.NewRequest(http.MethodGet, uri.String(), nil) + if err != nil { + return nil, err + } + resp, err := client.Do(req.WithContext(o.context)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusOK); err != nil { + return nil, err + } + + var parsed catalog + if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil { + return nil, err + } + + return parsed.Repos, nil +} + +// Catalog calls /_catalog, returning the list of repositories on the registry. +func Catalog(ctx context.Context, target name.Registry, options ...Option) ([]string, error) { + o, err := makeOptions(target, options...) + if err != nil { + return nil, err + } + + scopes := []string{target.Scope(transport.PullScope)} + tr, err := transport.NewWithContext(o.context, target, o.auth, o.transport, scopes) + if err != nil { + return nil, err + } + + uri := &url.URL{ + Scheme: target.Scheme(), + Host: target.RegistryStr(), + Path: "/v2/_catalog", + RawQuery: "n=10000", + } + + client := http.Client{Transport: tr} + + // WithContext overrides the ctx passed directly. 
+ if o.context != context.Background() { + ctx = o.context + } + + var ( + parsed catalog + repoList []string + ) + + // get responses until there is no next page + for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + req, err := http.NewRequest("GET", uri.String(), nil) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + + resp, err := client.Do(req) + if err != nil { + return nil, err + } + + if err := transport.CheckError(resp, http.StatusOK); err != nil { + return nil, err + } + + if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil { + return nil, err + } + if err := resp.Body.Close(); err != nil { + return nil, err + } + + repoList = append(repoList, parsed.Repos...) + + uri, err = getNextPageURL(resp) + if err != nil { + return nil, err + } + // no next page + if uri == nil { + break + } + } + return repoList, nil +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go new file mode 100644 index 000000000..c841cc058 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go @@ -0,0 +1,59 @@ +package remote + +import ( + "context" + "fmt" + "net/http" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" +) + +// CheckPushPermission returns an error if the given keychain cannot authorize +// a push operation to the given ref. +// +// This can be useful to check whether the caller has permission to push an +// image before doing work to construct the image. +// +// TODO(#412): Remove the need for this method. +func CheckPushPermission(ref name.Reference, kc authn.Keychain, t http.RoundTripper) error { + auth, err := kc.Resolve(ref.Context().Registry) + if err != nil { + return fmt.Errorf("resolving authorization for %v failed: %v", ref.Context().Registry, err) + } + + scopes := []string{ref.Scope(transport.PushScope)} + tr, err := transport.New(ref.Context().Registry, auth, t, scopes) + if err != nil { + return fmt.Errorf("creating push check transport for %v failed: %v", ref.Context().Registry, err) + } + // TODO(jasonhall): Against GCR, just doing the token handshake is + // enough, but this doesn't extend to Dockerhub + // (https://github.com/docker/hub-feedback/issues/1771), so we actually + // need to initiate an upload to tell whether the credentials can + // authorize a push. Figure out how to return early here when we can, + // to avoid a roundtrip for spec-compliant registries. + w := writer{ + repo: ref.Context(), + client: &http.Client{Transport: tr}, + context: context.Background(), + } + loc, _, err := w.initiateUpload("", "") + if loc != "" { + // Since we're only initiating the upload to check whether we + // can, we should attempt to cancel it, in case initiating + // reserves some resources on the server. We shouldn't wait for + // cancelling to complete, and we don't care if it fails. 
+ go w.cancelUpload(loc) + } + return err +} + +func (w *writer) cancelUpload(loc string) { + req, err := http.NewRequest(http.MethodDelete, loc, nil) + if err != nil { + return + } + _, _ = w.client.Do(req) +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go new file mode 100644 index 000000000..3b9022719 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go @@ -0,0 +1,57 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "fmt" + "net/http" + "net/url" + + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" +) + +// Delete removes the specified image reference from the remote registry. +func Delete(ref name.Reference, options ...Option) error { + o, err := makeOptions(ref.Context(), options...) + if err != nil { + return err + } + scopes := []string{ref.Scope(transport.DeleteScope)} + tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes) + if err != nil { + return err + } + c := &http.Client{Transport: tr} + + u := url.URL{ + Scheme: ref.Context().Registry.Scheme(), + Host: ref.Context().RegistryStr(), + Path: fmt.Sprintf("/v2/%s/manifests/%s", ref.Context().RepositoryStr(), ref.Identifier()), + } + + req, err := http.NewRequest(http.MethodDelete, u.String(), nil) + if err != nil { + return err + } + + resp, err := c.Do(req.WithContext(o.context)) + if err != nil { + return err + } + defer resp.Body.Close() + + return transport.CheckError(resp, http.StatusOK, http.StatusAccepted) +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go new file mode 100644 index 000000000..9c248cfe2 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go @@ -0,0 +1,392 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package remote
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+
+ "github.com/google/go-containerregistry/pkg/logs"
+ "github.com/google/go-containerregistry/pkg/name"
+ v1 "github.com/google/go-containerregistry/pkg/v1"
+ "github.com/google/go-containerregistry/pkg/v1/internal/verify"
+ "github.com/google/go-containerregistry/pkg/v1/partial"
+ "github.com/google/go-containerregistry/pkg/v1/remote/transport"
+ "github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// ErrSchema1 indicates that we received a schema1 manifest from the registry.
+// This library doesn't have plans to support this legacy image format:
+// https://github.com/google/go-containerregistry/issues/377
+type ErrSchema1 struct {
+ schema string
+}
+
+// newErrSchema1 returns an ErrSchema1 with the unexpected MediaType.
+func newErrSchema1(schema types.MediaType) error {
+ return &ErrSchema1{
+ schema: string(schema),
+ }
+}
+
+// Error implements error.
+func (e *ErrSchema1) Error() string {
+ return fmt.Sprintf("unsupported MediaType: %q, see https://github.com/google/go-containerregistry/issues/377", e.schema)
+}
+
+// Descriptor provides access to metadata about a remote artifact and accessors
+// for efficiently converting it into a v1.Image or v1.ImageIndex.
+type Descriptor struct {
+ fetcher
+ v1.Descriptor
+ Manifest []byte
+
+ // So we can share this implementation with Image.
+ platform v1.Platform
+}
+
+// RawManifest exists to satisfy the Taggable interface.
+func (d *Descriptor) RawManifest() ([]byte, error) {
+ return d.Manifest, nil
+}
+
+// Get returns a remote.Descriptor for the given reference. The response from
+// the registry is left un-interpreted, for the most part. This is useful for
+// querying what kind of artifact a reference represents.
+//
+// See Head if you don't need the response body.
+func Get(ref name.Reference, options ...Option) (*Descriptor, error) {
+ acceptable := []types.MediaType{
+ // Just to look at them.
+ types.DockerManifestSchema1,
+ types.DockerManifestSchema1Signed,
+ }
+ acceptable = append(acceptable, acceptableImageMediaTypes...)
+ acceptable = append(acceptable, acceptableIndexMediaTypes...)
+ return get(ref, acceptable, options...)
+}
+
+// Head returns a v1.Descriptor for the given reference by issuing a HEAD
+// request.
+//
+// Note that the server response will not have a body, so any errors encountered
+// should be retried with Get to get more details.
+func Head(ref name.Reference, options ...Option) (*v1.Descriptor, error) {
+ acceptable := []types.MediaType{
+ // Just to look at them.
+ types.DockerManifestSchema1,
+ types.DockerManifestSchema1Signed,
+ }
+ acceptable = append(acceptable, acceptableImageMediaTypes...)
+ acceptable = append(acceptable, acceptableIndexMediaTypes...)
+
+ o, err := makeOptions(ref.Context(), options...)
+ if err != nil {
+ return nil, err
+ }
+
+ f, err := makeFetcher(ref, o)
+ if err != nil {
+ return nil, err
+ }
+
+ return f.headManifest(ref, acceptable)
+}
+
+// Handle options and fetch the manifest with the acceptable MediaTypes in the
+// Accept header.
+func get(ref name.Reference, acceptable []types.MediaType, options ...Option) (*Descriptor, error) {
+ o, err := makeOptions(ref.Context(), options...)
+ if err != nil { + return nil, err + } + f, err := makeFetcher(ref, o) + if err != nil { + return nil, err + } + b, desc, err := f.fetchManifest(ref, acceptable) + if err != nil { + return nil, err + } + return &Descriptor{ + fetcher: *f, + Manifest: b, + Descriptor: *desc, + platform: o.platform, + }, nil +} + +// Image converts the Descriptor into a v1.Image. +// +// If the fetched artifact is already an image, it will just return it. +// +// If the fetched artifact is an index, it will attempt to resolve the index to +// a child image with the appropriate platform. +// +// See WithPlatform to set the desired platform. +func (d *Descriptor) Image() (v1.Image, error) { + switch d.MediaType { + case types.DockerManifestSchema1, types.DockerManifestSchema1Signed: + // We don't care to support schema 1 images: + // https://github.com/google/go-containerregistry/issues/377 + return nil, newErrSchema1(d.MediaType) + case types.OCIImageIndex, types.DockerManifestList: + // We want an image but the registry has an index, resolve it to an image. + return d.remoteIndex().imageByPlatform(d.platform) + case types.OCIManifestSchema1, types.DockerManifestSchema2: + // These are expected. Enumerated here to allow a default case. + default: + // We could just return an error here, but some registries (e.g. static + // registries) don't set the Content-Type headers correctly, so instead... + logs.Warn.Printf("Unexpected media type for Image(): %s", d.MediaType) + } + + // Wrap the v1.Layers returned by this v1.Image in a hint for downstream + // remote.Write calls to facilitate cross-repo "mounting". + imgCore, err := partial.CompressedToImage(d.remoteImage()) + if err != nil { + return nil, err + } + return &mountableImage{ + Image: imgCore, + Reference: d.Ref, + }, nil +} + +// ImageIndex converts the Descriptor into a v1.ImageIndex. +func (d *Descriptor) ImageIndex() (v1.ImageIndex, error) { + switch d.MediaType { + case types.DockerManifestSchema1, types.DockerManifestSchema1Signed: + // We don't care to support schema 1 images: + // https://github.com/google/go-containerregistry/issues/377 + return nil, newErrSchema1(d.MediaType) + case types.OCIManifestSchema1, types.DockerManifestSchema2: + // We want an index but the registry has an image, nothing we can do. + return nil, fmt.Errorf("unexpected media type for ImageIndex(): %s; call Image() instead", d.MediaType) + case types.OCIImageIndex, types.DockerManifestList: + // These are expected. + default: + // We could just return an error here, but some registries (e.g. static + // registries) don't set the Content-Type headers correctly, so instead... + logs.Warn.Printf("Unexpected media type for ImageIndex(): %s", d.MediaType) + } + return d.remoteIndex(), nil +} + +func (d *Descriptor) remoteImage() *remoteImage { + return &remoteImage{ + fetcher: d.fetcher, + manifest: d.Manifest, + mediaType: d.MediaType, + descriptor: &d.Descriptor, + } +} + +func (d *Descriptor) remoteIndex() *remoteIndex { + return &remoteIndex{ + fetcher: d.fetcher, + manifest: d.Manifest, + mediaType: d.MediaType, + descriptor: &d.Descriptor, + } +} + +// fetcher implements methods for reading from a registry. 
+type fetcher struct {
+ Ref name.Reference
+ Client *http.Client
+ context context.Context
+}
+
+func makeFetcher(ref name.Reference, o *options) (*fetcher, error) {
+ tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, []string{ref.Scope(transport.PullScope)})
+ if err != nil {
+ return nil, err
+ }
+ return &fetcher{
+ Ref: ref,
+ Client: &http.Client{Transport: tr},
+ context: o.context,
+ }, nil
+}
+
+// url returns a url.URL for the specified path in the context of this remote image reference.
+func (f *fetcher) url(resource, identifier string) url.URL {
+ return url.URL{
+ Scheme: f.Ref.Context().Registry.Scheme(),
+ Host: f.Ref.Context().RegistryStr(),
+ Path: fmt.Sprintf("/v2/%s/%s/%s", f.Ref.Context().RepositoryStr(), resource, identifier),
+ }
+}
+
+func (f *fetcher) fetchManifest(ref name.Reference, acceptable []types.MediaType) ([]byte, *v1.Descriptor, error) {
+ u := f.url("manifests", ref.Identifier())
+ req, err := http.NewRequest(http.MethodGet, u.String(), nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ accept := []string{}
+ for _, mt := range acceptable {
+ accept = append(accept, string(mt))
+ }
+ req.Header.Set("Accept", strings.Join(accept, ","))
+
+ resp, err := f.Client.Do(req.WithContext(f.context))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ if err := transport.CheckError(resp, http.StatusOK); err != nil {
+ return nil, nil, err
+ }
+
+ manifest, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ digest, size, err := v1.SHA256(bytes.NewReader(manifest))
+ if err != nil {
+ return nil, nil, err
+ }
+
+ mediaType := types.MediaType(resp.Header.Get("Content-Type"))
+ contentDigest, err := v1.NewHash(resp.Header.Get("Docker-Content-Digest"))
+ if err == nil && mediaType == types.DockerManifestSchema1Signed {
+ // If we can parse the digest from the header, and it's a signed schema 1
+ // manifest, let's use that for the digest to appease older registries.
+ digest = contentDigest
+ }
+
+ // Validate the digest matches what we asked for, if pulling by digest.
+ if dgst, ok := ref.(name.Digest); ok {
+ if digest.String() != dgst.DigestStr() {
+ return nil, nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), f.Ref)
+ }
+ }
+ // Do nothing for tags; I give up.
+ //
+ // We'd like to validate that the "Docker-Content-Digest" header matches what is returned by the registry,
+ // but so many registries implement this incorrectly that it's not worth checking.
+ //
+ // For reference:
+ // https://github.com/GoogleContainerTools/kaniko/issues/298
+
+ // Return all this info since we have to calculate it anyway.
+ desc := v1.Descriptor{ + Digest: digest, + Size: size, + MediaType: mediaType, + } + + return manifest, &desc, nil +} + +func (f *fetcher) headManifest(ref name.Reference, acceptable []types.MediaType) (*v1.Descriptor, error) { + u := f.url("manifests", ref.Identifier()) + req, err := http.NewRequest(http.MethodHead, u.String(), nil) + if err != nil { + return nil, err + } + accept := []string{} + for _, mt := range acceptable { + accept = append(accept, string(mt)) + } + req.Header.Set("Accept", strings.Join(accept, ",")) + + resp, err := f.Client.Do(req.WithContext(f.context)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusOK); err != nil { + return nil, err + } + + mediaType := types.MediaType(resp.Header.Get("Content-Type")) + + size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) + if err != nil { + return nil, err + } + + digest, err := v1.NewHash(resp.Header.Get("Docker-Content-Digest")) + if err != nil { + return nil, err + } + + // Validate the digest matches what we asked for, if pulling by digest. + if dgst, ok := ref.(name.Digest); ok { + if digest.String() != dgst.DigestStr() { + return nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), f.Ref) + } + } + + // Return all this info since we have to calculate it anyway. + return &v1.Descriptor{ + Digest: digest, + Size: size, + MediaType: mediaType, + }, nil +} + +func (f *fetcher) fetchBlob(ctx context.Context, h v1.Hash) (io.ReadCloser, error) { + u := f.url("blobs", h.String()) + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + + resp, err := f.Client.Do(req.WithContext(ctx)) + if err != nil { + return nil, err + } + + if err := transport.CheckError(resp, http.StatusOK); err != nil { + resp.Body.Close() + return nil, err + } + + return verify.ReadCloser(resp.Body, h) +} + +func (f *fetcher) headBlob(h v1.Hash) (*http.Response, error) { + u := f.url("blobs", h.String()) + req, err := http.NewRequest(http.MethodHead, u.String(), nil) + if err != nil { + return nil, err + } + + resp, err := f.Client.Do(req.WithContext(f.context)) + if err != nil { + return nil, err + } + + if err := transport.CheckError(resp, http.StatusOK); err != nil { + resp.Body.Close() + return nil, err + } + + return resp, nil +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/doc.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/doc.go new file mode 100644 index 000000000..846ba07cd --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/doc.go @@ -0,0 +1,17 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package remote provides facilities for reading/writing v1.Images from/to +// a remote image registry. 
+package remote diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go new file mode 100644 index 000000000..e04961e90 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go @@ -0,0 +1,230 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "io" + "io/ioutil" + "net/http" + "net/url" + "sync" + + "github.com/google/go-containerregistry/pkg/internal/redact" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/internal/verify" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +var acceptableImageMediaTypes = []types.MediaType{ + types.DockerManifestSchema2, + types.OCIManifestSchema1, +} + +// remoteImage accesses an image from a remote registry +type remoteImage struct { + fetcher + manifestLock sync.Mutex // Protects manifest + manifest []byte + configLock sync.Mutex // Protects config + config []byte + mediaType types.MediaType + descriptor *v1.Descriptor +} + +var _ partial.CompressedImageCore = (*remoteImage)(nil) + +// Image provides access to a remote image reference. +func Image(ref name.Reference, options ...Option) (v1.Image, error) { + desc, err := Get(ref, options...) + if err != nil { + return nil, err + } + + return desc.Image() +} + +func (r *remoteImage) MediaType() (types.MediaType, error) { + if string(r.mediaType) != "" { + return r.mediaType, nil + } + return types.DockerManifestSchema2, nil +} + +func (r *remoteImage) RawManifest() ([]byte, error) { + r.manifestLock.Lock() + defer r.manifestLock.Unlock() + if r.manifest != nil { + return r.manifest, nil + } + + // NOTE(jonjohnsonjr): We should never get here because the public entrypoints + // do type-checking via remote.Descriptor. I've left this here for tests that + // directly instantiate a remoteImage. + manifest, desc, err := r.fetchManifest(r.Ref, acceptableImageMediaTypes) + if err != nil { + return nil, err + } + + if r.descriptor == nil { + r.descriptor = desc + } + r.mediaType = desc.MediaType + r.manifest = manifest + return r.manifest, nil +} + +func (r *remoteImage) RawConfigFile() ([]byte, error) { + r.configLock.Lock() + defer r.configLock.Unlock() + if r.config != nil { + return r.config, nil + } + + m, err := partial.Manifest(r) + if err != nil { + return nil, err + } + + body, err := r.fetchBlob(r.context, m.Config.Digest) + if err != nil { + return nil, err + } + defer body.Close() + + r.config, err = ioutil.ReadAll(body) + if err != nil { + return nil, err + } + return r.config, nil +} + +// Descriptor retains the original descriptor from an index manifest. +// See partial.Descriptor. 
+func (r *remoteImage) Descriptor() (*v1.Descriptor, error) { + // kind of a hack, but RawManifest does appropriate locking/memoization + // and makes sure r.descriptor is populated. + _, err := r.RawManifest() + return r.descriptor, err +} + +// remoteImageLayer implements partial.CompressedLayer +type remoteImageLayer struct { + ri *remoteImage + digest v1.Hash +} + +// Digest implements partial.CompressedLayer +func (rl *remoteImageLayer) Digest() (v1.Hash, error) { + return rl.digest, nil +} + +// Compressed implements partial.CompressedLayer +func (rl *remoteImageLayer) Compressed() (io.ReadCloser, error) { + urls := []url.URL{rl.ri.url("blobs", rl.digest.String())} + + // Add alternative layer sources from URLs (usually none). + d, err := partial.BlobDescriptor(rl, rl.digest) + if err != nil { + return nil, err + } + + // We don't want to log binary layers -- this can break terminals. + ctx := redact.NewContext(rl.ri.context, "omitting binary blobs from logs") + + for _, s := range d.URLs { + u, err := url.Parse(s) + if err != nil { + return nil, err + } + urls = append(urls, *u) + } + + // The lastErr for most pulls will be the same (the first error), but for + // foreign layers we'll want to surface the last one, since we try to pull + // from the registry first, which would often fail. + // TODO: Maybe we don't want to try pulling from the registry first? + var lastErr error + for _, u := range urls { + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + + resp, err := rl.ri.Client.Do(req.WithContext(ctx)) + if err != nil { + lastErr = err + continue + } + + if err := transport.CheckError(resp, http.StatusOK); err != nil { + resp.Body.Close() + lastErr = err + continue + } + + return verify.ReadCloser(resp.Body, rl.digest) + } + + return nil, lastErr +} + +// Manifest implements partial.WithManifest so that we can use partial.BlobSize below. +func (rl *remoteImageLayer) Manifest() (*v1.Manifest, error) { + return partial.Manifest(rl.ri) +} + +// MediaType implements v1.Layer +func (rl *remoteImageLayer) MediaType() (types.MediaType, error) { + bd, err := partial.BlobDescriptor(rl, rl.digest) + if err != nil { + return "", err + } + + return bd.MediaType, nil +} + +// Size implements partial.CompressedLayer +func (rl *remoteImageLayer) Size() (int64, error) { + // Look up the size of this digest in the manifest to avoid a request. + return partial.BlobSize(rl, rl.digest) +} + +// ConfigFile implements partial.WithManifestAndConfigFile so that we can use partial.BlobToDiffID below. +func (rl *remoteImageLayer) ConfigFile() (*v1.ConfigFile, error) { + return partial.ConfigFile(rl.ri) +} + +// DiffID implements partial.WithDiffID so that we don't recompute a DiffID that we already have +// available in our ConfigFile. +func (rl *remoteImageLayer) DiffID() (v1.Hash, error) { + return partial.BlobToDiffID(rl, rl.digest) +} + +// Descriptor retains the original descriptor from an image manifest. +// See partial.Descriptor. 
+func (rl *remoteImageLayer) Descriptor() (*v1.Descriptor, error) { + return partial.BlobDescriptor(rl, rl.digest) +} + +// LayerByDigest implements partial.CompressedLayer +func (r *remoteImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) { + return &remoteImageLayer{ + ri: r, + digest: h, + }, nil +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go new file mode 100644 index 000000000..7a81075c4 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go @@ -0,0 +1,237 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "bytes" + "fmt" + "sync" + + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +var acceptableIndexMediaTypes = []types.MediaType{ + types.DockerManifestList, + types.OCIImageIndex, +} + +// remoteIndex accesses an index from a remote registry +type remoteIndex struct { + fetcher + manifestLock sync.Mutex // Protects manifest + manifest []byte + mediaType types.MediaType + descriptor *v1.Descriptor +} + +// Index provides access to a remote index reference. +func Index(ref name.Reference, options ...Option) (v1.ImageIndex, error) { + desc, err := get(ref, acceptableIndexMediaTypes, options...) + if err != nil { + return nil, err + } + + return desc.ImageIndex() +} + +func (r *remoteIndex) MediaType() (types.MediaType, error) { + if string(r.mediaType) != "" { + return r.mediaType, nil + } + return types.DockerManifestList, nil +} + +func (r *remoteIndex) Digest() (v1.Hash, error) { + return partial.Digest(r) +} + +func (r *remoteIndex) Size() (int64, error) { + return partial.Size(r) +} + +func (r *remoteIndex) RawManifest() ([]byte, error) { + r.manifestLock.Lock() + defer r.manifestLock.Unlock() + if r.manifest != nil { + return r.manifest, nil + } + + // NOTE(jonjohnsonjr): We should never get here because the public entrypoints + // do type-checking via remote.Descriptor. I've left this here for tests that + // directly instantiate a remoteIndex. + manifest, desc, err := r.fetchManifest(r.Ref, acceptableIndexMediaTypes) + if err != nil { + return nil, err + } + + if r.descriptor == nil { + r.descriptor = desc + } + r.mediaType = desc.MediaType + r.manifest = manifest + return r.manifest, nil +} + +func (r *remoteIndex) IndexManifest() (*v1.IndexManifest, error) { + b, err := r.RawManifest() + if err != nil { + return nil, err + } + return v1.ParseIndexManifest(bytes.NewReader(b)) +} + +func (r *remoteIndex) Image(h v1.Hash) (v1.Image, error) { + desc, err := r.childByHash(h) + if err != nil { + return nil, err + } + + // Descriptor.Image will handle coercing nested indexes into an Image. 
+ return desc.Image() +} + +// Descriptor retains the original descriptor from an index manifest. +// See partial.Descriptor. +func (r *remoteIndex) Descriptor() (*v1.Descriptor, error) { + // kind of a hack, but RawManifest does appropriate locking/memoization + // and makes sure r.descriptor is populated. + _, err := r.RawManifest() + return r.descriptor, err +} + +func (r *remoteIndex) ImageIndex(h v1.Hash) (v1.ImageIndex, error) { + desc, err := r.childByHash(h) + if err != nil { + return nil, err + } + return desc.ImageIndex() +} + +func (r *remoteIndex) imageByPlatform(platform v1.Platform) (v1.Image, error) { + desc, err := r.childByPlatform(platform) + if err != nil { + return nil, err + } + + // Descriptor.Image will handle coercing nested indexes into an Image. + return desc.Image() +} + +// This naively matches the first manifest with matching platform attributes. +// +// We should probably use this instead: +// github.com/containerd/containerd/platforms +// +// But first we'd need to migrate to: +// github.com/opencontainers/image-spec/specs-go/v1 +func (r *remoteIndex) childByPlatform(platform v1.Platform) (*Descriptor, error) { + index, err := r.IndexManifest() + if err != nil { + return nil, err + } + for _, childDesc := range index.Manifests { + // If platform is missing from child descriptor, assume it's amd64/linux. + p := defaultPlatform + if childDesc.Platform != nil { + p = *childDesc.Platform + } + + if matchesPlatform(p, platform) { + return r.childDescriptor(childDesc, platform) + } + } + return nil, fmt.Errorf("no child with platform %s/%s in index %s", platform.OS, platform.Architecture, r.Ref) +} + +func (r *remoteIndex) childByHash(h v1.Hash) (*Descriptor, error) { + index, err := r.IndexManifest() + if err != nil { + return nil, err + } + for _, childDesc := range index.Manifests { + if h == childDesc.Digest { + return r.childDescriptor(childDesc, defaultPlatform) + } + } + return nil, fmt.Errorf("no child with digest %s in index %s", h, r.Ref) +} + +// Convert one of this index's child's v1.Descriptor into a remote.Descriptor, with the given platform option. +func (r *remoteIndex) childDescriptor(child v1.Descriptor, platform v1.Platform) (*Descriptor, error) { + ref := r.Ref.Context().Digest(child.Digest.String()) + manifest, _, err := r.fetchManifest(ref, []types.MediaType{child.MediaType}) + if err != nil { + return nil, err + } + return &Descriptor{ + fetcher: fetcher{ + Ref: ref, + Client: r.Client, + context: r.context, + }, + Manifest: manifest, + Descriptor: child, + platform: platform, + }, nil +} + +// matchesPlatform checks if the given platform matches the required platforms. +// The given platform matches the required platform if +// - architecture and OS are identical. +// - OS version and variant are identical if provided. +// - features and OS features of the required platform are subsets of those of the given platform. +func matchesPlatform(given, required v1.Platform) bool { + // Required fields that must be identical. + if given.Architecture != required.Architecture || given.OS != required.OS { + return false + } + + // Optional fields that may be empty, but must be identical if provided. + if required.OSVersion != "" && given.OSVersion != required.OSVersion { + return false + } + if required.Variant != "" && given.Variant != required.Variant { + return false + } + + // Verify required platform's features are a subset of given platform's features. 
+ if !isSubset(given.OSFeatures, required.OSFeatures) { + return false + } + if !isSubset(given.Features, required.Features) { + return false + } + + return true +} + +// isSubset checks if the required array of strings is a subset of the given lst. +func isSubset(lst, required []string) bool { + set := make(map[string]bool) + for _, value := range lst { + set[value] = true + } + + for _, value := range required { + if _, ok := set[value]; !ok { + return false + } + } + + return true +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/layer.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/layer.go new file mode 100644 index 000000000..6a941a1af --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/layer.go @@ -0,0 +1,87 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "io" + + "github.com/google/go-containerregistry/pkg/internal/redact" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// remoteLayer implements partial.CompressedLayer +type remoteLayer struct { + fetcher + digest v1.Hash +} + +// Compressed implements partial.CompressedLayer +func (rl *remoteLayer) Compressed() (io.ReadCloser, error) { + // We don't want to log binary layers -- this can break terminals. + ctx := redact.NewContext(rl.context, "omitting binary blobs from logs") + return rl.fetchBlob(ctx, rl.digest) +} + +// Size implements partial.CompressedLayer +func (rl *remoteLayer) Size() (int64, error) { + resp, err := rl.headBlob(rl.digest) + if err != nil { + return -1, err + } + return resp.ContentLength, nil +} + +// Digest implements partial.CompressedLayer +func (rl *remoteLayer) Digest() (v1.Hash, error) { + return rl.digest, nil +} + +// MediaType implements v1.Layer +func (rl *remoteLayer) MediaType() (types.MediaType, error) { + return types.DockerLayer, nil +} + +// Layer reads the given blob reference from a registry as a Layer. A blob +// reference here is just a punned name.Digest where the digest portion is the +// digest of the blob to be read and the repository portion is the repo where +// that blob lives. +func Layer(ref name.Digest, options ...Option) (v1.Layer, error) { + o, err := makeOptions(ref.Context(), options...)
+ if err != nil { + return nil, err + } + f, err := makeFetcher(ref, o) + if err != nil { + return nil, err + } + h, err := v1.NewHash(ref.Identifier()) + if err != nil { + return nil, err + } + l, err := partial.CompressedToLayer(&remoteLayer{ + fetcher: *f, + digest: h, + }) + if err != nil { + return nil, err + } + return &MountableLayer{ + Layer: l, + Reference: ref, + }, nil +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go new file mode 100644 index 000000000..e4a005aa5 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go @@ -0,0 +1,146 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" +) + +type tags struct { + Name string `json:"name"` + Tags []string `json:"tags"` +} + +// List wraps ListWithContext using the background context. +func List(repo name.Repository, options ...Option) ([]string, error) { + return ListWithContext(context.Background(), repo, options...) +} + +// ListWithContext calls /tags/list for the given repository, returning the list of tags +// in the "tags" property. +func ListWithContext(ctx context.Context, repo name.Repository, options ...Option) ([]string, error) { + o, err := makeOptions(repo, options...) + if err != nil { + return nil, err + } + scopes := []string{repo.Scope(transport.PullScope)} + tr, err := transport.NewWithContext(o.context, repo.Registry, o.auth, o.transport, scopes) + if err != nil { + return nil, err + } + + uri := &url.URL{ + Scheme: repo.Registry.Scheme(), + Host: repo.Registry.RegistryStr(), + Path: fmt.Sprintf("/v2/%s/tags/list", repo.RepositoryStr()), + // ECR returns an error if n > 1000: + // https://github.com/google/go-containerregistry/issues/681 + RawQuery: "n=1000", + } + + // This is lazy, but I want to make sure List(..., WithContext(ctx)) works + // without calling makeOptions() twice (which can have side effects). + // This means ListWithContext(ctx, ..., WithContext(ctx2)) prefers ctx2. 
+ if o.context != context.Background() { + ctx = o.context + } + + client := http.Client{Transport: tr} + tagList := []string{} + parsed := tags{} + + // get responses until there is no next page + for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + req, err := http.NewRequest("GET", uri.String(), nil) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + + resp, err := client.Do(req) + if err != nil { + return nil, err + } + + if err := transport.CheckError(resp, http.StatusOK); err != nil { + return nil, err + } + + if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil { + return nil, err + } + + if err := resp.Body.Close(); err != nil { + return nil, err + } + + tagList = append(tagList, parsed.Tags...) + + uri, err = getNextPageURL(resp) + if err != nil { + return nil, err + } + // no next page + if uri == nil { + break + } + } + + return tagList, nil +} + +// getNextPageURL checks if there is a Link header in a http.Response which +// contains a link to the next page. If yes it returns the url.URL of the next +// page otherwise it returns nil. +func getNextPageURL(resp *http.Response) (*url.URL, error) { + link := resp.Header.Get("Link") + if link == "" { + return nil, nil + } + + if link[0] != '<' { + return nil, fmt.Errorf("failed to parse link header: missing '<' in: %s", link) + } + + end := strings.Index(link, ">") + if end == -1 { + return nil, fmt.Errorf("failed to parse link header: missing '>' in: %s", link) + } + link = link[1:end] + + linkURL, err := url.Parse(link) + if err != nil { + return nil, err + } + if resp.Request == nil || resp.Request.URL == nil { + return nil, nil + } + linkURL = resp.Request.URL.ResolveReference(linkURL) + return linkURL, nil +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go new file mode 100644 index 000000000..2800b3303 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go @@ -0,0 +1,90 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" +) + +// MountableLayer wraps a v1.Layer in a shim that enables the layer to be +// "mounted" when published to another registry. +type MountableLayer struct { + v1.Layer + + Reference name.Reference +} + +// Descriptor retains the original descriptor from an image manifest. +// See partial.Descriptor. +func (ml *MountableLayer) Descriptor() (*v1.Descriptor, error) { + return partial.Descriptor(ml.Layer) +} + +// mountableImage wraps the v1.Layer references returned by the embedded v1.Image +// in MountableLayers so that remote.Write might attempt to mount them from their +// source repository.
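For orientation while reviewing the vendored `List` API above: a minimal sketch of a caller listing tags (the repository name is illustrative; errors are elided to panics). `List` follows the registry's `Link` headers internally, so the slice it returns covers all pages.

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	// Repository name is illustrative only.
	repo, err := name.NewRepository("gcr.io/google-containers/pause")
	if err != nil {
		panic(err)
	}

	// Credentials come from the local docker config via the default keychain.
	tags, err := remote.List(repo, remote.WithAuthFromKeychain(authn.DefaultKeychain))
	if err != nil {
		panic(err)
	}

	for _, t := range tags {
		fmt.Println(t)
	}
}
```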
+type mountableImage struct { + v1.Image + + Reference name.Reference +} + +// Layers implements v1.Image +func (mi *mountableImage) Layers() ([]v1.Layer, error) { + ls, err := mi.Image.Layers() + if err != nil { + return nil, err + } + mls := make([]v1.Layer, 0, len(ls)) + for _, l := range ls { + mls = append(mls, &MountableLayer{ + Layer: l, + Reference: mi.Reference, + }) + } + return mls, nil +} + +// LayerByDigest implements v1.Image +func (mi *mountableImage) LayerByDigest(d v1.Hash) (v1.Layer, error) { + l, err := mi.Image.LayerByDigest(d) + if err != nil { + return nil, err + } + return &MountableLayer{ + Layer: l, + Reference: mi.Reference, + }, nil +} + +// LayerByDiffID implements v1.Image +func (mi *mountableImage) LayerByDiffID(d v1.Hash) (v1.Layer, error) { + l, err := mi.Image.LayerByDiffID(d) + if err != nil { + return nil, err + } + return &MountableLayer{ + Layer: l, + Reference: mi.Reference, + }, nil +} + +// Descriptor retains the original descriptor from an index manifest. +// See partial.Descriptor. +func (mi *mountableImage) Descriptor() (*v1.Descriptor, error) { + return partial.Descriptor(mi.Image) +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go new file mode 100644 index 000000000..803c22939 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go @@ -0,0 +1,241 @@ +// Copyright 2020 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "fmt" + "net/http" + + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" + "github.com/google/go-containerregistry/pkg/v1/types" + "golang.org/x/sync/errgroup" +) + +// MultiWrite writes the given Images or ImageIndexes to the given refs, as +// efficiently as possible, by deduping shared layer blobs and uploading layers +// in parallel, then uploading all manifests in parallel. +// +// Current limitations: +// - All refs must share the same repository. +// - Images cannot consist of stream.Layers. +func MultiWrite(m map[name.Reference]Taggable, options ...Option) error { + // Determine the repository being pushed to; if asked to push to + // multiple repositories, give up. + var repo, zero name.Repository + for ref := range m { + if repo == zero { + repo = ref.Context() + } else if ref.Context() != repo { + return fmt.Errorf("MultiWrite can only push to the same repository (saw %q and %q)", repo, ref.Context()) + } + } + + // Collect unique blobs (layers and config blobs). + blobs := map[v1.Hash]v1.Layer{} + newManifests := []map[name.Reference]Taggable{} + // Separate originally requested images and indexes, so we can push images first. 
+ images, indexes := map[name.Reference]Taggable{}, map[name.Reference]Taggable{} + var err error + for ref, i := range m { + if img, ok := i.(v1.Image); ok { + images[ref] = i + if err := addImageBlobs(img, blobs); err != nil { + return err + } + continue + } + if idx, ok := i.(v1.ImageIndex); ok { + indexes[ref] = i + newManifests, err = addIndexBlobs(idx, blobs, repo, newManifests, 0) + if err != nil { + return err + } + continue + } + return fmt.Errorf("pushable resource was not Image or ImageIndex: %T", i) + } + + o, err := makeOptions(repo, options...) + if err != nil { + return err + } + // Determine if any of the layers are Mountable, because if so we need + // to request Pull scope too. + ls := []v1.Layer{} + for _, l := range blobs { + ls = append(ls, l) + } + scopes := scopesForUploadingImage(repo, ls) + tr, err := transport.NewWithContext(o.context, repo.Registry, o.auth, o.transport, scopes) + if err != nil { + return err + } + w := writer{ + repo: repo, + client: &http.Client{Transport: tr}, + context: o.context, + } + + // Upload individual blobs and collect any errors. + blobChan := make(chan v1.Layer, 2*o.jobs) + var g errgroup.Group + for i := 0; i < o.jobs; i++ { + // Start N workers consuming blobs to upload. + g.Go(func() error { + for b := range blobChan { + if err := w.uploadOne(b); err != nil { + return err + } + } + return nil + }) + } + go func() { + for _, b := range blobs { + blobChan <- b + } + close(blobChan) + }() + if err := g.Wait(); err != nil { + return err + } + + commitMany := func(m map[name.Reference]Taggable) error { + // With all of the constituent elements uploaded, upload the manifests + // to commit the images and indexes, and collect any errors. + type task struct { + i Taggable + ref name.Reference + } + taskChan := make(chan task, 2*o.jobs) + for i := 0; i < o.jobs; i++ { + // Start N workers consuming tasks to upload manifests. + g.Go(func() error { + for t := range taskChan { + if err := w.commitManifest(t.i, t.ref); err != nil { + return err + } + } + return nil + }) + } + go func() { + for ref, i := range m { + taskChan <- task{i, ref} + } + close(taskChan) + }() + return g.Wait() + } + // Push originally requested image manifests. These have no + // dependencies. + if err := commitMany(images); err != nil { + return err + } + // Push new manifests from lowest levels up. + for i := len(newManifests) - 1; i >= 0; i-- { + if err := commitMany(newManifests[i]); err != nil { + return err + } + } + // Push originally requested index manifests, which might depend on + // newly discovered manifests. + return commitMany(indexes) + +} + +// addIndexBlobs adds blobs to the set of blobs we intend to upload, and +// returns the latest copy of the ordered collection of manifests to upload. +func addIndexBlobs(idx v1.ImageIndex, blobs map[v1.Hash]v1.Layer, repo name.Repository, newManifests []map[name.Reference]Taggable, lvl int) ([]map[name.Reference]Taggable, error) { + if lvl > len(newManifests)-1 { + newManifests = append(newManifests, map[name.Reference]Taggable{}) + } + + im, err := idx.IndexManifest() + if err != nil { + return nil, err + } + for _, desc := range im.Manifests { + switch desc.MediaType { + case types.OCIImageIndex, types.DockerManifestList: + idx, err := idx.ImageIndex(desc.Digest) + if err != nil { + return nil, err + } + newManifests, err = addIndexBlobs(idx, blobs, repo, newManifests, lvl+1) + if err != nil { + return nil, err + } + + // Also track the sub-index manifest to upload later by digest. 
+ newManifests[lvl][repo.Digest(desc.Digest.String())] = idx + case types.OCIManifestSchema1, types.DockerManifestSchema2: + img, err := idx.Image(desc.Digest) + if err != nil { + return nil, err + } + if err := addImageBlobs(img, blobs); err != nil { + return nil, err + } + + // Also track the sub-image manifest to upload later by digest. + newManifests[lvl][repo.Digest(desc.Digest.String())] = img + default: + return nil, fmt.Errorf("unknown media type: %v", desc.MediaType) + } + } + return newManifests, nil +} + +func addImageBlobs(img v1.Image, blobs map[v1.Hash]v1.Layer) error { + ls, err := img.Layers() + if err != nil { + return err + } + // Collect all layers. + for _, l := range ls { + d, err := l.Digest() + if err != nil { + return err + } + + // Ignore foreign layers. + mt, err := l.MediaType() + if err != nil { + return err + } + if !mt.IsDistributable() { + // TODO(jonjohnsonjr): Add "allow-nondistributable-artifacts" option. + continue + } + + blobs[d] = l + } + + // Collect config blob. + cl, err := partial.ConfigLayer(img) + if err != nil { + return err + } + cld, err := cl.Digest() + if err != nil { + return err + } + blobs[cld] = cl + return nil +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go new file mode 100644 index 000000000..5544fabf9 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go @@ -0,0 +1,175 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "context" + "errors" + "net/http" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/logs" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" +) + +// Option is a functional option for remote operations. +type Option func(*options) error + +type options struct { + auth authn.Authenticator + keychain authn.Keychain + transport http.RoundTripper + platform v1.Platform + context context.Context + jobs int + userAgent string +} + +var defaultPlatform = v1.Platform{ + Architecture: "amd64", + OS: "linux", +} + +const defaultJobs = 4 + +func makeOptions(target authn.Resource, opts ...Option) (*options, error) { + o := &options{ + auth: authn.Anonymous, + transport: http.DefaultTransport, + platform: defaultPlatform, + context: context.Background(), + jobs: defaultJobs, + } + + for _, option := range opts { + if err := option(o); err != nil { + return nil, err + } + } + + if o.keychain != nil { + auth, err := o.keychain.Resolve(target) + if err != nil { + return nil, err + } + if auth == authn.Anonymous { + logs.Warn.Printf("No matching credentials were found for %q, falling back on anonymous", target) + } + o.auth = auth + } + + // Wrap the transport in something that logs requests and responses. 
+ // It's expensive to generate the dumps, so skip it if we're writing + // to nothing. + if logs.Enabled(logs.Debug) { + o.transport = transport.NewLogger(o.transport) + } + + // Wrap the transport in something that can retry network flakes. + o.transport = transport.NewRetry(o.transport) + + // Wrap this last to prevent transport.New from double-wrapping. + if o.userAgent != "" { + o.transport = transport.NewUserAgent(o.transport, o.userAgent) + } + + return o, nil +} + +// WithTransport is a functional option for overriding the default transport +// for remote operations. +// +// The default transport is http.DefaultTransport. +func WithTransport(t http.RoundTripper) Option { + return func(o *options) error { + o.transport = t + return nil + } +} + +// WithAuth is a functional option for overriding the default authenticator +// for remote operations. +// +// The default authenticator is authn.Anonymous. +func WithAuth(auth authn.Authenticator) Option { + return func(o *options) error { + o.auth = auth + return nil + } +} + +// WithAuthFromKeychain is a functional option for overriding the default +// authenticator for remote operations, using an authn.Keychain to find +// credentials. +// +// The default authenticator is authn.Anonymous. +func WithAuthFromKeychain(keys authn.Keychain) Option { + return func(o *options) error { + o.keychain = keys + return nil + } +} + +// WithPlatform is a functional option for overriding the default platform +// that Image and Descriptor.Image use for resolving an index to an image. +// +// The default platform is amd64/linux. +func WithPlatform(p v1.Platform) Option { + return func(o *options) error { + o.platform = p + return nil + } +} + +// WithContext is a functional option for setting the context in http requests +// performed by a given function. Note that this context is used for _all_ +// http requests, not just the initial volley. E.g., for remote.Image, the +// context will be set on http requests generated by subsequent calls to +// RawConfigFile() and even methods on layers returned by Layers(). +// +// The default context is context.Background(). +func WithContext(ctx context.Context) Option { + return func(o *options) error { + o.context = ctx + return nil + } +} + +// WithJobs is a functional option for setting the parallelism of remote +// operations performed by a given function. Note that not all remote +// operations support parallelism. +// +// The default value is 4. +func WithJobs(jobs int) Option { + return func(o *options) error { + if jobs <= 0 { + return errors.New("jobs must be greater than zero") + } + o.jobs = jobs + return nil + } +} + +// WithUserAgent adds the given string to the User-Agent header for any HTTP +// requests. This header will also include "go-containerregistry/${version}". +// +// If you want to completely overwrite the User-Agent header, use WithTransport.
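Taken together, these options compose as variadic arguments to the entrypoints above. A minimal sketch of resolving an index to a platform-specific image (the reference and platform here are illustrative, not part of this change):

```go
package main

import (
	"context"
	"fmt"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	// Reference is illustrative only.
	ref, err := name.ParseReference("gcr.io/google-containers/pause:latest")
	if err != nil {
		panic(err)
	}

	img, err := remote.Image(ref,
		remote.WithAuthFromKeychain(authn.DefaultKeychain),
		// If ref points at an index, pick the linux/arm64 child.
		remote.WithPlatform(v1.Platform{OS: "linux", Architecture: "arm64"}),
		// This context applies to all requests, including later layer pulls.
		remote.WithContext(context.Background()),
	)
	if err != nil {
		panic(err)
	}

	d, err := img.Digest()
	if err != nil {
		panic(err)
	}
	fmt.Println(d)
}
```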
+func WithUserAgent(ua string) Option { + return func(o *options) error { + o.userAgent = ua + return nil + } +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/README.md b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/README.md new file mode 100644 index 000000000..bd4d957b0 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/README.md @@ -0,0 +1,129 @@ +# `transport` + +[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/transport?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/transport) + +The [distribution protocol](https://github.com/opencontainers/distribution-spec) is fairly simple, but correctly [implementing authentication](../../../authn/README.md) is **hard**. + +This package [implements](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote/transport#New) an [`http.RoundTripper`](https://godoc.org/net/http#RoundTripper) +that transparently performs: +* [Token +Authentication](https://docs.docker.com/registry/spec/auth/token/) and +* [OAuth2 +Authentication](https://docs.docker.com/registry/spec/auth/oauth/) + +for registry clients. + +## Raison d'être + +> Why not just use the [`docker/distribution`](https://godoc.org/github.com/docker/distribution/registry/client/auth) client? + +Great question! Mostly, because I don't want to depend on [`prometheus/client_golang`](https://github.com/prometheus/client_golang). + +As a performance optimization, that client uses [a cache](https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/client/repository.go#L173) to keep track of a mapping between blob digests and their [descriptors](https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/blobs.go#L57-L86). Unfortunately, the cache [uses prometheus](https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/storage/cache/cachedblobdescriptorstore.go#L44) to track hits and misses, so if you want to use that client you have to pull in all of prometheus, which is pretty large. + +![docker/distribution](../../../../images/docker.dot.svg) + +> Why does it matter if you depend on prometheus? Who cares? + +It's generally polite to your downstream to reduce the number of dependencies your package requires: + +* Downloading your package is faster, which helps our Australian friends and people on airplanes. +* There is less code to compile, which speeds up builds and saves the planet from global warming. +* You reduce the likelihood of inflicting dependency hell upon your consumers. +* [Tim Hockin](https://twitter.com/thockin/status/958606077456654336) prefers it based on his experience working on Kubernetes, and he's a pretty smart guy. + +> Okay, what about [`containerd/containerd`](https://godoc.org/github.com/containerd/containerd/remotes/docker)? + +Similar reasons! That ends up pulling in grpc, protobuf, and logrus. + +![containerd/containerd](../../../../images/containerd.dot.svg) + +> Well... what about [`containers/image`](https://godoc.org/github.com/containers/image/docker)? + +That just uses the `docker/distribution` client... and more! + +![containers/image](../../../../images/containers.dot.svg) + +> Wow, what about this package? + +Of course, this package isn't perfect either.
`transport` depends on `authn`, +which in turn depends on docker's config file parsing and handling package, +which you don't strictly need but almost certainly want if you're going to be +interacting with a registry. + +![google/go-containerregistry](../../../../images/ggcr.dot.svg) + +*These graphs were generated by +[`kisielk/godepgraph`](https://github.com/kisielk/godepgraph).* + +## Usage + +This is heavily used by the +[`remote`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote) +package, which implements higher level image-centric functionality, but this +package is useful if you want to interact directly with the registry to do +something that `remote` doesn't support, e.g. [to handle schema 1 +images](https://github.com/google/go-containerregistry/pull/509). + +This package also includes some [error +handling](https://github.com/opencontainers/distribution-spec/blob/60be706c34ee7805bdd1d3d11affec53b0dfb8fb/spec.md#errors) +facilities in the form of +[`CheckError`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote/transport#CheckError), +which will parse the response body into a structured error for unexpected http +status codes. + +Here's a "simple" program that writes the result of +[listing tags](https://github.com/opencontainers/distribution-spec/blob/60be706c34ee7805bdd1d3d11affec53b0dfb8fb/spec.md#tags) +for [`gcr.io/google-containers/pause`](https://gcr.io/google-containers/pause) +to stdout. + +```go +package main + +import ( + "io" + "net/http" + "os" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" +) + +func main() { + repo, err := name.NewRepository("gcr.io/google-containers/pause") + if err != nil { + panic(err) + } + + // Fetch credentials based on your docker config file, which is $HOME/.docker/config.json or $DOCKER_CONFIG. + auth, err := authn.DefaultKeychain.Resolve(repo.Registry) + if err != nil { + panic(err) + } + + // Construct an http.Client that is authorized to pull from gcr.io/google-containers/pause. + scopes := []string{repo.Scope(transport.PullScope)} + t, err := transport.New(repo.Registry, auth, http.DefaultTransport, scopes) + if err != nil { + panic(err) + } + client := &http.Client{Transport: t} + + // Make the actual request. + resp, err := client.Get("https://gcr.io/v2/google-containers/pause/tags/list") + if err != nil { + panic(err) + } + + // Assert that we get a 200, otherwise attempt to parse body as a structured error. + if err := transport.CheckError(resp, http.StatusOK); err != nil { + panic(err) + } + + // Write the response to stdout. + if _, err := io.Copy(os.Stdout, resp.Body); err != nil { + panic(err) + } +} +``` diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go new file mode 100644 index 000000000..fdb362b76 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go @@ -0,0 +1,62 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "encoding/base64" + "fmt" + "net/http" + + "github.com/google/go-containerregistry/pkg/authn" +) + +type basicTransport struct { + inner http.RoundTripper + auth authn.Authenticator + target string +} + +var _ http.RoundTripper = (*basicTransport)(nil) + +// RoundTrip implements http.RoundTripper +func (bt *basicTransport) RoundTrip(in *http.Request) (*http.Response, error) { + if bt.auth != authn.Anonymous { + auth, err := bt.auth.Authorization() + if err != nil { + return nil, err + } + + // http.Client handles redirects at a layer above the http.RoundTripper + // abstraction, so to avoid forwarding Authorization headers to places + // we are redirected, only set it when the authorization header matches + // the host with which we are interacting. + // In case of redirect http.Client can use an empty Host, check URL too. + if in.Host == bt.target || in.URL.Host == bt.target { + if bearer := auth.RegistryToken; bearer != "" { + hdr := fmt.Sprintf("Bearer %s", bearer) + in.Header.Set("Authorization", hdr) + } else if user, pass := auth.Username, auth.Password; user != "" && pass != "" { + delimited := fmt.Sprintf("%s:%s", user, pass) + encoded := base64.StdEncoding.EncodeToString([]byte(delimited)) + hdr := fmt.Sprintf("Basic %s", encoded) + in.Header.Set("Authorization", hdr) + } else if token := auth.Auth; token != "" { + hdr := fmt.Sprintf("Basic %s", token) + in.Header.Set("Authorization", hdr) + } + } + } + return bt.inner.RoundTrip(in) +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go new file mode 100644 index 000000000..cd05509e7 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go @@ -0,0 +1,308 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "strings" + + authchallenge "github.com/docker/distribution/registry/client/auth/challenge" + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/internal/redact" + "github.com/google/go-containerregistry/pkg/name" +) + +type bearerTransport struct { + // Wrapped by bearerTransport. + inner http.RoundTripper + // Basic credentials that we exchange for bearer tokens. + basic authn.Authenticator + // Holds the bearer response from the token service. 
+ bearer authn.AuthConfig + // Registry to which we send bearer tokens. + registry name.Registry + // See https://tools.ietf.org/html/rfc6750#section-3 + realm string + // See https://docs.docker.com/registry/spec/auth/token/ + service string + scopes []string + // Scheme we should use, determined by ping response. + scheme string +} + +var _ http.RoundTripper = (*bearerTransport)(nil) + +var portMap = map[string]string{ + "http": "80", + "https": "443", +} + +func stringSet(ss []string) map[string]struct{} { + set := make(map[string]struct{}) + for _, s := range ss { + set[s] = struct{}{} + } + return set +} + +// RoundTrip implements http.RoundTripper +func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) { + sendRequest := func() (*http.Response, error) { + // http.Client handles redirects at a layer above the http.RoundTripper + // abstraction, so to avoid forwarding Authorization headers to places + // we are redirected, only set it when the authorization header matches + // the registry with which we are interacting. + // In case of redirect http.Client can use an empty Host, check URL too. + if matchesHost(bt.registry, in, bt.scheme) { + hdr := fmt.Sprintf("Bearer %s", bt.bearer.RegistryToken) + in.Header.Set("Authorization", hdr) + } + return bt.inner.RoundTrip(in) + } + + res, err := sendRequest() + if err != nil { + return nil, err + } + + // If we hit a WWW-Authenticate challenge, it might be due to expired tokens or insufficient scope. + if challenges := authchallenge.ResponseChallenges(res); len(challenges) != 0 { + for _, wac := range challenges { + // TODO(jonjohnsonjr): Should we also update "realm" or "service"? + if scope, ok := wac.Parameters["scope"]; ok { + // From https://tools.ietf.org/html/rfc6750#section-3 + // The "scope" attribute is defined in Section 3.3 of [RFC6749]. The + // "scope" attribute is a space-delimited list of case-sensitive scope + // values indicating the required scope of the access token for + // accessing the requested resource. + scopes := strings.Split(scope, " ") + + // Add any scopes that we don't already request. + got := stringSet(bt.scopes) + for _, want := range scopes { + if _, ok := got[want]; !ok { + bt.scopes = append(bt.scopes, want) + } + } + } + } + + // TODO(jonjohnsonjr): Teach transport.Error about "error" and "error_description" from challenge. + + // Retry the request to attempt to get a valid token. + if err = bt.refresh(in.Context()); err != nil { + return nil, err + } + return sendRequest() + } + + return res, err +} + +// It's unclear which authentication flow to use based purely on the protocol, +// so we rely on heuristics and fallbacks to support as many registries as possible. +// The basic token exchange is attempted first, falling back to the oauth flow. +// If the IdentityToken is set, this indicates that we should start with the oauth flow. +func (bt *bearerTransport) refresh(ctx context.Context) error { + auth, err := bt.basic.Authorization() + if err != nil { + return err + } + + if auth.RegistryToken != "" { + bt.bearer.RegistryToken = auth.RegistryToken + return nil + } + + var content []byte + if auth.IdentityToken != "" { + // If the secret being stored is an identity token, + // the Username should be set to <token>, which indicates + // we are using an oauth flow. + content, err = bt.refreshOauth(ctx) + if terr, ok := err.(*Error); ok && terr.StatusCode == http.StatusNotFound { + // Note: Not all token servers implement oauth2.
+ // If the request to the endpoint returns 404 using the HTTP POST method, + // refer to Token Documentation for using the HTTP GET method supported by all token servers. + content, err = bt.refreshBasic(ctx) + } + } else { + content, err = bt.refreshBasic(ctx) + } + if err != nil { + return err + } + + // Some registries don't have "token" in the response. See #54. + type tokenResponse struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + // TODO: handle expiry? + } + + var response tokenResponse + if err := json.Unmarshal(content, &response); err != nil { + return err + } + + // Some registries set access_token instead of token. + if response.AccessToken != "" { + response.Token = response.AccessToken + } + + // Find a token to turn into a Bearer authenticator + if response.Token != "" { + bt.bearer.RegistryToken = response.Token + } else { + return fmt.Errorf("no token in bearer response:\n%s", content) + } + + // If we obtained a refresh token from the oauth flow, use that for refresh() now. + if response.RefreshToken != "" { + bt.basic = authn.FromConfig(authn.AuthConfig{ + IdentityToken: response.RefreshToken, + }) + } + + return nil +} + +func matchesHost(reg name.Registry, in *http.Request, scheme string) bool { + canonicalHeaderHost := canonicalAddress(in.Host, scheme) + canonicalURLHost := canonicalAddress(in.URL.Host, scheme) + canonicalRegistryHost := canonicalAddress(reg.RegistryStr(), scheme) + return canonicalHeaderHost == canonicalRegistryHost || canonicalURLHost == canonicalRegistryHost +} + +func canonicalAddress(host, scheme string) (address string) { + // The host may be any one of: + // - hostname + // - hostname:port + // - ipv4 + // - ipv4:port + // - ipv6 + // - [ipv6]:port + // As net.SplitHostPort returns an error if the host does not contain a port, we should only attempt + // to call it when we know that the address contains a port + if strings.Count(host, ":") == 1 || (strings.Count(host, ":") >= 2 && strings.Contains(host, "]:")) { + hostname, port, err := net.SplitHostPort(host) + if err != nil { + return host + } + if port == "" { + port = portMap[scheme] + } + + return net.JoinHostPort(hostname, port) + } + + return net.JoinHostPort(host, portMap[scheme]) +} + +// https://docs.docker.com/registry/spec/auth/oauth/ +func (bt *bearerTransport) refreshOauth(ctx context.Context) ([]byte, error) { + auth, err := bt.basic.Authorization() + if err != nil { + return nil, err + } + + u, err := url.Parse(bt.realm) + if err != nil { + return nil, err + } + + v := url.Values{} + v.Set("scope", strings.Join(bt.scopes, " ")) + v.Set("service", bt.service) + v.Set("client_id", defaultUserAgent) + if auth.IdentityToken != "" { + v.Set("grant_type", "refresh_token") + v.Set("refresh_token", auth.IdentityToken) + } else if auth.Username != "" && auth.Password != "" { + // TODO(#629): This is unreachable. + v.Set("grant_type", "password") + v.Set("username", auth.Username) + v.Set("password", auth.Password) + v.Set("access_type", "offline") + } + + client := http.Client{Transport: bt.inner} + req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + // We don't want to log credentials. 
+ ctx = redact.NewContext(ctx, "oauth token response contains credentials") + + resp, err := client.Do(req.WithContext(ctx)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err := CheckError(resp, http.StatusOK); err != nil { + return nil, err + } + + return ioutil.ReadAll(resp.Body) +} + +// https://docs.docker.com/registry/spec/auth/token/ +func (bt *bearerTransport) refreshBasic(ctx context.Context) ([]byte, error) { + u, err := url.Parse(bt.realm) + if err != nil { + return nil, err + } + b := &basicTransport{ + inner: bt.inner, + auth: bt.basic, + target: u.Host, + } + client := http.Client{Transport: b} + + v := u.Query() + v["scope"] = bt.scopes + v.Set("service", bt.service) + u.RawQuery = v.Encode() + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + + // We don't want to log credentials. + ctx = redact.NewContext(ctx, "basic token response contains credentials") + + resp, err := client.Do(req.WithContext(ctx)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err := CheckError(resp, http.StatusOK); err != nil { + return nil, err + } + + return ioutil.ReadAll(resp.Body) +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go new file mode 100644 index 000000000..ff7025b5c --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go @@ -0,0 +1,18 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transport provides facilities for setting up an authenticated +// http.RoundTripper given an Authenticator and base RoundTripper. See +// transport.New for more information. +package transport diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go new file mode 100644 index 000000000..567080389 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go @@ -0,0 +1,196 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package transport + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" +) + +// The set of query string keys that we expect to send as part of the registry +// protocol. Anything else is potentially dangerous to leak, as it's probably +// from a redirect. These redirects often included tokens or signed URLs. +var paramAllowlist = map[string]struct{}{ + // Token exchange + "scope": {}, + "service": {}, + // Cross-repo mounting + "mount": {}, + "from": {}, + // Layer PUT + "digest": {}, + // Listing tags and catalog + "n": {}, + "last": {}, +} + +// Error implements error to support the following error specification: +// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors +type Error struct { + Errors []Diagnostic `json:"errors,omitempty"` + // The http status code returned. + StatusCode int + // The raw body if we couldn't understand it. + rawBody string + // The request that failed. + request *http.Request +} + +// Check that Error implements error +var _ error = (*Error)(nil) + +// Error implements error +func (e *Error) Error() string { + prefix := "" + if e.request != nil { + prefix = fmt.Sprintf("%s %s: ", e.request.Method, redactURL(e.request.URL)) + } + return prefix + e.responseErr() +} + +func (e *Error) responseErr() string { + switch len(e.Errors) { + case 0: + if len(e.rawBody) == 0 { + if e.request != nil && e.request.Method == http.MethodHead { + return fmt.Sprintf("unexpected status code %d %s (HEAD responses have no body, use GET for details)", e.StatusCode, http.StatusText(e.StatusCode)) + } + return fmt.Sprintf("unexpected status code %d %s", e.StatusCode, http.StatusText(e.StatusCode)) + } + return fmt.Sprintf("unexpected status code %d %s: %s", e.StatusCode, http.StatusText(e.StatusCode), e.rawBody) + case 1: + return e.Errors[0].String() + default: + var errors []string + for _, d := range e.Errors { + errors = append(errors, d.String()) + } + return fmt.Sprintf("multiple errors returned: %s", + strings.Join(errors, "; ")) + } +} + +// Temporary returns whether the request that preceded the error is temporary. +func (e *Error) Temporary() bool { + if len(e.Errors) == 0 { + _, ok := temporaryStatusCodes[e.StatusCode] + return ok + } + for _, d := range e.Errors { + if _, ok := temporaryErrorCodes[d.Code]; !ok { + return false + } + } + return true +} + +// TODO(jonjohnsonjr): Consider moving to pkg/internal/redact. +func redactURL(original *url.URL) *url.URL { + qs := original.Query() + for k, v := range qs { + for i := range v { + if _, ok := paramAllowlist[k]; !ok { + // key is not in the Allowlist + v[i] = "REDACTED" + } + } + } + redacted := *original + redacted.RawQuery = qs.Encode() + return &redacted +} + +// Diagnostic represents a single error returned by a Docker registry interaction. +type Diagnostic struct { + Code ErrorCode `json:"code"` + Message string `json:"message,omitempty"` + Detail interface{} `json:"detail,omitempty"` +} + +// String stringifies the Diagnostic in the form: $Code: $Message[; $Detail] +func (d Diagnostic) String() string { + msg := fmt.Sprintf("%s: %s", d.Code, d.Message) + if d.Detail != nil { + msg = fmt.Sprintf("%s; %v", msg, d.Detail) + } + return msg +} + +// ErrorCode is an enumeration of supported error codes. 
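Callers can recover the structured *transport.Error above with errors.As and consult Temporary() when deciding whether to retry. A minimal sketch (the image reference is illustrative and presumably missing):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
)

func main() {
	// Reference is illustrative only.
	ref, err := name.ParseReference("gcr.io/google-containers/no-such-image:latest")
	if err != nil {
		panic(err)
	}

	if _, err := remote.Image(ref); err != nil {
		var terr *transport.Error
		if errors.As(err, &terr) {
			// StatusCode and Temporary() drive retry decisions; Errors carries
			// the registry's structured diagnostics, if it returned any.
			fmt.Println("status:", terr.StatusCode, "retryable:", terr.Temporary())
			for _, d := range terr.Errors {
				fmt.Println(d.Code, d.Message)
			}
		}
	}
}
```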
+type ErrorCode string + +// The set of error conditions a registry may return: +// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors-2 +const ( + BlobUnknownErrorCode ErrorCode = "BLOB_UNKNOWN" + BlobUploadInvalidErrorCode ErrorCode = "BLOB_UPLOAD_INVALID" + BlobUploadUnknownErrorCode ErrorCode = "BLOB_UPLOAD_UNKNOWN" + DigestInvalidErrorCode ErrorCode = "DIGEST_INVALID" + ManifestBlobUnknownErrorCode ErrorCode = "MANIFEST_BLOB_UNKNOWN" + ManifestInvalidErrorCode ErrorCode = "MANIFEST_INVALID" + ManifestUnknownErrorCode ErrorCode = "MANIFEST_UNKNOWN" + ManifestUnverifiedErrorCode ErrorCode = "MANIFEST_UNVERIFIED" + NameInvalidErrorCode ErrorCode = "NAME_INVALID" + NameUnknownErrorCode ErrorCode = "NAME_UNKNOWN" + SizeInvalidErrorCode ErrorCode = "SIZE_INVALID" + TagInvalidErrorCode ErrorCode = "TAG_INVALID" + UnauthorizedErrorCode ErrorCode = "UNAUTHORIZED" + DeniedErrorCode ErrorCode = "DENIED" + UnsupportedErrorCode ErrorCode = "UNSUPPORTED" + TooManyRequestsErrorCode ErrorCode = "TOOMANYREQUESTS" +) + +// TODO: Include other error types. +var temporaryErrorCodes = map[ErrorCode]struct{}{ + BlobUploadInvalidErrorCode: {}, + TooManyRequestsErrorCode: {}, +} + +var temporaryStatusCodes = map[int]struct{}{ + http.StatusInternalServerError: {}, + http.StatusBadGateway: {}, + http.StatusServiceUnavailable: {}, +} + +// CheckError returns a structured error if the response status is not in codes. +func CheckError(resp *http.Response, codes ...int) error { + for _, code := range codes { + if resp.StatusCode == code { + // This is one of the supported status codes. + return nil + } + } + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + + // https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors + structuredError := &Error{} + + // This can fail if e.g. the response body is not valid JSON. That's fine, + // we'll construct an appropriate error string from the body and status code. + _ = json.Unmarshal(b, structuredError) + + structuredError.rawBody = string(b) + structuredError.StatusCode = resp.StatusCode + structuredError.request = resp.Request + + return structuredError +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/logger.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/logger.go new file mode 100644 index 000000000..3b0248663 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/logger.go @@ -0,0 +1,91 @@ +// Copyright 2020 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package transport + +import ( + "fmt" + "net/http" + "net/http/httputil" + "time" + + "github.com/google/go-containerregistry/pkg/internal/redact" + "github.com/google/go-containerregistry/pkg/logs" +) + +type logTransport struct { + inner http.RoundTripper +} + +// NewLogger returns a transport that logs requests and responses to +// github.com/google/go-containerregistry/pkg/logs.Debug. +func NewLogger(inner http.RoundTripper) http.RoundTripper { + return &logTransport{inner} +} + +func (t *logTransport) RoundTrip(in *http.Request) (out *http.Response, err error) { + // Inspired by: github.com/motemen/go-loghttp + + // We redact token responses and binary blobs in response/request. + omitBody, reason := redact.FromContext(in.Context()) + if omitBody { + logs.Debug.Printf("--> %s %s [body redacted: %s]", in.Method, in.URL, reason) + } else { + logs.Debug.Printf("--> %s %s", in.Method, in.URL) + } + + // Save these headers so we can redact Authorization. + savedHeaders := in.Header.Clone() + if in.Header != nil && in.Header.Get("authorization") != "" { + in.Header.Set("authorization", "") + } + + b, err := httputil.DumpRequestOut(in, !omitBody) + if err == nil { + logs.Debug.Println(string(b)) + } else { + logs.Debug.Printf("Failed to dump request %s %s: %v", in.Method, in.URL, err) + } + + // Restore the non-redacted headers. + in.Header = savedHeaders + + start := time.Now() + out, err = t.inner.RoundTrip(in) + duration := time.Since(start) + if err != nil { + logs.Debug.Printf("<-- %v %s %s (%s)", err, in.Method, in.URL, duration) + } + if out != nil { + msg := fmt.Sprintf("<-- %d", out.StatusCode) + if out.Request != nil { + msg = fmt.Sprintf("%s %s", msg, out.Request.URL) + } + msg = fmt.Sprintf("%s (%s)", msg, duration) + + if omitBody { + msg = fmt.Sprintf("%s [body redacted: %s]", msg, reason) + } + + logs.Debug.Print(msg) + + b, err := httputil.DumpResponse(out, !omitBody) + if err == nil { + logs.Debug.Println(string(b)) + } else { + logs.Debug.Printf("Failed to dump response %s %s: %v", in.Method, in.URL, err) + } + } + return +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go new file mode 100644 index 000000000..396d4e034 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go @@ -0,0 +1,129 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
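A minimal sketch of wiring `NewLogger` from logger.go above into an `http.Client` (the registry URL is hypothetical); note that `logs.Debug` discards output by default, so it has to be pointed somewhere first:

```go
package main

import (
	"net/http"
	"os"

	"github.com/google/go-containerregistry/pkg/logs"
	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
)

func main() {
	// logs.Debug writes to ioutil.Discard by default.
	logs.Debug.SetOutput(os.Stderr)

	// Every round trip is now dumped, with Authorization headers blanked and
	// bodies omitted when the request context asks for redaction.
	client := &http.Client{Transport: transport.NewLogger(http.DefaultTransport)}

	resp, err := client.Get("https://registry.example.com/v2/")
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
}
```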
+ +package transport + +import ( + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + + authchallenge "github.com/docker/distribution/registry/client/auth/challenge" + "github.com/google/go-containerregistry/pkg/name" +) + +type challenge string + +const ( + anonymous challenge = "anonymous" + basic challenge = "basic" + bearer challenge = "bearer" +) + +type pingResp struct { + challenge challenge + + // Following the challenge there are often key/value pairs + // e.g. Bearer service="gcr.io",realm="https://auth.gcr.io/v36/tokenz" + parameters map[string]string + + // The registry's scheme to use. Communicates whether we fell back to http. + scheme string +} + +func (c challenge) Canonical() challenge { + return challenge(strings.ToLower(string(c))) +} + +func parseChallenge(suffix string) map[string]string { + kv := make(map[string]string) + for _, token := range strings.Split(suffix, ",") { + // Trim any whitespace around each token. + token = strings.Trim(token, " ") + + // Break the token into a key/value pair + if parts := strings.SplitN(token, "=", 2); len(parts) == 2 { + // Unquote the value, if it is quoted. + kv[parts[0]] = strings.Trim(parts[1], `"`) + } else { + // If there was only one part, treat is as a key with an empty value + kv[token] = "" + } + } + return kv +} + +func ping(ctx context.Context, reg name.Registry, t http.RoundTripper) (*pingResp, error) { + client := http.Client{Transport: t} + + // This first attempts to use "https" for every request, falling back to http + // if the registry matches our localhost heuristic or if it is intentionally + // set to insecure via name.NewInsecureRegistry. + schemes := []string{"https"} + if reg.Scheme() == "http" { + schemes = append(schemes, "http") + } + + var errs []string + for _, scheme := range schemes { + url := fmt.Sprintf("%s://%s/v2/", scheme, reg.Name()) + req, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + return nil, err + } + resp, err := client.Do(req.WithContext(ctx)) + if err != nil { + errs = append(errs, err.Error()) + // Potentially retry with http. + continue + } + defer func() { + // By draining the body, make sure to reuse the connection made by + // the ping for the following access to the registry + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + }() + + switch resp.StatusCode { + case http.StatusOK: + // If we get a 200, then no authentication is needed. + return &pingResp{ + challenge: anonymous, + scheme: scheme, + }, nil + case http.StatusUnauthorized: + if challenges := authchallenge.ResponseChallenges(resp); len(challenges) != 0 { + // If we hit more than one, I'm not even sure what to do. + wac := challenges[0] + return &pingResp{ + challenge: challenge(wac.Scheme).Canonical(), + parameters: wac.Parameters, + scheme: scheme, + }, nil + } + // Otherwise, just return the challenge without parameters. 
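+			// (This fallback covers registries whose WWW-Authenticate header the
+			// challenge parser above did not recognize.)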
+ return &pingResp{ + challenge: challenge(resp.Header.Get("WWW-Authenticate")).Canonical(), + scheme: scheme, + }, nil + default: + return nil, CheckError(resp, http.StatusOK, http.StatusUnauthorized) + } + } + return nil, errors.New(strings.Join(errs, "; ")) +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go new file mode 100644 index 000000000..672622b61 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go @@ -0,0 +1,88 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "net/http" + "time" + + "github.com/google/go-containerregistry/pkg/internal/retry" +) + +// Sleep for 0.1, 0.3, 0.9, 2.7 seconds. This should cover networking blips. +var defaultBackoff = retry.Backoff{ + Duration: 100 * time.Millisecond, + Factor: 3.0, + Jitter: 0.1, + Steps: 5, +} + +var _ http.RoundTripper = (*retryTransport)(nil) + +// retryTransport wraps a RoundTripper and retries temporary network errors. +type retryTransport struct { + inner http.RoundTripper + backoff retry.Backoff + predicate retry.Predicate +} + +// Option is a functional option for retryTransport. +type Option func(*options) + +type options struct { + backoff retry.Backoff + predicate retry.Predicate +} + +// WithRetryBackoff sets the backoff for retry operations. +func WithRetryBackoff(backoff retry.Backoff) Option { + return func(o *options) { + o.backoff = backoff + } +} + +// WithRetryPredicate sets the predicate for retry operations. +func WithRetryPredicate(predicate func(error) bool) Option { + return func(o *options) { + o.predicate = predicate + } +} + +// NewRetry returns a transport that retries errors. +func NewRetry(inner http.RoundTripper, opts ...Option) http.RoundTripper { + o := &options{ + backoff: defaultBackoff, + predicate: retry.IsTemporary, + } + + for _, opt := range opts { + opt(o) + } + + return &retryTransport{ + inner: inner, + backoff: o.backoff, + predicate: o.predicate, + } +} + +func (t *retryTransport) RoundTrip(in *http.Request) (out *http.Response, err error) { + roundtrip := func() error { + out, err = t.inner.RoundTrip(in) + return err + } + retry.Retry(roundtrip, t.predicate, t.backoff) + return +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go new file mode 100644 index 000000000..d70b6a850 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go @@ -0,0 +1,44 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "net/http" + + "github.com/google/go-containerregistry/pkg/name" +) + +type schemeTransport struct { + // Scheme we should use, determined by ping response. + scheme string + + // Registry we're talking to. + registry name.Registry + + // Wrapped by schemeTransport. + inner http.RoundTripper +} + +// RoundTrip implements http.RoundTripper +func (st *schemeTransport) RoundTrip(in *http.Request) (*http.Response, error) { + // When we ping() the registry, we determine whether to use http or https + // based on which scheme was successful. That is only valid for the + // registry server and not e.g. a separate token server or blob storage, + // so we should only override the scheme if the host is the registry. + if matchesHost(st.registry, in, st.scheme) { + in.URL.Scheme = st.scheme + } + return st.inner.RoundTrip(in) +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go new file mode 100644 index 000000000..c3b56f7a4 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go @@ -0,0 +1,24 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +// Scopes suitable to qualify each Repository +const ( + PullScope string = "pull" + PushScope string = "push,pull" + // For now DELETE is PUSH, which is the read/write ACL. + DeleteScope string = PushScope + CatalogScope string = "catalog" +) diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go new file mode 100644 index 000000000..5c35fc7c9 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go @@ -0,0 +1,103 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"github.com/google/go-containerregistry/pkg/authn"
+	"github.com/google/go-containerregistry/pkg/name"
+)
+
+// New returns a new RoundTripper based on the provided RoundTripper that has been
+// set up to authenticate with the remote registry "reg", in the capacity
+// laid out by the specified scopes.
+//
+// TODO(jonjohnsonjr): Deprecate this.
+func New(reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scopes []string) (http.RoundTripper, error) {
+	return NewWithContext(context.Background(), reg, auth, t, scopes)
+}
+
+// NewWithContext returns a new RoundTripper based on the provided RoundTripper that has been
+// set up to authenticate with the remote registry "reg", in the capacity
+// laid out by the specified scopes.
+func NewWithContext(ctx context.Context, reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scopes []string) (http.RoundTripper, error) {
+	// The handshake:
+	//  1. Use "t" to ping() the registry for the authentication challenge.
+	//
+	//  2a. If we get back a 200, then simply use "t".
+	//
+	//  2b. If we get back a 401 with a Basic challenge, then use a transport
+	//      that just attaches auth to each round trip.
+	//
+	//  2c. If we get back a 401 with a Bearer challenge, then use a transport
+	//      that attaches a bearer token to each request, and refreshes it on 401s.
+	//      Perform an initial refresh to seed the bearer token.
+
+	// First we ping the registry to determine the parameters of the authentication handshake
+	// (if one is even necessary).
+	pr, err := ping(ctx, reg, t)
+	if err != nil {
+		return nil, err
+	}
+
+	// Wrap t with a useragent transport unless we already have one.
+	if _, ok := t.(*userAgentTransport); !ok {
+		t = NewUserAgent(t, "")
+	}
+
+	// Wrap t in a transport that selects the appropriate scheme based on the ping response.
+	t = &schemeTransport{
+		scheme:   pr.scheme,
+		registry: reg,
+		inner:    t,
+	}
+
+	switch pr.challenge.Canonical() {
+	case anonymous:
+		return t, nil
+	case basic:
+		return &basicTransport{inner: t, auth: auth, target: reg.RegistryStr()}, nil
+	case bearer:
+		// We require the realm, which tells us where to send our Basic auth to turn it into Bearer auth.
+		realm, ok := pr.parameters["realm"]
+		if !ok {
+			return nil, fmt.Errorf("malformed www-authenticate, missing realm: %v", pr.parameters)
+		}
+		service, ok := pr.parameters["service"]
+		if !ok {
+			// If the service parameter is not specified, then default it to the registry
+			// with which we are talking.
+			service = reg.String()
+		}
+		bt := &bearerTransport{
+			inner:    t,
+			basic:    auth,
+			realm:    realm,
+			registry: reg,
+			service:  service,
+			scopes:   scopes,
+			scheme:   pr.scheme,
+		}
+		if err := bt.refresh(ctx); err != nil {
+			return nil, err
+		}
+		return bt, nil
+	default:
+		return nil, fmt.Errorf("unrecognized challenge: %s", pr.challenge)
+	}
+}
diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/useragent.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/useragent.go
new file mode 100644
index 000000000..74a9e71bd
--- /dev/null
+++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/useragent.go
@@ -0,0 +1,94 @@
+// Copyright 2019 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"fmt"
+	"net/http"
+	"runtime/debug"
+)
+
+var (
+	// Version can be set via:
+	// -ldflags="-X 'github.com/google/go-containerregistry/pkg/v1/remote/transport.Version=$TAG'"
+	Version string
+
+	ggcrVersion = defaultUserAgent
+)
+
+const (
+	defaultUserAgent = "go-containerregistry"
+	moduleName       = "github.com/google/go-containerregistry"
+)
+
+type userAgentTransport struct {
+	inner http.RoundTripper
+	ua    string
+}
+
+func init() {
+	if v := version(); v != "" {
+		ggcrVersion = fmt.Sprintf("%s/%s", defaultUserAgent, v)
+	}
+}
+
+func version() string {
+	if Version != "" {
+		// Version was set via ldflags, just return it.
+		return Version
+	}
+
+	info, ok := debug.ReadBuildInfo()
+	if !ok {
+		return ""
+	}
+
+	// Happens for crane and gcrane.
+	if info.Main.Path == moduleName {
+		return info.Main.Version
+	}
+
+	// Anything else.
+	for _, dep := range info.Deps {
+		if dep.Path == moduleName {
+			return dep.Version
+		}
+	}
+
+	return ""
+}
+
+// NewUserAgent returns an http.RoundTripper that sets the user agent to
+// the provided string plus additional go-containerregistry information,
+// e.g. if provided "crane/v0.1.4" and this module was built at v0.1.4:
+//
+// User-Agent: crane/v0.1.4 go-containerregistry/v0.1.4
+func NewUserAgent(inner http.RoundTripper, ua string) http.RoundTripper {
+	if ua == "" {
+		ua = ggcrVersion
+	} else {
+		ua = fmt.Sprintf("%s %s", ua, ggcrVersion)
+	}
+	return &userAgentTransport{
+		inner: inner,
+		ua:    ua,
+	}
+}
+
+// RoundTrip implements http.RoundTripper
+func (ut *userAgentTransport) RoundTrip(in *http.Request) (*http.Response, error) {
+	in.Header.Set("User-Agent", ut.ua)
+	return ut.inner.RoundTrip(in)
+}
diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go
new file mode 100644
index 000000000..d850203d5
--- /dev/null
+++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go
@@ -0,0 +1,615 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
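Before the writer code, a minimal sketch of driving `NewWithContext` from transport.go above; the registry host, repository name, and use of `authn.Anonymous` are illustrative assumptions:

```go
package main

import (
	"context"
	"net/http"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
)

func main() {
	reg, err := name.NewRegistry("registry.example.com")
	if err != nil {
		panic(err)
	}

	// Scope strings follow the token spec; PullScope/PushScope (scope.go above)
	// are the common action suffixes.
	scopes := []string{"repository:library/app:" + transport.PullScope}

	// NewWithContext pings the registry, then wraps the inner transport with
	// scheme selection, a User-Agent, and basic/bearer auth per the challenge.
	tr, err := transport.NewWithContext(context.Background(), reg, authn.Anonymous, http.DefaultTransport, scopes)
	if err != nil {
		panic(err)
	}

	client := &http.Client{Transport: tr}
	_ = client // use client for subsequent /v2/ requests
}
```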
+ +package remote + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" + + "github.com/google/go-containerregistry/pkg/internal/redact" + "github.com/google/go-containerregistry/pkg/internal/retry" + "github.com/google/go-containerregistry/pkg/logs" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" + "github.com/google/go-containerregistry/pkg/v1/types" + "golang.org/x/sync/errgroup" +) + +// Taggable is an interface that enables a manifest PUT (e.g. for tagging). +type Taggable interface { + RawManifest() ([]byte, error) +} + +// Write pushes the provided img to the specified image reference. +func Write(ref name.Reference, img v1.Image, options ...Option) error { + ls, err := img.Layers() + if err != nil { + return err + } + + o, err := makeOptions(ref.Context(), options...) + if err != nil { + return err + } + + scopes := scopesForUploadingImage(ref.Context(), ls) + tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes) + if err != nil { + return err + } + w := writer{ + repo: ref.Context(), + client: &http.Client{Transport: tr}, + context: o.context, + } + + // Upload individual layers in goroutines and collect any errors. + // If we can dedupe by the layer digest, try to do so. If we can't determine + // the digest for whatever reason, we can't dedupe and might re-upload. + var g errgroup.Group + uploaded := map[v1.Hash]bool{} + for _, l := range ls { + l := l + + // Handle foreign layers. + mt, err := l.MediaType() + if err != nil { + return err + } + if !mt.IsDistributable() { + // TODO(jonjohnsonjr): Add "allow-nondistributable-artifacts" option. + continue + } + + // Streaming layers calculate their digests while uploading them. Assume + // an error here indicates we need to upload the layer. + h, err := l.Digest() + if err == nil { + // If we can determine the layer's digest ahead of + // time, use it to dedupe uploads. + if uploaded[h] { + continue // Already uploading. + } + uploaded[h] = true + } + + // TODO(#803): Pipe through remote.WithJobs and upload these in parallel. + g.Go(func() error { + return w.uploadOne(l) + }) + } + + if l, err := partial.ConfigLayer(img); err != nil { + // We can't read the ConfigLayer, possibly because of streaming layers, + // since the layer DiffIDs haven't been calculated yet. Attempt to wait + // for the other layers to be uploaded, then try the config again. + if err := g.Wait(); err != nil { + return err + } + + // Now that all the layers are uploaded, try to upload the config file blob. + l, err := partial.ConfigLayer(img) + if err != nil { + return err + } + if err := w.uploadOne(l); err != nil { + return err + } + } else { + // We *can* read the ConfigLayer, so upload it concurrently with the layers. + g.Go(func() error { + return w.uploadOne(l) + }) + + // Wait for the layers + config. + if err := g.Wait(); err != nil { + return err + } + } + + // With all of the constituent elements uploaded, upload the manifest + // to commit the image. + return w.commitManifest(img, ref) +} + +// writer writes the elements of an image to a remote image reference. +type writer struct { + repo name.Repository + client *http.Client + context context.Context +} + +// url returns a url.Url for the specified path in the context of this remote image reference. 
+func (w *writer) url(path string) url.URL { + return url.URL{ + Scheme: w.repo.Registry.Scheme(), + Host: w.repo.RegistryStr(), + Path: path, + } +} + +// nextLocation extracts the fully-qualified URL to which we should send the next request in an upload sequence. +func (w *writer) nextLocation(resp *http.Response) (string, error) { + loc := resp.Header.Get("Location") + if len(loc) == 0 { + return "", errors.New("missing Location header") + } + u, err := url.Parse(loc) + if err != nil { + return "", err + } + + // If the location header returned is just a url path, then fully qualify it. + // We cannot simply call w.url, since there might be an embedded query string. + return resp.Request.URL.ResolveReference(u).String(), nil +} + +// checkExistingBlob checks if a blob exists already in the repository by making a +// HEAD request to the blob store API. GCR performs an existence check on the +// initiation if "mount" is specified, even if no "from" sources are specified. +// However, this is not broadly applicable to all registries, e.g. ECR. +func (w *writer) checkExistingBlob(h v1.Hash) (bool, error) { + u := w.url(fmt.Sprintf("/v2/%s/blobs/%s", w.repo.RepositoryStr(), h.String())) + + req, err := http.NewRequest(http.MethodHead, u.String(), nil) + if err != nil { + return false, err + } + + resp, err := w.client.Do(req.WithContext(w.context)) + if err != nil { + return false, err + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil { + return false, err + } + + return resp.StatusCode == http.StatusOK, nil +} + +// checkExistingManifest checks if a manifest exists already in the repository +// by making a HEAD request to the manifest API. +func (w *writer) checkExistingManifest(h v1.Hash, mt types.MediaType) (bool, error) { + u := w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.repo.RepositoryStr(), h.String())) + + req, err := http.NewRequest(http.MethodHead, u.String(), nil) + if err != nil { + return false, err + } + req.Header.Set("Accept", string(mt)) + + resp, err := w.client.Do(req.WithContext(w.context)) + if err != nil { + return false, err + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil { + return false, err + } + + return resp.StatusCode == http.StatusOK, nil +} + +// initiateUpload initiates the blob upload, which starts with a POST that can +// optionally include the hash of the layer and a list of repositories from +// which that layer might be read. On failure, an error is returned. +// On success, the layer was either mounted (nothing more to do) or a blob +// upload was initiated and the body of that blob should be sent to the returned +// location. +func (w *writer) initiateUpload(from, mount string) (location string, mounted bool, err error) { + u := w.url(fmt.Sprintf("/v2/%s/blobs/uploads/", w.repo.RepositoryStr())) + uv := url.Values{} + if mount != "" && from != "" { + // Quay will fail if we specify a "mount" without a "from". + uv["mount"] = []string{mount} + uv["from"] = []string{from} + } + u.RawQuery = uv.Encode() + + // Make the request to initiate the blob upload. 
+ req, err := http.NewRequest(http.MethodPost, u.String(), nil) + if err != nil { + return "", false, err + } + req.Header.Set("Content-Type", "application/json") + resp, err := w.client.Do(req.WithContext(w.context)) + if err != nil { + return "", false, err + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusCreated, http.StatusAccepted); err != nil { + return "", false, err + } + + // Check the response code to determine the result. + switch resp.StatusCode { + case http.StatusCreated: + // We're done, we were able to fast-path. + return "", true, nil + case http.StatusAccepted: + // Proceed to PATCH, upload has begun. + loc, err := w.nextLocation(resp) + return loc, false, err + default: + panic("Unreachable: initiateUpload") + } +} + +// streamBlob streams the contents of the blob to the specified location. +// On failure, this will return an error. On success, this will return the location +// header indicating how to commit the streamed blob. +func (w *writer) streamBlob(ctx context.Context, blob io.ReadCloser, streamLocation string) (commitLocation string, err error) { + req, err := http.NewRequest(http.MethodPatch, streamLocation, blob) + if err != nil { + return "", err + } + + resp, err := w.client.Do(req.WithContext(ctx)) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusNoContent, http.StatusAccepted, http.StatusCreated); err != nil { + return "", err + } + + // The blob has been uploaded, return the location header indicating + // how to commit this layer. + return w.nextLocation(resp) +} + +// commitBlob commits this blob by sending a PUT to the location returned from +// streaming the blob. +func (w *writer) commitBlob(location, digest string) error { + u, err := url.Parse(location) + if err != nil { + return err + } + v := u.Query() + v.Set("digest", digest) + u.RawQuery = v.Encode() + + req, err := http.NewRequest(http.MethodPut, u.String(), nil) + if err != nil { + return err + } + + resp, err := w.client.Do(req.WithContext(w.context)) + if err != nil { + return err + } + defer resp.Body.Close() + + return transport.CheckError(resp, http.StatusCreated) +} + +// uploadOne performs a complete upload of a single layer. +func (w *writer) uploadOne(l v1.Layer) error { + var from, mount string + if h, err := l.Digest(); err == nil { + // If we know the digest, this isn't a streaming layer. Do an existence + // check so we can skip uploading the layer if possible. + existing, err := w.checkExistingBlob(h) + if err != nil { + return err + } + if existing { + logs.Progress.Printf("existing blob: %v", h) + return nil + } + + mount = h.String() + } + if ml, ok := l.(*MountableLayer); ok { + if w.repo.RegistryStr() == ml.Reference.Context().RegistryStr() { + from = ml.Reference.Context().RepositoryStr() + } + } + + ctx := w.context + + tryUpload := func() error { + location, mounted, err := w.initiateUpload(from, mount) + if err != nil { + return err + } else if mounted { + h, err := l.Digest() + if err != nil { + return err + } + logs.Progress.Printf("mounted blob: %s", h.String()) + return nil + } + + // Only log layers with +json or +yaml. We can let through other stuff if it becomes popular. + // TODO(opencontainers/image-spec#791): Would be great to have an actual parser. 
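+	// (Manifest and config media types end in "+json" and are small enough to
+	// log verbatim; layer blobs are binary, so their bodies are redacted from
+	// the debug logs while streaming below.)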
+ mt, err := l.MediaType() + if err != nil { + return err + } + smt := string(mt) + if !(strings.HasSuffix(smt, "+json") || strings.HasSuffix(smt, "+yaml")) { + ctx = redact.NewContext(ctx, "omitting binary blobs from logs") + } + + blob, err := l.Compressed() + if err != nil { + return err + } + location, err = w.streamBlob(ctx, blob, location) + if err != nil { + return err + } + + h, err := l.Digest() + if err != nil { + return err + } + digest := h.String() + + if err := w.commitBlob(location, digest); err != nil { + return err + } + logs.Progress.Printf("pushed blob: %s", digest) + return nil + } + + // Try this three times, waiting 1s after first failure, 3s after second. + backoff := retry.Backoff{ + Duration: 1.0 * time.Second, + Factor: 3.0, + Jitter: 0.1, + Steps: 3, + } + + return retry.Retry(tryUpload, retry.IsTemporary, backoff) +} + +func (w *writer) writeIndex(ref name.Reference, ii v1.ImageIndex, options ...Option) error { + index, err := ii.IndexManifest() + if err != nil { + return err + } + + // TODO(#803): Pipe through remote.WithJobs and upload these in parallel. + for _, desc := range index.Manifests { + ref := ref.Context().Digest(desc.Digest.String()) + exists, err := w.checkExistingManifest(desc.Digest, desc.MediaType) + if err != nil { + return err + } + if exists { + logs.Progress.Print("existing manifest: ", desc.Digest) + continue + } + + switch desc.MediaType { + case types.OCIImageIndex, types.DockerManifestList: + ii, err := ii.ImageIndex(desc.Digest) + if err != nil { + return err + } + + if err := w.writeIndex(ref, ii); err != nil { + return err + } + case types.OCIManifestSchema1, types.DockerManifestSchema2: + img, err := ii.Image(desc.Digest) + if err != nil { + return err + } + // TODO: Ideally we could reuse this writer, but we need to know + // scopes before we do the token exchange. To be lazy here, just + // re-do the token exchange. MultiWrite fixes this. + if err := Write(ref, img, options...); err != nil { + return err + } + } + } + + // With all of the constituent elements uploaded, upload the manifest + // to commit the image. + return w.commitManifest(ii, ref) +} + +type withMediaType interface { + MediaType() (types.MediaType, error) +} + +// This is really silly, but go interfaces don't let me satisfy remote.Taggable +// with remote.Descriptor because of name collisions between method names and +// struct fields. +// +// Use reflection to either pull the v1.Descriptor out of remote.Descriptor or +// create a descriptor based on the RawManifest and (optionally) MediaType. +func unpackTaggable(t Taggable) ([]byte, *v1.Descriptor, error) { + if d, ok := t.(*Descriptor); ok { + return d.Manifest, &d.Descriptor, nil + } + b, err := t.RawManifest() + if err != nil { + return nil, nil, err + } + + // A reasonable default if Taggable doesn't implement MediaType. + mt := types.DockerManifestSchema2 + + if wmt, ok := t.(withMediaType); ok { + m, err := wmt.MediaType() + if err != nil { + return nil, nil, err + } + mt = m + } + + h, sz, err := v1.SHA256(bytes.NewReader(b)) + if err != nil { + return nil, nil, err + } + + return b, &v1.Descriptor{ + MediaType: mt, + Size: sz, + Digest: h, + }, nil +} + +// commitManifest does a PUT of the image's manifest. 
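+// (Every blob the manifest references must already exist in the repository
+// when this PUT happens, or the registry will reject it; that is why Write
+// uploads all layers and the config blob before committing the manifest.)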
+func (w *writer) commitManifest(t Taggable, ref name.Reference) error { + raw, desc, err := unpackTaggable(t) + if err != nil { + return err + } + + u := w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.repo.RepositoryStr(), ref.Identifier())) + + // Make the request to PUT the serialized manifest + req, err := http.NewRequest(http.MethodPut, u.String(), bytes.NewBuffer(raw)) + if err != nil { + return err + } + req.Header.Set("Content-Type", string(desc.MediaType)) + + resp, err := w.client.Do(req.WithContext(w.context)) + if err != nil { + return err + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted); err != nil { + return err + } + + // The image was successfully pushed! + logs.Progress.Printf("%v: digest: %v size: %d", ref, desc.Digest, desc.Size) + return nil +} + +func scopesForUploadingImage(repo name.Repository, layers []v1.Layer) []string { + // use a map as set to remove duplicates scope strings + scopeSet := map[string]struct{}{} + + for _, l := range layers { + if ml, ok := l.(*MountableLayer); ok { + // we will add push scope for ref.Context() after the loop. + // for now we ask pull scope for references of the same registry + if ml.Reference.Context().String() != repo.String() && ml.Reference.Context().Registry.String() == repo.Registry.String() { + scopeSet[ml.Reference.Scope(transport.PullScope)] = struct{}{} + } + } + } + + scopes := make([]string, 0) + // Push scope should be the first element because a few registries just look at the first scope to determine access. + scopes = append(scopes, repo.Scope(transport.PushScope)) + + for scope := range scopeSet { + scopes = append(scopes, scope) + } + + return scopes +} + +// WriteIndex pushes the provided ImageIndex to the specified image reference. +// WriteIndex will attempt to push all of the referenced manifests before +// attempting to push the ImageIndex, to retain referential integrity. +func WriteIndex(ref name.Reference, ii v1.ImageIndex, options ...Option) error { + o, err := makeOptions(ref.Context(), options...) + if err != nil { + return err + } + scopes := []string{ref.Scope(transport.PushScope)} + tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes) + if err != nil { + return err + } + w := writer{ + repo: ref.Context(), + client: &http.Client{Transport: tr}, + context: o.context, + } + return w.writeIndex(ref, ii, options...) +} + +// WriteLayer uploads the provided Layer to the specified repo. +func WriteLayer(repo name.Repository, layer v1.Layer, options ...Option) error { + o, err := makeOptions(repo, options...) + if err != nil { + return err + } + scopes := scopesForUploadingImage(repo, []v1.Layer{layer}) + tr, err := transport.NewWithContext(o.context, repo.Registry, o.auth, o.transport, scopes) + if err != nil { + return err + } + w := writer{ + repo: repo, + client: &http.Client{Transport: tr}, + context: o.context, + } + + return w.uploadOne(layer) +} + +// Tag adds a tag to the given Taggable via PUT /v2/.../manifests/ +// +// Notable implementations of Taggable are v1.Image, v1.ImageIndex, and +// remote.Descriptor. +// +// If t implements MediaType, we will use that for the Content-Type, otherwise +// we will default to types.DockerManifestSchema2. +// +// Tag does not attempt to write anything other than the manifest, so callers +// should ensure that all blobs or manifests that are referenced by t exist +// in the target registry. 
+func Tag(tag name.Tag, t Taggable, options ...Option) error { + o, err := makeOptions(tag.Context(), options...) + if err != nil { + return err + } + scopes := []string{tag.Scope(transport.PushScope)} + + // TODO: This *always* does a token exchange. For some registries, + // that's pretty slow. Some ideas; + // * Tag could take a list of tags. + // * Allow callers to pass in a transport.Transport, typecheck + // it to allow them to reuse the transport across multiple calls. + // * WithTag option to do multiple manifest PUTs in commitManifest. + tr, err := transport.NewWithContext(o.context, tag.Context().Registry, o.auth, o.transport, scopes) + if err != nil { + return err + } + w := writer{ + repo: tag.Context(), + client: &http.Client{Transport: tr}, + context: o.context, + } + + return w.commitManifest(t, tag) +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/stream/README.md b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/stream/README.md new file mode 100644 index 000000000..da0dda48d --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/stream/README.md @@ -0,0 +1,68 @@ +# `stream` + +[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/stream?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/stream) + +The `stream` package contains an implementation of +[`v1.Layer`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1#Layer) +that supports _streaming_ access, i.e. the layer contents are read once and not +buffered. + +## Usage + +```go +package main + +import ( + "os" + + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/google/go-containerregistry/pkg/v1/stream" +) + +// upload the contents of stdin as a layer to a local registry +func main() { + repo, err := name.NewRepository("localhost:5000/stream") + if err != nil { + panic(err) + } + + layer := stream.NewLayer(os.Stdin) + + if err := remote.WriteLayer(repo, layer); err != nil { + panic(err) + } +} +``` + +## Structure + +This implements the layer portion of an [image +upload](/pkg/v1/remote#anatomy-of-an-image-upload). We launch a goroutine that +is responsible for hashing the uncompressed contents to compute the `DiffID`, +gzipping them to produce the `Compressed` contents, and hashing/counting the +bytes to produce the `Digest`/`Size`. This goroutine writes to an +`io.PipeWriter`, which blocks until `Compressed` reads the gzipped contents from +the corresponding `io.PipeReader`. + +
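To make the deferred computation concrete, here is a minimal sketch (not from the vendored README; the input string is arbitrary) showing that `Digest` only succeeds once `Compressed` has been drained:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"

	"github.com/google/go-containerregistry/pkg/v1/stream"
)

func main() {
	layer := stream.NewLayer(ioutil.NopCloser(strings.NewReader("hello")))

	// Before the stream is consumed, the digest is not yet known.
	if _, err := layer.Digest(); err == stream.ErrNotComputed {
		fmt.Println("digest not computed yet")
	}

	// Draining Compressed drives the hashing/gzipping goroutine to EOF, at
	// which point digest, diffID, and size are recorded on the layer.
	rc, err := layer.Compressed()
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(ioutil.Discard, rc); err != nil {
		panic(err)
	}

	h, err := layer.Digest()
	if err != nil {
		panic(err)
	}
	fmt.Println("digest:", h)
}
```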
+ +## Caveats + +This assumes that you have an uncompressed layer (i.e. a tarball) and would like +to compress it. Calling `Uncompressed` is always an error. Likewise, other +methods are invalid until the contents of `Compressed` have been completely +consumed and `Close`d. + +Using a `stream.Layer` will likely not work without careful consideration. For +example, in the `mutate` package, we defer computing the manifest and config +file until they are actually called. This allows you to `mutate.Append` a +streaming layer to an image without accidentally consuming it. Similarly, in +`remote.Write`, if calling `Digest` on a layer fails, we attempt to upload the +layer anyway, understanding that we may be dealing with a `stream.Layer` whose +contents need to be uploaded before we can upload the config file. + +Given the [structure](#structure) of how this is implemented, forgetting to +`Close` a `stream.Layer` will leak a goroutine. diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go new file mode 100644 index 000000000..e91f57ab3 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go @@ -0,0 +1,242 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stream + +import ( + "bufio" + "compress/gzip" + "crypto/sha256" + "encoding/hex" + "errors" + "hash" + "io" + "os" + "sync" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +var ( + // ErrNotComputed is returned when the requested value is not yet + // computed because the stream has not been consumed yet. + ErrNotComputed = errors.New("value not computed until stream is consumed") + + // ErrConsumed is returned by Compressed when the underlying stream has + // already been consumed and closed. + ErrConsumed = errors.New("stream was already consumed") +) + +// Layer is a streaming implementation of v1.Layer. +type Layer struct { + blob io.ReadCloser + consumed bool + compression int + + mu sync.Mutex + digest, diffID *v1.Hash + size int64 +} + +var _ v1.Layer = (*Layer)(nil) + +// LayerOption applies options to layer +type LayerOption func(*Layer) + +// WithCompressionLevel sets the gzip compression. See `gzip.NewWriterLevel` for possible values. +func WithCompressionLevel(level int) LayerOption { + return func(l *Layer) { + l.compression = level + } +} + +// NewLayer creates a Layer from an io.ReadCloser. +func NewLayer(rc io.ReadCloser, opts ...LayerOption) *Layer { + layer := &Layer{ + blob: rc, + compression: gzip.BestSpeed, + } + + for _, opt := range opts { + opt(layer) + } + + return layer +} + +// Digest implements v1.Layer. +func (l *Layer) Digest() (v1.Hash, error) { + l.mu.Lock() + defer l.mu.Unlock() + if l.digest == nil { + return v1.Hash{}, ErrNotComputed + } + return *l.digest, nil +} + +// DiffID implements v1.Layer. 
+func (l *Layer) DiffID() (v1.Hash, error) { + l.mu.Lock() + defer l.mu.Unlock() + if l.diffID == nil { + return v1.Hash{}, ErrNotComputed + } + return *l.diffID, nil +} + +// Size implements v1.Layer. +func (l *Layer) Size() (int64, error) { + l.mu.Lock() + defer l.mu.Unlock() + if l.size == 0 { + return 0, ErrNotComputed + } + return l.size, nil +} + +// MediaType implements v1.Layer +func (l *Layer) MediaType() (types.MediaType, error) { + // We return DockerLayer for now as uncompressed layers + // are unimplemented + return types.DockerLayer, nil +} + +// Uncompressed implements v1.Layer. +func (l *Layer) Uncompressed() (io.ReadCloser, error) { + return nil, errors.New("NYI: stream.Layer.Uncompressed is not implemented") +} + +// Compressed implements v1.Layer. +func (l *Layer) Compressed() (io.ReadCloser, error) { + if l.consumed { + return nil, ErrConsumed + } + return newCompressedReader(l) +} + +type compressedReader struct { + closer io.Closer // original blob's Closer. + + h, zh hash.Hash // collects digests of compressed and uncompressed stream. + pr io.Reader + bw *bufio.Writer + count *countWriter + + l *Layer // stream.Layer to update upon Close. +} + +func newCompressedReader(l *Layer) (*compressedReader, error) { + h := sha256.New() + zh := sha256.New() + count := &countWriter{} + + // gzip.Writer writes to the output stream via pipe, a hasher to + // capture compressed digest, and a countWriter to capture compressed + // size. + pr, pw := io.Pipe() + + // Write compressed bytes to be read by the pipe.Reader, hashed by zh, and counted by count. + mw := io.MultiWriter(pw, zh, count) + + // Buffer the output of the gzip writer so we don't have to wait on pr to keep writing. + // 64K ought to be small enough for anybody. + bw := bufio.NewWriterSize(mw, 2<<16) + zw, err := gzip.NewWriterLevel(bw, l.compression) + if err != nil { + return nil, err + } + + cr := &compressedReader{ + closer: newMultiCloser(zw, l.blob), + pr: pr, + bw: bw, + h: h, + zh: zh, + count: count, + l: l, + } + go func() { + if _, err := io.Copy(io.MultiWriter(h, zw), l.blob); err != nil { + pw.CloseWithError(err) + return + } + // Now close the compressed reader, to flush the gzip stream + // and calculate digest/diffID/size. This will cause pr to + // return EOF which will cause readers of the Compressed stream + // to finish reading. + pw.CloseWithError(cr.Close()) + }() + + return cr, nil +} + +func (cr *compressedReader) Read(b []byte) (int, error) { return cr.pr.Read(b) } + +func (cr *compressedReader) Close() error { + cr.l.mu.Lock() + defer cr.l.mu.Unlock() + + // Close the inner ReadCloser. + if err := cr.closer.Close(); err != nil { + return err + } + + // Flush the buffer. + if err := cr.bw.Flush(); err != nil { + return err + } + + diffID, err := v1.NewHash("sha256:" + hex.EncodeToString(cr.h.Sum(nil))) + if err != nil { + return err + } + cr.l.diffID = &diffID + + digest, err := v1.NewHash("sha256:" + hex.EncodeToString(cr.zh.Sum(nil))) + if err != nil { + return err + } + cr.l.digest = &digest + + cr.l.size = cr.count.n + cr.l.consumed = true + return nil +} + +// countWriter counts bytes written to it. +type countWriter struct{ n int64 } + +func (c *countWriter) Write(p []byte) (int, error) { + c.n += int64(len(p)) + return len(p), nil +} + +// multiCloser is a Closer that collects multiple Closers and Closes them in order. 
+type multiCloser []io.Closer
+
+var _ io.Closer = (multiCloser)(nil)
+
+func newMultiCloser(c ...io.Closer) multiCloser { return multiCloser(c) }
+
+func (m multiCloser) Close() error {
+	for _, c := range m {
+		// NOTE: net/http will call close on success, so if we've already
+		// closed the inner rc, it's not an error.
+		if err := c.Close(); err != nil && !errors.Is(err, os.ErrClosed) {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/README.md b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/README.md
new file mode 100644
index 000000000..03f339b06
--- /dev/null
+++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/README.md
@@ -0,0 +1,280 @@
+# `tarball`
+
+[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/tarball?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/tarball)
+
+This package produces tarballs that can be consumed via `docker load`. Note
+that this is a _different_ format from the [`legacy`](/pkg/legacy/tarball)
+tarballs that are produced by `docker save`, but this package is still able to
+read the legacy tarballs produced by `docker save`.
+
+## Usage
+
+```go
+package main
+
+import (
+	"os"
+
+	"github.com/google/go-containerregistry/pkg/name"
+	"github.com/google/go-containerregistry/pkg/v1/tarball"
+)
+
+func main() {
+	// Read a tarball from os.Args[1] that contains ubuntu.
+	tag, err := name.NewTag("ubuntu")
+	if err != nil {
+		panic(err)
+	}
+	img, err := tarball.ImageFromPath(os.Args[1], &tag)
+	if err != nil {
+		panic(err)
+	}
+
+	// Write that tarball to os.Args[2] with a different tag.
+	newTag, err := name.NewTag("ubuntu:newest")
+	if err != nil {
+		panic(err)
+	}
+	f, err := os.Create(os.Args[2])
+	if err != nil {
+		panic(err)
+	}
+	defer f.Close()
+
+	if err := tarball.Write(newTag, img, f); err != nil {
+		panic(err)
+	}
+}
+```
+
+## Structure
+ +Let's look at what happens when we write out a tarball: + + +### `ubuntu:latest` + +``` +$ crane pull ubuntu ubuntu.tar && mkdir ubuntu && tar xf ubuntu.tar -C ubuntu && rm ubuntu.tar +$ tree ubuntu/ +ubuntu/ +├── 423ae2b273f4c17ceee9e8482fa8d071d90c7d052ae208e1fe4963fceb3d6954.tar.gz +├── b6b53be908de2c0c78070fff0a9f04835211b3156c4e73785747af365e71a0d7.tar.gz +├── de83a2304fa1f7c4a13708a0d15b9704f5945c2be5cbb2b3ed9b2ccb718d0b3d.tar.gz +├── f9a83bce3af0648efaa60b9bb28225b09136d2d35d0bed25ac764297076dec1b.tar.gz +├── manifest.json +└── sha256:72300a873c2ca11c70d0c8642177ce76ff69ae04d61a5813ef58d40ff66e3e7c + +0 directories, 6 files +``` + +There are a couple interesting files here. + +`manifest.json` is the entrypoint: a list of [`tarball.Descriptor`s](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/tarball#Descriptor) +that describe the images contained in this tarball. + +For each image, this has the `RepoTags` (how it was pulled), a `Config` file +that points to the image's config file, a list of `Layers`, and (optionally) +`LayerSources`. + +``` +$ jq < ubuntu/manifest.json +[ + { + "Config": "sha256:72300a873c2ca11c70d0c8642177ce76ff69ae04d61a5813ef58d40ff66e3e7c", + "RepoTags": [ + "ubuntu" + ], + "Layers": [ + "423ae2b273f4c17ceee9e8482fa8d071d90c7d052ae208e1fe4963fceb3d6954.tar.gz", + "de83a2304fa1f7c4a13708a0d15b9704f5945c2be5cbb2b3ed9b2ccb718d0b3d.tar.gz", + "f9a83bce3af0648efaa60b9bb28225b09136d2d35d0bed25ac764297076dec1b.tar.gz", + "b6b53be908de2c0c78070fff0a9f04835211b3156c4e73785747af365e71a0d7.tar.gz" + ] + } +] +``` + +The config file and layers are exactly what you would expect, and match the +registry representations of the same artifacts. You'll notice that the +`manifest.json` contains similar information as the registry manifest, but isn't +quite the same: + +``` +$ crane manifest ubuntu@sha256:0925d086715714114c1988f7c947db94064fd385e171a63c07730f1fa014e6f9 +{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/vnd.docker.container.image.v1+json", + "size": 3408, + "digest": "sha256:72300a873c2ca11c70d0c8642177ce76ff69ae04d61a5813ef58d40ff66e3e7c" + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 26692096, + "digest": "sha256:423ae2b273f4c17ceee9e8482fa8d071d90c7d052ae208e1fe4963fceb3d6954" + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 35365, + "digest": "sha256:de83a2304fa1f7c4a13708a0d15b9704f5945c2be5cbb2b3ed9b2ccb718d0b3d" + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 852, + "digest": "sha256:f9a83bce3af0648efaa60b9bb28225b09136d2d35d0bed25ac764297076dec1b" + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 163, + "digest": "sha256:b6b53be908de2c0c78070fff0a9f04835211b3156c4e73785747af365e71a0d7" + } + ] +} +``` + +This makes it difficult to maintain image digests when roundtripping images +through the tarball format, so it's not a great format if you care about +provenance. + +The ubuntu example didn't have any `LayerSources` -- let's look at another image +that does. 
+
+### `hello-world:nanoserver`
+
+```
+$ crane pull hello-world:nanoserver@sha256:63c287625c2b0b72900e562de73c0e381472a83b1b39217aef3856cd398eca0b nanoserver.tar
+$ mkdir nanoserver && tar xf nanoserver.tar -C nanoserver && rm nanoserver.tar
+$ tree nanoserver/
+nanoserver/
+├── 10d1439be4eb8819987ec2e9c140d44d74d6b42a823d57fe1953bd99948e1bc0.tar.gz
+├── a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053.tar.gz
+├── be21f08f670160cbae227e3053205b91d6bfa3de750b90c7e00bd2c511ccb63a.tar.gz
+├── manifest.json
+└── sha256:bc5d255ea81f83c8c38a982a6d29a6f2198427d258aea5f166e49856896b2da6
+
+0 directories, 5 files
+
+$ jq < nanoserver/manifest.json
+[
+  {
+    "Config": "sha256:bc5d255ea81f83c8c38a982a6d29a6f2198427d258aea5f166e49856896b2da6",
+    "RepoTags": [
+      "index.docker.io/library/hello-world:i-was-a-digest"
+    ],
+    "Layers": [
+      "a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053.tar.gz",
+      "be21f08f670160cbae227e3053205b91d6bfa3de750b90c7e00bd2c511ccb63a.tar.gz",
+      "10d1439be4eb8819987ec2e9c140d44d74d6b42a823d57fe1953bd99948e1bc0.tar.gz"
+    ],
+    "LayerSources": {
+      "sha256:26fd2d9d4c64a4f965bbc77939a454a31b607470f430b5d69fc21ded301fa55e": {
+        "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip",
+        "size": 101145811,
+        "digest": "sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053",
+        "urls": [
+          "https://mcr.microsoft.com/v2/windows/nanoserver/blobs/sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053"
+        ]
+      }
+    }
+  }
+]
+```
+
+A couple of things to note about this `manifest.json` versus the other:
+* The `RepoTags` field is a bit weird here. `hello-world` is a multi-platform
+  image, so we had to pull this image by digest, since we're (I'm) on
+  amd64/linux and wanted to grab a Windows image. Since the tarball format
+  expects a tag under `RepoTags`, and we didn't pull by tag, we replace the
+  digest with a sentinel `i-was-a-digest` "tag" to appease docker.
+* The `LayerSources` field has enough information to reconstruct the foreign layers
+  pointer when pushing/pulling from the registry. For legal reasons, Microsoft
+  doesn't want anyone but them to serve Windows base images, so the mediaType
+  here indicates a "foreign" or "non-distributable" layer with a URL for where
+  you can download it from Microsoft (see the [OCI
+  image-spec](https://github.com/opencontainers/image-spec/blob/master/layer.md#non-distributable-layers)).
+
+We can look at what's in the registry to explain both of these things:
+```
+$ crane manifest hello-world:nanoserver | jq .
+{
+  "manifests": [
+    {
+      "digest": "sha256:63c287625c2b0b72900e562de73c0e381472a83b1b39217aef3856cd398eca0b",
+      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+      "platform": {
+        "architecture": "amd64",
+        "os": "windows",
+        "os.version": "10.0.17763.1040"
+      },
+      "size": 1124
+    }
+  ],
+  "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
+  "schemaVersion": 2
+}
+
+
+# Note the media type and "urls" field.
+$ crane manifest hello-world:nanoserver@sha256:63c287625c2b0b72900e562de73c0e381472a83b1b39217aef3856cd398eca0b | jq .
+{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/vnd.docker.container.image.v1+json", + "size": 1721, + "digest": "sha256:bc5d255ea81f83c8c38a982a6d29a6f2198427d258aea5f166e49856896b2da6" + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip", + "size": 101145811, + "digest": "sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053", + "urls": [ + "https://mcr.microsoft.com/v2/windows/nanoserver/blobs/sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053" + ] + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 1669, + "digest": "sha256:be21f08f670160cbae227e3053205b91d6bfa3de750b90c7e00bd2c511ccb63a" + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 949, + "digest": "sha256:10d1439be4eb8819987ec2e9c140d44d74d6b42a823d57fe1953bd99948e1bc0" + } + ] +} +``` + +The `LayerSources` map is keyed by the diffid. Note that `sha256:26fd2d9d4c64a4f965bbc77939a454a31b607470f430b5d69fc21ded301fa55e` matches the first layer in the config file: +``` +$ jq '.[0].LayerSources' < nanoserver/manifest.json +{ + "sha256:26fd2d9d4c64a4f965bbc77939a454a31b607470f430b5d69fc21ded301fa55e": { + "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip", + "size": 101145811, + "digest": "sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053", + "urls": [ + "https://mcr.microsoft.com/v2/windows/nanoserver/blobs/sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053" + ] + } +} + +$ jq < nanoserver/sha256\:bc5d255ea81f83c8c38a982a6d29a6f2198427d258aea5f166e49856896b2da6 | jq .rootfs +{ + "type": "layers", + "diff_ids": [ + "sha256:26fd2d9d4c64a4f965bbc77939a454a31b607470f430b5d69fc21ded301fa55e", + "sha256:601cf7d78c62e4b4d32a7bbf96a17606a9cea5bd9d22ffa6f34aa431d056b0e8", + "sha256:a1e1a3bf6529adcce4d91dce2cad86c2604a66b507ccbc4d2239f3da0ec5aab9" + ] +} +``` diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/doc.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/doc.go new file mode 100644 index 000000000..4eb79bb4e --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/doc.go @@ -0,0 +1,17 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tarball provides facilities for reading/writing v1.Images from/to +// a tarball on-disk. +package tarball diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go new file mode 100644 index 000000000..08f7a8124 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go @@ -0,0 +1,401 @@ +// Copyright 2018 Google LLC All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tarball + +import ( + "archive/tar" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "sync" + + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/internal/gzip" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +type image struct { + opener Opener + manifest *Manifest + config []byte + imgDescriptor *Descriptor + + tag *name.Tag +} + +type uncompressedImage struct { + *image +} + +type compressedImage struct { + *image + manifestLock sync.Mutex // Protects manifest + manifest *v1.Manifest +} + +var _ partial.UncompressedImageCore = (*uncompressedImage)(nil) +var _ partial.CompressedImageCore = (*compressedImage)(nil) + +// Opener is a thunk for opening a tar file. +type Opener func() (io.ReadCloser, error) + +func pathOpener(path string) Opener { + return func() (io.ReadCloser, error) { + return os.Open(path) + } +} + +// ImageFromPath returns a v1.Image from a tarball located on path. +func ImageFromPath(path string, tag *name.Tag) (v1.Image, error) { + return Image(pathOpener(path), tag) +} + +// Image exposes an image from the tarball at the provided path. +func Image(opener Opener, tag *name.Tag) (v1.Image, error) { + img := &image{ + opener: opener, + tag: tag, + } + if err := img.loadTarDescriptorAndConfig(); err != nil { + return nil, err + } + + // Peek at the first layer and see if it's compressed. + if len(img.imgDescriptor.Layers) > 0 { + compressed, err := img.areLayersCompressed() + if err != nil { + return nil, err + } + if compressed { + c := compressedImage{ + image: img, + } + return partial.CompressedToImage(&c) + } + } + + uc := uncompressedImage{ + image: img, + } + return partial.UncompressedToImage(&uc) +} + +func (i *image) MediaType() (types.MediaType, error) { + return types.DockerManifestSchema2, nil +} + +// Descriptor stores the manifest data for a single image inside a `docker save` tarball. +type Descriptor struct { + Config string + RepoTags []string + Layers []string + + // Tracks foreign layer info. Key is DiffID. + LayerSources map[v1.Hash]v1.Descriptor `json:",omitempty"` +} + +// Manifest represents the manifests of all images as the `manifest.json` file in a `docker save` tarball. +type Manifest []Descriptor + +func (m Manifest) findDescriptor(tag *name.Tag) (*Descriptor, error) { + if tag == nil { + if len(m) != 1 { + return nil, errors.New("tarball must contain only a single image to be used with tarball.Image") + } + return &(m)[0], nil + } + for _, img := range m { + for _, tagStr := range img.RepoTags { + repoTag, err := name.NewTag(tagStr) + if err != nil { + return nil, err + } + + // Compare the resolved names, since there are several ways to specify the same tag. 
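+			// e.g. "ubuntu:latest" and "index.docker.io/library/ubuntu:latest" resolve to the same name.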
+ if repoTag.Name() == tag.Name() { + return &img, nil + } + } + } + return nil, fmt.Errorf("tag %s not found in tarball", tag) +} + +func (i *image) areLayersCompressed() (bool, error) { + if len(i.imgDescriptor.Layers) == 0 { + return false, errors.New("0 layers found in image") + } + layer := i.imgDescriptor.Layers[0] + blob, err := extractFileFromTar(i.opener, layer) + if err != nil { + return false, err + } + defer blob.Close() + return gzip.Is(blob) +} + +func (i *image) loadTarDescriptorAndConfig() error { + m, err := extractFileFromTar(i.opener, "manifest.json") + if err != nil { + return err + } + defer m.Close() + + if err := json.NewDecoder(m).Decode(&i.manifest); err != nil { + return err + } + + if i.manifest == nil { + return errors.New("no valid manifest.json in tarball") + } + + i.imgDescriptor, err = i.manifest.findDescriptor(i.tag) + if err != nil { + return err + } + + cfg, err := extractFileFromTar(i.opener, i.imgDescriptor.Config) + if err != nil { + return err + } + defer cfg.Close() + + i.config, err = ioutil.ReadAll(cfg) + if err != nil { + return err + } + return nil +} + +func (i *image) RawConfigFile() ([]byte, error) { + return i.config, nil +} + +// tarFile represents a single file inside a tar. Closing it closes the tar itself. +type tarFile struct { + io.Reader + io.Closer +} + +func extractFileFromTar(opener Opener, filePath string) (io.ReadCloser, error) { + f, err := opener() + if err != nil { + return nil, err + } + close := true + defer func() { + if close { + f.Close() + } + }() + + tf := tar.NewReader(f) + for { + hdr, err := tf.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + if hdr.Name == filePath { + close = false + return tarFile{ + Reader: tf, + Closer: f, + }, nil + } + } + return nil, fmt.Errorf("file %s not found in tar", filePath) +} + +// uncompressedLayerFromTarball implements partial.UncompressedLayer +type uncompressedLayerFromTarball struct { + diffID v1.Hash + mediaType types.MediaType + opener Opener + filePath string +} + +// foreignUncompressedLayer implements partial.UncompressedLayer but returns +// a custom descriptor. This allows the foreign layer URLs to be included in +// the generated image manifest for uncompressed layers. +type foreignUncompressedLayer struct { + uncompressedLayerFromTarball + desc v1.Descriptor +} + +func (fl *foreignUncompressedLayer) Descriptor() (*v1.Descriptor, error) { + return &fl.desc, nil +} + +// DiffID implements partial.UncompressedLayer +func (ulft *uncompressedLayerFromTarball) DiffID() (v1.Hash, error) { + return ulft.diffID, nil +} + +// Uncompressed implements partial.UncompressedLayer +func (ulft *uncompressedLayerFromTarball) Uncompressed() (io.ReadCloser, error) { + return extractFileFromTar(ulft.opener, ulft.filePath) +} + +func (ulft *uncompressedLayerFromTarball) MediaType() (types.MediaType, error) { + return ulft.mediaType, nil +} + +func (i *uncompressedImage) LayerByDiffID(h v1.Hash) (partial.UncompressedLayer, error) { + cfg, err := partial.ConfigFile(i) + if err != nil { + return nil, err + } + for idx, diffID := range cfg.RootFS.DiffIDs { + if diffID == h { + // Technically the media type should be 'application/tar' but given that our + // v1.Layer doesn't force consumers to care about whether the layer is compressed + // we should be fine returning the DockerLayer media type + mt := types.DockerLayer + if bd, ok := i.imgDescriptor.LayerSources[h]; ok { + // Overwrite the mediaType for foreign layers. 
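+				// Returning the stored descriptor keeps the foreign layer's digest, size, and URLs intact in the generated manifest.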
+ return &foreignUncompressedLayer{ + uncompressedLayerFromTarball: uncompressedLayerFromTarball{ + diffID: diffID, + mediaType: bd.MediaType, + opener: i.opener, + filePath: i.imgDescriptor.Layers[idx], + }, + desc: bd, + }, nil + } + return &uncompressedLayerFromTarball{ + diffID: diffID, + mediaType: mt, + opener: i.opener, + filePath: i.imgDescriptor.Layers[idx], + }, nil + } + } + return nil, fmt.Errorf("diff id %q not found", h) +} + +func (c *compressedImage) Manifest() (*v1.Manifest, error) { + c.manifestLock.Lock() + defer c.manifestLock.Unlock() + if c.manifest != nil { + return c.manifest, nil + } + + b, err := c.RawConfigFile() + if err != nil { + return nil, err + } + + cfgHash, cfgSize, err := v1.SHA256(bytes.NewReader(b)) + if err != nil { + return nil, err + } + + c.manifest = &v1.Manifest{ + SchemaVersion: 2, + MediaType: types.DockerManifestSchema2, + Config: v1.Descriptor{ + MediaType: types.DockerConfigJSON, + Size: cfgSize, + Digest: cfgHash, + }, + } + + for i, p := range c.imgDescriptor.Layers { + cfg, err := partial.ConfigFile(c) + if err != nil { + return nil, err + } + diffid := cfg.RootFS.DiffIDs[i] + if d, ok := c.imgDescriptor.LayerSources[diffid]; ok { + // If it's a foreign layer, just append the descriptor so we can avoid + // reading the entire file. + c.manifest.Layers = append(c.manifest.Layers, d) + } else { + l, err := extractFileFromTar(c.opener, p) + if err != nil { + return nil, err + } + defer l.Close() + sha, size, err := v1.SHA256(l) + if err != nil { + return nil, err + } + c.manifest.Layers = append(c.manifest.Layers, v1.Descriptor{ + MediaType: types.DockerLayer, + Size: size, + Digest: sha, + }) + } + } + return c.manifest, nil +} + +func (c *compressedImage) RawManifest() ([]byte, error) { + return partial.RawManifest(c) +} + +// compressedLayerFromTarball implements partial.CompressedLayer +type compressedLayerFromTarball struct { + desc v1.Descriptor + opener Opener + filePath string +} + +// Digest implements partial.CompressedLayer +func (clft *compressedLayerFromTarball) Digest() (v1.Hash, error) { + return clft.desc.Digest, nil +} + +// Compressed implements partial.CompressedLayer +func (clft *compressedLayerFromTarball) Compressed() (io.ReadCloser, error) { + return extractFileFromTar(clft.opener, clft.filePath) +} + +// MediaType implements partial.CompressedLayer +func (clft *compressedLayerFromTarball) MediaType() (types.MediaType, error) { + return clft.desc.MediaType, nil +} + +// Size implements partial.CompressedLayer +func (clft *compressedLayerFromTarball) Size() (int64, error) { + return clft.desc.Size, nil +} + +func (c *compressedImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) { + m, err := c.Manifest() + if err != nil { + return nil, err + } + for i, l := range m.Layers { + if l.Digest == h { + fp := c.imgDescriptor.Layers[i] + return &compressedLayerFromTarball{ + desc: l, + opener: c.opener, + filePath: fp, + }, nil + } + } + return nil, fmt.Errorf("blob %v not found", h) +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go new file mode 100644 index 000000000..dc8684ceb --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go @@ -0,0 +1,282 @@ +// Copyright 2018 Google LLC All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tarball + +import ( + "bytes" + "compress/gzip" + "io" + "io/ioutil" + "os" + "sync" + + "github.com/containerd/stargz-snapshotter/estargz" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/internal/and" + gestargz "github.com/google/go-containerregistry/pkg/v1/internal/estargz" + ggzip "github.com/google/go-containerregistry/pkg/v1/internal/gzip" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +type layer struct { + digest v1.Hash + diffID v1.Hash + size int64 + compressedopener Opener + uncompressedopener Opener + compression int + annotations map[string]string + estgzopts []estargz.Option +} + +// Descriptor implements partial.withDescriptor. +func (l *layer) Descriptor() (*v1.Descriptor, error) { + digest, err := l.Digest() + if err != nil { + return nil, err + } + return &v1.Descriptor{ + Size: l.size, + Digest: digest, + Annotations: l.annotations, + MediaType: types.DockerLayer, + }, nil +} + +// Digest implements v1.Layer +func (l *layer) Digest() (v1.Hash, error) { + return l.digest, nil +} + +// DiffID implements v1.Layer +func (l *layer) DiffID() (v1.Hash, error) { + return l.diffID, nil +} + +// Compressed implements v1.Layer +func (l *layer) Compressed() (io.ReadCloser, error) { + return l.compressedopener() +} + +// Uncompressed implements v1.Layer +func (l *layer) Uncompressed() (io.ReadCloser, error) { + return l.uncompressedopener() +} + +// Size implements v1.Layer +func (l *layer) Size() (int64, error) { + return l.size, nil +} + +// MediaType implements v1.Layer +func (l *layer) MediaType() (types.MediaType, error) { + return types.DockerLayer, nil +} + +// LayerOption applies options to layer +type LayerOption func(*layer) + +// WithCompressionLevel is a functional option for overriding the default +// compression level used for compressing uncompressed tarballs. +func WithCompressionLevel(level int) LayerOption { + return func(l *layer) { + l.compression = level + } +} + +// WithCompressedCaching is a functional option that overrides the +// logic for accessing the compressed bytes to memoize the result +// and avoid expensive repeated gzips. +func WithCompressedCaching(l *layer) { + var once sync.Once + var err error + + buf := bytes.NewBuffer(nil) + og := l.compressedopener + + l.compressedopener = func() (io.ReadCloser, error) { + once.Do(func() { + var rc io.ReadCloser + rc, err = og() + if err == nil { + defer rc.Close() + _, err = io.Copy(buf, rc) + } + }) + if err != nil { + return nil, err + } + + return ioutil.NopCloser(bytes.NewBuffer(buf.Bytes())), nil + } +} + +// WithEstargzOptions is a functional option that allow the caller to pass +// through estargz.Options to the underlying compression layer. This is +// only meaningful when estargz is enabled. 
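+// WithEstargz appends these options when it converts the layer via estargz.ReadCloser.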
+func WithEstargzOptions(opts ...estargz.Option) LayerOption { + return func(l *layer) { + l.estgzopts = opts + } +} + +// WithEstargz is a functional option that explicitly enables estargz support. +func WithEstargz(l *layer) { + oguncompressed := l.uncompressedopener + estargz := func() (io.ReadCloser, error) { + crc, err := oguncompressed() + if err != nil { + return nil, err + } + eopts := append(l.estgzopts, estargz.WithCompressionLevel(l.compression)) + rc, h, err := gestargz.ReadCloser(crc, eopts...) + if err != nil { + return nil, err + } + l.annotations[estargz.TOCJSONDigestAnnotation] = h.String() + return &and.ReadCloser{ + Reader: rc, + CloseFunc: func() error { + err := rc.Close() + if err != nil { + return err + } + // As an optimization, leverage the DiffID exposed by the estargz ReadCloser + l.diffID, err = v1.NewHash(rc.DiffID().String()) + return err + }, + }, nil + } + uncompressed := func() (io.ReadCloser, error) { + urc, err := estargz() + if err != nil { + return nil, err + } + return ggzip.UnzipReadCloser(urc) + } + + l.compressedopener = estargz + l.uncompressedopener = uncompressed +} + +// LayerFromFile returns a v1.Layer given a tarball +func LayerFromFile(path string, opts ...LayerOption) (v1.Layer, error) { + opener := func() (io.ReadCloser, error) { + return os.Open(path) + } + return LayerFromOpener(opener, opts...) +} + +// LayerFromOpener returns a v1.Layer given an Opener function. +// The Opener may return either an uncompressed tarball (common), +// or a compressed tarball (uncommon). +// +// When using this in conjunction with something like remote.Write +// the uncompressed path may end up gzipping things multiple times: +// 1. Compute the layer SHA256 +// 2. Upload the compressed layer. +// Since gzip can be expensive, we support an option to memoize the +// compression that can be passed here: tarball.WithCompressedCaching +func LayerFromOpener(opener Opener, opts ...LayerOption) (v1.Layer, error) { + rc, err := opener() + if err != nil { + return nil, err + } + defer rc.Close() + + compressed, err := ggzip.Is(rc) + if err != nil { + return nil, err + } + + layer := &layer{ + compression: gzip.BestSpeed, + annotations: make(map[string]string, 1), + } + + if estgz := os.Getenv("GGCR_EXPERIMENT_ESTARGZ"); estgz == "1" { + opts = append([]LayerOption{WithEstargz}, opts...) + } + + if compressed { + layer.compressedopener = opener + layer.uncompressedopener = func() (io.ReadCloser, error) { + urc, err := opener() + if err != nil { + return nil, err + } + return ggzip.UnzipReadCloser(urc) + } + } else { + layer.uncompressedopener = opener + layer.compressedopener = func() (io.ReadCloser, error) { + crc, err := opener() + if err != nil { + return nil, err + } + return ggzip.ReadCloserLevel(crc, layer.compression), nil + } + } + + for _, opt := range opts { + opt(layer) + } + + if layer.digest, layer.size, err = computeDigest(layer.compressedopener); err != nil { + return nil, err + } + + empty := v1.Hash{} + if layer.diffID == empty { + if layer.diffID, err = computeDiffID(layer.uncompressedopener); err != nil { + return nil, err + } + } + + return layer, nil +} + +// LayerFromReader returns a v1.Layer given a io.Reader. +func LayerFromReader(reader io.Reader, opts ...LayerOption) (v1.Layer, error) { + // Buffering due to Opener requiring multiple calls. + a, err := ioutil.ReadAll(reader) + if err != nil { + return nil, err + } + return LayerFromOpener(func() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewReader(a)), nil + }, opts...) 
+} + +func computeDigest(opener Opener) (v1.Hash, int64, error) { + rc, err := opener() + if err != nil { + return v1.Hash{}, 0, err + } + defer rc.Close() + + return v1.SHA256(rc) +} + +func computeDiffID(opener Opener) (v1.Hash, error) { + rc, err := opener() + if err != nil { + return v1.Hash{}, err + } + defer rc.Close() + + digest, _, err := v1.SHA256(rc) + return digest, err +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go new file mode 100644 index 000000000..f9298158e --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go @@ -0,0 +1,455 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tarball + +import ( + "archive/tar" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "sort" + "strings" + + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" +) + +// WriteToFile writes in the compressed format to a tarball, on disk. +// This is just syntactic sugar wrapping tarball.Write with a new file. +func WriteToFile(p string, ref name.Reference, img v1.Image, opts ...WriteOption) error { + w, err := os.Create(p) + if err != nil { + return err + } + defer w.Close() + + return Write(ref, img, w, opts...) +} + +// MultiWriteToFile writes in the compressed format to a tarball, on disk. +// This is just syntactic sugar wrapping tarball.MultiWrite with a new file. +func MultiWriteToFile(p string, tagToImage map[name.Tag]v1.Image, opts ...WriteOption) error { + refToImage := make(map[name.Reference]v1.Image, len(tagToImage)) + for i, d := range tagToImage { + refToImage[i] = d + } + return MultiRefWriteToFile(p, refToImage, opts...) +} + +// MultiRefWriteToFile writes in the compressed format to a tarball, on disk. +// This is just syntactic sugar wrapping tarball.MultiRefWrite with a new file. +func MultiRefWriteToFile(p string, refToImage map[name.Reference]v1.Image, opts ...WriteOption) error { + w, err := os.Create(p) + if err != nil { + return err + } + defer w.Close() + + return MultiRefWrite(refToImage, w, opts...) +} + +// Write is a wrapper to write a single image and tag to a tarball. +func Write(ref name.Reference, img v1.Image, w io.Writer, opts ...WriteOption) error { + return MultiRefWrite(map[name.Reference]v1.Image{ref: img}, w, opts...) +} + +// MultiWrite writes the contents of each image to the provided reader, in the compressed format. +// The contents are written in the following format: +// One manifest.json file at the top level containing information about several images. +// One file for each layer, named after the layer's SHA. +// One file for the config blob, named after its SHA. 
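+// Layers shared between images are deduplicated and written only once.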
+func MultiWrite(tagToImage map[name.Tag]v1.Image, w io.Writer, opts ...WriteOption) error { + refToImage := make(map[name.Reference]v1.Image, len(tagToImage)) + for i, d := range tagToImage { + refToImage[i] = d + } + return MultiRefWrite(refToImage, w, opts...) +} + +// MultiRefWrite writes the contents of each image to the provided reader, in the compressed format. +// The contents are written in the following format: +// One manifest.json file at the top level containing information about several images. +// One file for each layer, named after the layer's SHA. +// One file for the config blob, named after its SHA. +func MultiRefWrite(refToImage map[name.Reference]v1.Image, w io.Writer, opts ...WriteOption) error { + // process options + o := &writeOptions{ + updates: nil, + } + for _, option := range opts { + if err := option(o); err != nil { + return err + } + } + + size, _, mBytes, err := getSizeAndManifest(refToImage) + if err != nil { + return sendUpdateReturn(o, err) + } + + return writeImagesToTar(refToImage, mBytes, size, w, o) +} + +// sendUpdateReturn return the passed in error message, also sending on update channel, if it exists +func sendUpdateReturn(o *writeOptions, err error) error { + if o != nil && o.updates != nil { + o.updates <- v1.Update{ + Error: err, + } + } + return err +} + +// sendProgressWriterReturn return the passed in error message, also sending on update channel, if it exists, along with downloaded information +func sendProgressWriterReturn(pw *progressWriter, err error) error { + if pw != nil { + return pw.Error(err) + } + return err +} + +// writeImagesToTar writes the images to the tarball +func writeImagesToTar(refToImage map[name.Reference]v1.Image, m []byte, size int64, w io.Writer, o *writeOptions) (err error) { + if w == nil { + return sendUpdateReturn(o, errors.New("must pass valid writer")) + } + imageToTags := dedupRefToImage(refToImage) + + tw := w + var pw *progressWriter + + // we only calculate the sizes and use a progressWriter if we were provided + // an option with a progress channel + if o != nil && o.updates != nil { + pw = &progressWriter{ + w: w, + updates: o.updates, + size: size, + } + tw = pw + } + + tf := tar.NewWriter(tw) + defer tf.Close() + + seenLayerDigests := make(map[string]struct{}) + + for img := range imageToTags { + // Write the config. + cfgName, err := img.ConfigName() + if err != nil { + return sendProgressWriterReturn(pw, err) + } + cfgBlob, err := img.RawConfigFile() + if err != nil { + return sendProgressWriterReturn(pw, err) + } + if err := writeTarEntry(tf, cfgName.String(), bytes.NewReader(cfgBlob), int64(len(cfgBlob))); err != nil { + return sendProgressWriterReturn(pw, err) + } + + // Write the layers. + layers, err := img.Layers() + if err != nil { + return sendProgressWriterReturn(pw, err) + } + layerFiles := make([]string, len(layers)) + for i, l := range layers { + d, err := l.Digest() + if err != nil { + return sendProgressWriterReturn(pw, err) + } + // Munge the file name to appease ancient technology. + // + // tar assumes anything with a colon is a remote tape drive: + // https://www.gnu.org/software/tar/manual/html_section/tar_45.html + // Drop the algorithm prefix, e.g. 
"sha256:" + hex := d.Hex + + // gunzip expects certain file extensions: + // https://www.gnu.org/software/gzip/manual/html_node/Overview.html + layerFiles[i] = fmt.Sprintf("%s.tar.gz", hex) + + if _, ok := seenLayerDigests[hex]; ok { + continue + } + seenLayerDigests[hex] = struct{}{} + + r, err := l.Compressed() + if err != nil { + return sendProgressWriterReturn(pw, err) + } + blobSize, err := l.Size() + if err != nil { + return sendProgressWriterReturn(pw, err) + } + + if err := writeTarEntry(tf, layerFiles[i], r, blobSize); err != nil { + return sendProgressWriterReturn(pw, err) + } + } + } + if err := writeTarEntry(tf, "manifest.json", bytes.NewReader(m), int64(len(m))); err != nil { + return sendProgressWriterReturn(pw, err) + } + + // be sure to close the tar writer so everything is flushed out before we send our EOF + if err := tf.Close(); err != nil { + return sendProgressWriterReturn(pw, err) + } + // send an EOF to indicate finished on the channel, but nil as our return error + _ = sendProgressWriterReturn(pw, io.EOF) + return nil +} + +// calculateManifest calculates the manifest and optionally the size of the tar file +func calculateManifest(refToImage map[name.Reference]v1.Image) (m Manifest, err error) { + imageToTags := dedupRefToImage(refToImage) + + if len(imageToTags) == 0 { + return nil, errors.New("set of images is empty") + } + + for img, tags := range imageToTags { + cfgName, err := img.ConfigName() + if err != nil { + return nil, err + } + + // Store foreign layer info. + layerSources := make(map[v1.Hash]v1.Descriptor) + + // Write the layers. + layers, err := img.Layers() + if err != nil { + return nil, err + } + layerFiles := make([]string, len(layers)) + for i, l := range layers { + d, err := l.Digest() + if err != nil { + return nil, err + } + // Munge the file name to appease ancient technology. + // + // tar assumes anything with a colon is a remote tape drive: + // https://www.gnu.org/software/tar/manual/html_section/tar_45.html + // Drop the algorithm prefix, e.g. "sha256:" + hex := d.Hex + + // gunzip expects certain file extensions: + // https://www.gnu.org/software/gzip/manual/html_node/Overview.html + layerFiles[i] = fmt.Sprintf("%s.tar.gz", hex) + + // Add to LayerSources if it's a foreign layer. + desc, err := partial.BlobDescriptor(img, d) + if err != nil { + return nil, err + } + if !desc.MediaType.IsDistributable() { + diffid, err := partial.BlobToDiffID(img, d) + if err != nil { + return nil, err + } + layerSources[diffid] = *desc + } + } + + // Generate the tar descriptor and write it. + m = append(m, Descriptor{ + Config: cfgName.String(), + RepoTags: tags, + Layers: layerFiles, + LayerSources: layerSources, + }) + } + // sort by name of the repotags so it is consistent. 
Alternatively, we could sort by hash of the + // descriptor, but that would make it hard for humans to process + sort.Slice(m, func(i, j int) bool { + return strings.Join(m[i].RepoTags, ",") < strings.Join(m[j].RepoTags, ",") + }) + + return m, nil +} + +// CalculateSize calculates the expected complete size of the output tar file +func CalculateSize(refToImage map[name.Reference]v1.Image) (size int64, err error) { + size, _, _, err = getSizeAndManifest(refToImage) + return size, err +} + +func getSizeAndManifest(refToImage map[name.Reference]v1.Image) (size int64, m Manifest, mBytes []byte, err error) { + m, err = calculateManifest(refToImage) + if err != nil { + return 0, nil, nil, fmt.Errorf("unable to calculate manifest: %v", err) + } + mBytes, err = json.Marshal(m) + if err != nil { + return 0, nil, nil, fmt.Errorf("could not marshall manifest to bytes: %v", err) + } + + size, err = calculateTarballSize(refToImage, mBytes) + if err != nil { + return 0, nil, nil, fmt.Errorf("error calculating tarball size: %v", err) + } + return size, m, mBytes, nil +} + +// calculateTarballSize calculates the size of the tar file +func calculateTarballSize(refToImage map[name.Reference]v1.Image, mBytes []byte) (size int64, err error) { + imageToTags := dedupRefToImage(refToImage) + + for img, name := range imageToTags { + manifest, err := img.Manifest() + if err != nil { + return size, fmt.Errorf("unable to get manifest for img %s: %v", name, err) + } + size += calculateSingleFileInTarSize(manifest.Config.Size) + for _, l := range manifest.Layers { + size += calculateSingleFileInTarSize(l.Size) + } + } + // add the manifest + size += calculateSingleFileInTarSize(int64(len(mBytes))) + + // add the two padding blocks that indicate end of a tar file + size += 1024 + return size, nil +} + +func dedupRefToImage(refToImage map[name.Reference]v1.Image) map[v1.Image][]string { + imageToTags := make(map[v1.Image][]string) + + for ref, img := range refToImage { + if tag, ok := ref.(name.Tag); ok { + if tags, ok := imageToTags[img]; !ok || tags == nil { + imageToTags[img] = []string{} + } + // Docker cannot load tarballs without an explicit tag: + // https://github.com/google/go-containerregistry/issues/890 + // + // We can't use the fully qualified tag.Name() because of rules_docker: + // https://github.com/google/go-containerregistry/issues/527 + // + // If the tag is "latest", but tag.String() doesn't end in ":latest", + // just append it. Kind of gross, but should work for now. 
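+			// e.g. "gcr.io/foo/bar" is written to RepoTags as "gcr.io/foo/bar:latest".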
+ ts := tag.String() + if tag.Identifier() == name.DefaultTag && !strings.HasSuffix(ts, ":"+name.DefaultTag) { + ts = fmt.Sprintf("%s:%s", ts, name.DefaultTag) + } + imageToTags[img] = append(imageToTags[img], ts) + } else { + if _, ok := imageToTags[img]; !ok { + imageToTags[img] = nil + } + } + } + + return imageToTags +} + +// writeTarEntry writes a file to the provided writer with a corresponding tar header +func writeTarEntry(tf *tar.Writer, path string, r io.Reader, size int64) error { + hdr := &tar.Header{ + Mode: 0644, + Typeflag: tar.TypeReg, + Size: size, + Name: path, + } + if err := tf.WriteHeader(hdr); err != nil { + return err + } + _, err := io.Copy(tf, r) + return err +} + +// ComputeManifest get the manifest.json that will be written to the tarball +// for multiple references +func ComputeManifest(refToImage map[name.Reference]v1.Image) (Manifest, error) { + return calculateManifest(refToImage) +} + +// WriteOption a function option to pass to Write() +type WriteOption func(*writeOptions) error +type writeOptions struct { + updates chan<- v1.Update +} + +// WithProgress create a WriteOption for passing to Write() that enables +// a channel to receive updates as they are downloaded and written to disk. +func WithProgress(updates chan<- v1.Update) WriteOption { + return func(o *writeOptions) error { + o.updates = updates + return nil + } +} + +// progressWriter is a writer which will send the download progress +type progressWriter struct { + w io.Writer + updates chan<- v1.Update + size, complete int64 +} + +func (pw *progressWriter) Write(p []byte) (int, error) { + n, err := pw.w.Write(p) + if err != nil { + return n, err + } + + pw.complete += int64(n) + + pw.updates <- v1.Update{ + Total: pw.size, + Complete: pw.complete, + } + + return n, err +} + +func (pw *progressWriter) Error(err error) error { + pw.updates <- v1.Update{ + Total: pw.size, + Complete: pw.complete, + Error: err, + } + return err +} + +func (pw *progressWriter) Close() error { + pw.updates <- v1.Update{ + Total: pw.size, + Complete: pw.complete, + Error: io.EOF, + } + return io.EOF +} + +// calculateSingleFileInTarSize calculate the size a file will take up in a tar archive, +// given the input data. Provided by rounding up to nearest whole block (512) +// and adding header 512 +func calculateSingleFileInTarSize(in int64) (out int64) { + // doing this manually, because math.Round() works with float64 + out += in + if remainder := out % 512; remainder != 0 { + out += (512 - remainder) + } + out += 512 + return out +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go new file mode 100644 index 000000000..21f223650 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go @@ -0,0 +1,71 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package types + +// MediaType is an enumeration of the supported mime types that an element of an image might have. +type MediaType string + +// The collection of known MediaType values. +const ( + OCIContentDescriptor MediaType = "application/vnd.oci.descriptor.v1+json" + OCIImageIndex MediaType = "application/vnd.oci.image.index.v1+json" + OCIManifestSchema1 MediaType = "application/vnd.oci.image.manifest.v1+json" + OCIConfigJSON MediaType = "application/vnd.oci.image.config.v1+json" + OCILayer MediaType = "application/vnd.oci.image.layer.v1.tar+gzip" + OCIRestrictedLayer MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" + OCIUncompressedLayer MediaType = "application/vnd.oci.image.layer.v1.tar" + OCIUncompressedRestrictedLayer MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar" + + DockerManifestSchema1 MediaType = "application/vnd.docker.distribution.manifest.v1+json" + DockerManifestSchema1Signed MediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws" + DockerManifestSchema2 MediaType = "application/vnd.docker.distribution.manifest.v2+json" + DockerManifestList MediaType = "application/vnd.docker.distribution.manifest.list.v2+json" + DockerLayer MediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip" + DockerConfigJSON MediaType = "application/vnd.docker.container.image.v1+json" + DockerPluginConfig MediaType = "application/vnd.docker.plugin.v1+json" + DockerForeignLayer MediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" + DockerUncompressedLayer MediaType = "application/vnd.docker.image.rootfs.diff.tar" + + OCIVendorPrefix = "vnd.oci" + DockerVendorPrefix = "vnd.docker" +) + +// IsDistributable returns true if a layer is distributable, see: +// https://github.com/opencontainers/image-spec/blob/master/layer.md#non-distributable-layers +func (m MediaType) IsDistributable() bool { + switch m { + case DockerForeignLayer, OCIRestrictedLayer, OCIUncompressedRestrictedLayer: + return false + } + return true +} + +// IsImage returns true if the mediaType represents an image manifest, as opposed to something else, like an index. +func (m MediaType) IsImage() bool { + switch m { + case OCIManifestSchema1, DockerManifestSchema2: + return true + } + return false +} + +// IsIndex returns true if the mediaType represents an index, as opposed to something else, like an image. +func (m MediaType) IsIndex() bool { + switch m { + case OCIImageIndex, DockerManifestList: + return true + } + return false +} diff --git a/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go new file mode 100644 index 000000000..3f92f0913 --- /dev/null +++ b/extender/code/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go @@ -0,0 +1,318 @@ +// +build !ignore_autogenerated + +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Config) DeepCopyInto(out *Config) { + *out = *in + if in.Cmd != nil { + in, out := &in.Cmd, &out.Cmd + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Healthcheck != nil { + in, out := &in.Healthcheck, &out.Healthcheck + *out = new(HealthConfig) + (*in).DeepCopyInto(*out) + } + if in.Entrypoint != nil { + in, out := &in.Entrypoint, &out.Entrypoint + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OnBuild != nil { + in, out := &in.OnBuild, &out.OnBuild + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExposedPorts != nil { + in, out := &in.ExposedPorts, &out.ExposedPorts + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Shell != nil { + in, out := &in.Shell, &out.Shell + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigFile) DeepCopyInto(out *ConfigFile) { + *out = *in + in.Created.DeepCopyInto(&out.Created) + if in.History != nil { + in, out := &in.History, &out.History + *out = make([]History, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.RootFS.DeepCopyInto(&out.RootFS) + in.Config.DeepCopyInto(&out.Config) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigFile. +func (in *ConfigFile) DeepCopy() *ConfigFile { + if in == nil { + return nil + } + out := new(ConfigFile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Descriptor) DeepCopyInto(out *Descriptor) { + *out = *in + out.Digest = in.Digest + if in.URLs != nil { + in, out := &in.URLs, &out.URLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Platform != nil { + in, out := &in.Platform, &out.Platform + *out = new(Platform) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Descriptor. +func (in *Descriptor) DeepCopy() *Descriptor { + if in == nil { + return nil + } + out := new(Descriptor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Hash) DeepCopyInto(out *Hash) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hash. +func (in *Hash) DeepCopy() *Hash { + if in == nil { + return nil + } + out := new(Hash) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthConfig) DeepCopyInto(out *HealthConfig) { + *out = *in + if in.Test != nil { + in, out := &in.Test, &out.Test + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthConfig. +func (in *HealthConfig) DeepCopy() *HealthConfig { + if in == nil { + return nil + } + out := new(HealthConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *History) DeepCopyInto(out *History) { + *out = *in + in.Created.DeepCopyInto(&out.Created) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new History. +func (in *History) DeepCopy() *History { + if in == nil { + return nil + } + out := new(History) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexManifest) DeepCopyInto(out *IndexManifest) { + *out = *in + if in.Manifests != nil { + in, out := &in.Manifests, &out.Manifests + *out = make([]Descriptor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexManifest. +func (in *IndexManifest) DeepCopy() *IndexManifest { + if in == nil { + return nil + } + out := new(IndexManifest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Manifest) DeepCopyInto(out *Manifest) { + *out = *in + in.Config.DeepCopyInto(&out.Config) + if in.Layers != nil { + in, out := &in.Layers, &out.Layers + *out = make([]Descriptor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Manifest. +func (in *Manifest) DeepCopy() *Manifest { + if in == nil { + return nil + } + out := new(Manifest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Platform) DeepCopyInto(out *Platform) { + *out = *in + if in.OSFeatures != nil { + in, out := &in.OSFeatures, &out.OSFeatures + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Features != nil { + in, out := &in.Features, &out.Features + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform. 
+func (in *Platform) DeepCopy() *Platform { + if in == nil { + return nil + } + out := new(Platform) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RootFS) DeepCopyInto(out *RootFS) { + *out = *in + if in.DiffIDs != nil { + in, out := &in.DiffIDs, &out.DiffIDs + *out = make([]Hash, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootFS. +func (in *RootFS) DeepCopy() *RootFS { + if in == nil { + return nil + } + out := new(RootFS) + in.DeepCopyInto(out) + return out +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Time. +func (in *Time) DeepCopy() *Time { + if in == nil { + return nil + } + out := new(Time) + in.DeepCopyInto(out) + return out +} diff --git a/extender/code/vendor/github.com/google/gofuzz/.travis.yml b/extender/code/vendor/github.com/google/gofuzz/.travis.yml new file mode 100644 index 000000000..f8684d99f --- /dev/null +++ b/extender/code/vendor/github.com/google/gofuzz/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.4 + - 1.3 + - 1.2 + - tip + +install: + - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi + +script: + - go test -cover diff --git a/extender/code/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/extender/code/vendor/github.com/google/gofuzz/CONTRIBUTING.md new file mode 100644 index 000000000..51cf5cd1a --- /dev/null +++ b/extender/code/vendor/github.com/google/gofuzz/CONTRIBUTING.md @@ -0,0 +1,67 @@ +# How to contribute # + +We'd love to accept your patches and contributions to this project. There are +a just a few small guidelines you need to follow. + + +## Contributor License Agreement ## + +Contributions to any Google project must be accompanied by a Contributor +License Agreement. This is not a copyright **assignment**, it simply gives +Google permission to use and redistribute your contributions as part of the +project. + + * If you are an individual writing original source code and you're sure you + own the intellectual property, then you'll need to sign an [individual + CLA][]. + + * If you work for a company that wants to allow you to contribute your work, + then you'll need to sign a [corporate CLA][]. + +You generally only need to submit a CLA once, so if you've already submitted +one (even if it was for a different project), you probably don't need to do it +again. + +[individual CLA]: https://developers.google.com/open-source/cla/individual +[corporate CLA]: https://developers.google.com/open-source/cla/corporate + + +## Submitting a patch ## + + 1. It's generally best to start by opening a new issue describing the bug or + feature you're intending to fix. Even if you think it's relatively minor, + it's helpful to know what people are working on. Mention in the initial + issue that you are planning to work on that bug or feature so that it can + be assigned to you. + + 1. Follow the normal process of [forking][] the project, and setup a new + branch to work in. It's important that each group of changes be done in + separate branches in order to ensure that a pull request only includes the + commits related to that bug or feature. + + 1. Go makes it very simple to ensure properly formatted code, so always run + `go fmt` on your code before committing it. You should also run + [golint][] over your code. 
As noted in the [golint readme][], it's not + strictly necessary that your code be completely "lint-free", but this will + help you find common style issues. + + 1. Any significant changes should almost always be accompanied by tests. The + project already has good test coverage, so look at some of the existing + tests if you're unsure how to go about it. [gocov][] and [gocov-html][] + are invaluable tools for seeing which parts of your code aren't being + exercised by your tests. + + 1. Do your best to have [well-formed commit messages][] for each change. + This provides consistency throughout the project, and ensures that commit + messages are able to be formatted properly by various git tools. + + 1. Finally, push the commits to your fork and submit a [pull request][]. + +[forking]: https://help.github.com/articles/fork-a-repo +[golint]: https://github.com/golang/lint +[golint readme]: https://github.com/golang/lint/blob/master/README +[gocov]: https://github.com/axw/gocov +[gocov-html]: https://github.com/matm/gocov-html +[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html +[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits +[pull request]: https://help.github.com/articles/creating-a-pull-request diff --git a/extender/code/vendor/github.com/google/gofuzz/LICENSE b/extender/code/vendor/github.com/google/gofuzz/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/extender/code/vendor/github.com/google/gofuzz/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/extender/code/vendor/github.com/google/gofuzz/README.md b/extender/code/vendor/github.com/google/gofuzz/README.md new file mode 100644 index 000000000..386c2a457 --- /dev/null +++ b/extender/code/vendor/github.com/google/gofuzz/README.md @@ -0,0 +1,71 @@ +gofuzz +====== + +gofuzz is a library for populating go objects with random values. + +[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.svg)](https://godoc.org/github.com/google/gofuzz) +[![Travis](https://travis-ci.org/google/gofuzz.svg?branch=master)](https://travis-ci.org/google/gofuzz) + +This is useful for testing: + +* Do your project's objects really serialize/unserialize correctly in all cases? +* Is there an incorrectly formatted object that will cause your project to panic? + +Import with ```import "github.com/google/gofuzz"``` + +You can use it on single variables: +```go +f := fuzz.New() +var myInt int +f.Fuzz(&myInt) // myInt gets a random value. 
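+// Note that Fuzz must be given a pointer; passing a value panics.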
+``` + +You can use it on maps: +```go +f := fuzz.New().NilChance(0).NumElements(1, 1) +var myMap map[ComplexKeyType]string +f.Fuzz(&myMap) // myMap will have exactly one element. +``` + +Customize the chance of getting a nil pointer: +```go +f := fuzz.New().NilChance(.5) +var fancyStruct struct { + A, B, C, D *string +} +f.Fuzz(&fancyStruct) // About half the pointers should be set. +``` + +You can even customize the randomization completely if needed: +```go +type MyEnum string +const ( + A MyEnum = "A" + B MyEnum = "B" +) +type MyInfo struct { + Type MyEnum + AInfo *string + BInfo *string +} + +f := fuzz.New().NilChance(0).Funcs( + func(e *MyInfo, c fuzz.Continue) { + switch c.Intn(2) { + case 0: + e.Type = A + c.Fuzz(&e.AInfo) + case 1: + e.Type = B + c.Fuzz(&e.BInfo) + } + }, +) + +var myObject MyInfo +f.Fuzz(&myObject) // Type will correspond to whether A or B info is set. +``` + +See more examples in ```example_test.go```. + +Happy testing! diff --git a/extender/code/vendor/github.com/google/gofuzz/doc.go b/extender/code/vendor/github.com/google/gofuzz/doc.go new file mode 100644 index 000000000..9f9956d4a --- /dev/null +++ b/extender/code/vendor/github.com/google/gofuzz/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package fuzz is a library for populating go objects with random values. +package fuzz diff --git a/extender/code/vendor/github.com/google/gofuzz/fuzz.go b/extender/code/vendor/github.com/google/gofuzz/fuzz.go new file mode 100644 index 000000000..da0a5f938 --- /dev/null +++ b/extender/code/vendor/github.com/google/gofuzz/fuzz.go @@ -0,0 +1,506 @@ +/* +Copyright 2014 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fuzz + +import ( + "fmt" + "math/rand" + "reflect" + "regexp" + "time" +) + +// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type. +type fuzzFuncMap map[reflect.Type]reflect.Value + +// Fuzzer knows how to fill any object with random fields. +type Fuzzer struct { + fuzzFuncs fuzzFuncMap + defaultFuzzFuncs fuzzFuncMap + r *rand.Rand + nilChance float64 + minElements int + maxElements int + maxDepth int + skipFieldPatterns []*regexp.Regexp +} + +// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs, +// RandSource, NilChance, or NumElements in any order. 
+func New() *Fuzzer { + return NewWithSeed(time.Now().UnixNano()) +} + +func NewWithSeed(seed int64) *Fuzzer { + f := &Fuzzer{ + defaultFuzzFuncs: fuzzFuncMap{ + reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime), + }, + + fuzzFuncs: fuzzFuncMap{}, + r: rand.New(rand.NewSource(seed)), + nilChance: .2, + minElements: 1, + maxElements: 10, + maxDepth: 100, + } + return f +} + +// Funcs adds each entry in fuzzFuncs as a custom fuzzing function. +// +// Each entry in fuzzFuncs must be a function taking two parameters. +// The first parameter must be a pointer or map. It is the variable that +// function will fill with random data. The second parameter must be a +// fuzz.Continue, which will provide a source of randomness and a way +// to automatically continue fuzzing smaller pieces of the first parameter. +// +// These functions are called sensibly, e.g., if you wanted custom string +// fuzzing, the function `func(s *string, c fuzz.Continue)` would get +// called and passed the address of strings. Maps and pointers will always +// be made/new'd for you, ignoring the NilChance option. For slices, it +// doesn't make much sense to pre-create them--Fuzzer doesn't know how +// long you want your slice--so take a pointer to a slice, and make it +// yourself. (If you don't want your map/pointer type pre-made, take a +// pointer to it, and make it yourself.) See the examples for a range of +// custom functions. +func (f *Fuzzer) Funcs(fuzzFuncs ...interface{}) *Fuzzer { + for i := range fuzzFuncs { + v := reflect.ValueOf(fuzzFuncs[i]) + if v.Kind() != reflect.Func { + panic("Need only funcs!") + } + t := v.Type() + if t.NumIn() != 2 || t.NumOut() != 0 { + panic("Need 2 in and 0 out params!") + } + argT := t.In(0) + switch argT.Kind() { + case reflect.Ptr, reflect.Map: + default: + panic("fuzzFunc must take pointer or map type") + } + if t.In(1) != reflect.TypeOf(Continue{}) { + panic("fuzzFunc's second parameter must be type fuzz.Continue") + } + f.fuzzFuncs[argT] = v + } + return f +} + +// RandSource causes f to get values from the given source of randomness. +// Use if you want deterministic fuzzing. +func (f *Fuzzer) RandSource(s rand.Source) *Fuzzer { + f.r = rand.New(s) + return f +} + +// NilChance sets the probability of creating a nil pointer, map, or slice to +// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive. +func (f *Fuzzer) NilChance(p float64) *Fuzzer { + if p < 0 || p > 1 { + panic("p should be between 0 and 1, inclusive.") + } + f.nilChance = p + return f +} + +// NumElements sets the minimum and maximum number of elements that will be +// added to a non-nil map or slice. +func (f *Fuzzer) NumElements(atLeast, atMost int) *Fuzzer { + if atLeast > atMost { + panic("atLeast must be <= atMost") + } + if atLeast < 0 { + panic("atLeast must be >= 0") + } + f.minElements = atLeast + f.maxElements = atMost + return f +} + +func (f *Fuzzer) genElementCount() int { + if f.minElements == f.maxElements { + return f.minElements + } + return f.minElements + f.r.Intn(f.maxElements-f.minElements+1) +} + +func (f *Fuzzer) genShouldFill() bool { + return f.r.Float64() > f.nilChance +} + +// MaxDepth sets the maximum number of recursive fuzz calls that will be made +// before stopping. This includes struct members, pointers, and map and slice +// elements. +func (f *Fuzzer) MaxDepth(d int) *Fuzzer { + f.maxDepth = d + return f +} + +// Skip fields which match the supplied pattern.
Call this multiple times if needed +// This is useful to skip XXX_ fields generated by protobuf +func (f *Fuzzer) SkipFieldsWithPattern(pattern *regexp.Regexp) *Fuzzer { + f.skipFieldPatterns = append(f.skipFieldPatterns, pattern) + return f +} + +// Fuzz recursively fills all of obj's fields with something random. First +// this tries to find a custom fuzz function (see Funcs). If there is no +// custom function this tests whether the object implements fuzz.Interface and, +// if so, calls Fuzz on it to fuzz itself. If that fails, this will see if +// there is a default fuzz function provided by this package. If all of that +// fails, this will generate random values for all primitive fields and then +// recurse for all non-primitives. +// +// This is safe for cyclic or tree-like structs, up to a limit. Use the +// MaxDepth method to adjust how deep you need it to recurse. +// +// obj must be a pointer. Only exported (public) fields can be set (thanks, +// golang :/ ) Intended for tests, so will panic on bad input or unimplemented +// fields. +func (f *Fuzzer) Fuzz(obj interface{}) { + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + panic("needed ptr!") + } + v = v.Elem() + f.fuzzWithContext(v, 0) +} + +// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for +// obj's type will not be called and obj will not be tested for fuzz.Interface +// conformance. This applies only to obj and not other instances of obj's +// type. +// Not safe for cyclic or tree-like structs! +// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ ) +// Intended for tests, so will panic on bad input or unimplemented fields. +func (f *Fuzzer) FuzzNoCustom(obj interface{}) { + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + panic("needed ptr!") + } + v = v.Elem() + f.fuzzWithContext(v, flagNoCustomFuzz) +} + +const ( + // Do not try to find a custom fuzz function. Does not apply recursively. + flagNoCustomFuzz uint64 = 1 << iota +) + +func (f *Fuzzer) fuzzWithContext(v reflect.Value, flags uint64) { + fc := &fuzzerContext{fuzzer: f} + fc.doFuzz(v, flags) +} + +// fuzzerContext carries context about a single fuzzing run, which lets Fuzzer +// be thread-safe. +type fuzzerContext struct { + fuzzer *Fuzzer + curDepth int +} + +func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) { + if fc.curDepth >= fc.fuzzer.maxDepth { + return + } + fc.curDepth++ + defer func() { fc.curDepth-- }() + + if !v.CanSet() { + return + } + + if flags&flagNoCustomFuzz == 0 { + // Check for both pointer and non-pointer custom functions. 
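+ // Addressable values are tried through their pointer first, so custom funcs registered for *T also match values of type T.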
+ if v.CanAddr() && fc.tryCustom(v.Addr()) { + return + } + if fc.tryCustom(v) { + return + } + } + + if fn, ok := fillFuncMap[v.Kind()]; ok { + fn(v, fc.fuzzer.r) + return + } + switch v.Kind() { + case reflect.Map: + if fc.fuzzer.genShouldFill() { + v.Set(reflect.MakeMap(v.Type())) + n := fc.fuzzer.genElementCount() + for i := 0; i < n; i++ { + key := reflect.New(v.Type().Key()).Elem() + fc.doFuzz(key, 0) + val := reflect.New(v.Type().Elem()).Elem() + fc.doFuzz(val, 0) + v.SetMapIndex(key, val) + } + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Ptr: + if fc.fuzzer.genShouldFill() { + v.Set(reflect.New(v.Type().Elem())) + fc.doFuzz(v.Elem(), 0) + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Slice: + if fc.fuzzer.genShouldFill() { + n := fc.fuzzer.genElementCount() + v.Set(reflect.MakeSlice(v.Type(), n, n)) + for i := 0; i < n; i++ { + fc.doFuzz(v.Index(i), 0) + } + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Array: + if fc.fuzzer.genShouldFill() { + n := v.Len() + for i := 0; i < n; i++ { + fc.doFuzz(v.Index(i), 0) + } + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + skipField := false + fieldName := v.Type().Field(i).Name + for _, pattern := range fc.fuzzer.skipFieldPatterns { + if pattern.MatchString(fieldName) { + skipField = true + break + } + } + if !skipField { + fc.doFuzz(v.Field(i), 0) + } + } + case reflect.Chan: + fallthrough + case reflect.Func: + fallthrough + case reflect.Interface: + fallthrough + default: + panic(fmt.Sprintf("Can't handle %#v", v.Interface())) + } +} + +// tryCustom searches for custom handlers, and returns true iff it finds a match +// and successfully randomizes v. +func (fc *fuzzerContext) tryCustom(v reflect.Value) bool { + // First: see if we have a fuzz function for it. + doCustom, ok := fc.fuzzer.fuzzFuncs[v.Type()] + if !ok { + // Second: see if it can fuzz itself. + if v.CanInterface() { + intf := v.Interface() + if fuzzable, ok := intf.(Interface); ok { + fuzzable.Fuzz(Continue{fc: fc, Rand: fc.fuzzer.r}) + return true + } + } + // Finally: see if there is a default fuzz function. + doCustom, ok = fc.fuzzer.defaultFuzzFuncs[v.Type()] + if !ok { + return false + } + } + + switch v.Kind() { + case reflect.Ptr: + if v.IsNil() { + if !v.CanSet() { + return false + } + v.Set(reflect.New(v.Type().Elem())) + } + case reflect.Map: + if v.IsNil() { + if !v.CanSet() { + return false + } + v.Set(reflect.MakeMap(v.Type())) + } + default: + return false + } + + doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{ + fc: fc, + Rand: fc.fuzzer.r, + })}) + return true +} + +// Interface represents an object that knows how to fuzz itself. Any time we +// find a type that implements this interface we will delegate the act of +// fuzzing itself. +type Interface interface { + Fuzz(c Continue) +} + +// Continue can be passed to custom fuzzing functions to allow them to use +// the correct source of randomness and to continue fuzzing their members. +type Continue struct { + fc *fuzzerContext + + // For convenience, Continue implements rand.Rand via embedding. + // Use this for generating any randomness if you want your fuzzing + // to be repeatable for a given seed. + *rand.Rand +} + +// Fuzz continues fuzzing obj. obj must be a pointer. 
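+// It reuses the parent Fuzzer's settings (nil chance, element counts, custom funcs) and the current recursion depth budget.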
+func (c Continue) Fuzz(obj interface{}) { + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + panic("needed ptr!") + } + v = v.Elem() + c.fc.doFuzz(v, 0) +} + +// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for +// obj's type will not be called and obj will not be tested for fuzz.Interface +// conformance. This applies only to obj and not other instances of obj's +// type. +func (c Continue) FuzzNoCustom(obj interface{}) { + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + panic("needed ptr!") + } + v = v.Elem() + c.fc.doFuzz(v, flagNoCustomFuzz) +} + +// RandString makes a random string up to 20 characters long. The returned string +// may include a variety of (valid) UTF-8 encodings. +func (c Continue) RandString() string { + return randString(c.Rand) +} + +// RandUint64 makes random 64 bit numbers. +// Weirdly, rand doesn't have a function that gives you 64 random bits. +func (c Continue) RandUint64() uint64 { + return randUint64(c.Rand) +} + +// RandBool returns true or false randomly. +func (c Continue) RandBool() bool { + return randBool(c.Rand) +} + +func fuzzInt(v reflect.Value, r *rand.Rand) { + v.SetInt(int64(randUint64(r))) +} + +func fuzzUint(v reflect.Value, r *rand.Rand) { + v.SetUint(randUint64(r)) +} + +func fuzzTime(t *time.Time, c Continue) { + var sec, nsec int64 + // Allow for about 1000 years of random time values, which keeps things + // like JSON parsing reasonably happy. + sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60) + c.Fuzz(&nsec) + *t = time.Unix(sec, nsec) +} + +var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ + reflect.Bool: func(v reflect.Value, r *rand.Rand) { + v.SetBool(randBool(r)) + }, + reflect.Int: fuzzInt, + reflect.Int8: fuzzInt, + reflect.Int16: fuzzInt, + reflect.Int32: fuzzInt, + reflect.Int64: fuzzInt, + reflect.Uint: fuzzUint, + reflect.Uint8: fuzzUint, + reflect.Uint16: fuzzUint, + reflect.Uint32: fuzzUint, + reflect.Uint64: fuzzUint, + reflect.Uintptr: fuzzUint, + reflect.Float32: func(v reflect.Value, r *rand.Rand) { + v.SetFloat(float64(r.Float32())) + }, + reflect.Float64: func(v reflect.Value, r *rand.Rand) { + v.SetFloat(r.Float64()) + }, + reflect.Complex64: func(v reflect.Value, r *rand.Rand) { + panic("unimplemented") + }, + reflect.Complex128: func(v reflect.Value, r *rand.Rand) { + panic("unimplemented") + }, + reflect.String: func(v reflect.Value, r *rand.Rand) { + v.SetString(randString(r)) + }, + reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) { + panic("unimplemented") + }, +} + +// randBool returns true or false randomly. +func randBool(r *rand.Rand) bool { + if r.Int()&1 == 1 { + return true + } + return false +} + +type charRange struct { + first, last rune +} + +// choose returns a random unicode character from the given range, using the +// given randomness source. +func (r *charRange) choose(rand *rand.Rand) rune { + count := int64(r.last - r.first) + return r.first + rune(rand.Int63n(count)) +} + +var unicodeRanges = []charRange{ + {' ', '~'}, // ASCII characters + {'\u00a0', '\u02af'}, // Multi-byte encoded characters + {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) +} + +// randString makes a random string up to 20 characters long. The returned string +// may include a variety of (valid) UTF-8 encodings. 
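+// Runes are drawn from a few fixed ranges: printable ASCII, a block of multi-byte Latin characters, and common CJK ideographs.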
+func randString(r *rand.Rand) string { + n := r.Intn(20) + runes := make([]rune, n) + for i := range runes { + runes[i] = unicodeRanges[r.Intn(len(unicodeRanges))].choose(r) + } + return string(runes) +} + +// randUint64 makes random 64 bit numbers. +// Weirdly, rand doesn't have a function that gives you 64 random bits. +func randUint64(r *rand.Rand) uint64 { + return uint64(r.Uint32())<<32 | uint64(r.Uint32()) +} diff --git a/extender/code/vendor/github.com/google/gofuzz/go.mod b/extender/code/vendor/github.com/google/gofuzz/go.mod new file mode 100644 index 000000000..8ec4fe9e9 --- /dev/null +++ b/extender/code/vendor/github.com/google/gofuzz/go.mod @@ -0,0 +1,3 @@ +module github.com/google/gofuzz + +go 1.12 diff --git a/extender/code/vendor/github.com/googleapis/gnostic/LICENSE b/extender/code/vendor/github.com/googleapis/gnostic/LICENSE new file mode 100644 index 000000000..6b0b1270f --- /dev/null +++ b/extender/code/vendor/github.com/googleapis/gnostic/LICENSE @@ -0,0 +1,203 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/extender/code/vendor/github.com/googleapis/gnostic/compiler/README.md b/extender/code/vendor/github.com/googleapis/gnostic/compiler/README.md new file mode 100644 index 000000000..848b16c69 --- /dev/null +++ b/extender/code/vendor/github.com/googleapis/gnostic/compiler/README.md @@ -0,0 +1,3 @@ +# Compiler support code + +This directory contains compiler support code used by Gnostic and Gnostic extensions. \ No newline at end of file diff --git a/extender/code/vendor/github.com/googleapis/gnostic/compiler/context.go b/extender/code/vendor/github.com/googleapis/gnostic/compiler/context.go new file mode 100644 index 000000000..a64c1b75d --- /dev/null +++ b/extender/code/vendor/github.com/googleapis/gnostic/compiler/context.go @@ -0,0 +1,43 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +// Context contains state of the compiler as it traverses a document. +type Context struct { + Parent *Context + Name string + ExtensionHandlers *[]ExtensionHandler +} + +// NewContextWithExtensions returns a new object representing the compiler state +func NewContextWithExtensions(name string, parent *Context, extensionHandlers *[]ExtensionHandler) *Context { + return &Context{Name: name, Parent: parent, ExtensionHandlers: extensionHandlers} +} + +// NewContext returns a new object representing the compiler state +func NewContext(name string, parent *Context) *Context { + if parent != nil { + return &Context{Name: name, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers} + } + return &Context{Name: name, Parent: parent, ExtensionHandlers: nil} +} + +// Description returns a text description of the compiler state +func (context *Context) Description() string { + if context.Parent != nil { + return context.Parent.Description() + "." 
+ context.Name + } + return context.Name +} diff --git a/extender/code/vendor/github.com/googleapis/gnostic/compiler/error.go b/extender/code/vendor/github.com/googleapis/gnostic/compiler/error.go new file mode 100644 index 000000000..d8672c100 --- /dev/null +++ b/extender/code/vendor/github.com/googleapis/gnostic/compiler/error.go @@ -0,0 +1,61 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +// Error represents compiler errors and their location in the document. +type Error struct { + Context *Context + Message string +} + +// NewError creates an Error. +func NewError(context *Context, message string) *Error { + return &Error{Context: context, Message: message} +} + +// Error returns the string value of an Error. +func (err *Error) Error() string { + if err.Context == nil { + return "ERROR " + err.Message + } + return "ERROR " + err.Context.Description() + " " + err.Message +} + +// ErrorGroup is a container for groups of Error values. +type ErrorGroup struct { + Errors []error +} + +// NewErrorGroupOrNil returns a new ErrorGroup for a slice of errors or nil if the slice is empty. +func NewErrorGroupOrNil(errors []error) error { + if len(errors) == 0 { + return nil + } else if len(errors) == 1 { + return errors[0] + } else { + return &ErrorGroup{Errors: errors} + } +} + +func (group *ErrorGroup) Error() string { + result := "" + for i, err := range group.Errors { + if i > 0 { + result += "\n" + } + result += err.Error() + } + return result +} diff --git a/extender/code/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go b/extender/code/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go new file mode 100644 index 000000000..1f85b650e --- /dev/null +++ b/extender/code/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go @@ -0,0 +1,101 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + "bytes" + "fmt" + "os/exec" + + "strings" + + "errors" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" + ext_plugin "github.com/googleapis/gnostic/extensions" + yaml "gopkg.in/yaml.v2" +) + +// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions. +type ExtensionHandler struct { + Name string +} + +// HandleExtension calls a binary extension handler. 
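+// Handlers from the context are tried in order until one returns a non-nil value; each handler binary receives a proto-encoded ExtensionHandlerRequest on stdin and replies with an ExtensionHandlerResponse on stdout (see extensions/extension.proto).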
+func HandleExtension(context *Context, in interface{}, extensionName string) (bool, *any.Any, error) { + handled := false + var errFromPlugin error + var outFromPlugin *any.Any + + if context != nil && context.ExtensionHandlers != nil && len(*(context.ExtensionHandlers)) != 0 { + for _, customAnyProtoGenerator := range *(context.ExtensionHandlers) { + outFromPlugin, errFromPlugin = customAnyProtoGenerator.handle(in, extensionName) + if outFromPlugin == nil { + continue + } else { + handled = true + break + } + } + } + return handled, outFromPlugin, errFromPlugin +} + +func (extensionHandlers *ExtensionHandler) handle(in interface{}, extensionName string) (*any.Any, error) { + if extensionHandlers.Name != "" { + binary, _ := yaml.Marshal(in) + + request := &ext_plugin.ExtensionHandlerRequest{} + + version := &ext_plugin.Version{} + version.Major = 0 + version.Minor = 1 + version.Patch = 0 + request.CompilerVersion = version + + request.Wrapper = &ext_plugin.Wrapper{} + + request.Wrapper.Version = "v2" + request.Wrapper.Yaml = string(binary) + request.Wrapper.ExtensionName = extensionName + + requestBytes, _ := proto.Marshal(request) + cmd := exec.Command(extensionHandlers.Name) + cmd.Stdin = bytes.NewReader(requestBytes) + output, err := cmd.Output() + + if err != nil { + fmt.Printf("Error: %+v\n", err) + return nil, err + } + response := &ext_plugin.ExtensionHandlerResponse{} + err = proto.Unmarshal(output, response) + if err != nil { + fmt.Printf("Error: %+v\n", err) + fmt.Printf("%s\n", string(output)) + return nil, err + } + if !response.Handled { + return nil, nil + } + if len(response.Error) != 0 { + message := fmt.Sprintf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Error, ",")) + return nil, errors.New(message) + } + return response.Value, nil + } + return nil, nil +} diff --git a/extender/code/vendor/github.com/googleapis/gnostic/compiler/helpers.go b/extender/code/vendor/github.com/googleapis/gnostic/compiler/helpers.go new file mode 100644 index 000000000..76df635ff --- /dev/null +++ b/extender/code/vendor/github.com/googleapis/gnostic/compiler/helpers.go @@ -0,0 +1,197 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + "fmt" + "gopkg.in/yaml.v2" + "regexp" + "sort" + "strconv" +) + +// compiler helper functions, usually called from generated code + +// UnpackMap gets a yaml.MapSlice if possible. +func UnpackMap(in interface{}) (yaml.MapSlice, bool) { + m, ok := in.(yaml.MapSlice) + if ok { + return m, true + } + // do we have an empty array? + a, ok := in.([]interface{}) + if ok && len(a) == 0 { + // if so, return an empty map + return yaml.MapSlice{}, true + } + return nil, false +} + +// SortedKeysForMap returns the sorted keys of a yaml.MapSlice. 
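+// Keys are assumed to be strings; a non-string key would panic on the type assertion below.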
+func SortedKeysForMap(m yaml.MapSlice) []string { + keys := make([]string, 0) + for _, item := range m { + keys = append(keys, item.Key.(string)) + } + sort.Strings(keys) + return keys +} + +// MapHasKey returns true if a yaml.MapSlice contains a specified key. +func MapHasKey(m yaml.MapSlice, key string) bool { + for _, item := range m { + itemKey, ok := item.Key.(string) + if ok && key == itemKey { + return true + } + } + return false +} + +// MapValueForKey gets the value of a map value for a specified key. +func MapValueForKey(m yaml.MapSlice, key string) interface{} { + for _, item := range m { + itemKey, ok := item.Key.(string) + if ok && key == itemKey { + return item.Value + } + } + return nil +} + +// ConvertInterfaceArrayToStringArray converts an array of interfaces to an array of strings, if possible. +func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string { + stringArray := make([]string, 0) + for _, item := range interfaceArray { + v, ok := item.(string) + if ok { + stringArray = append(stringArray, v) + } + } + return stringArray +} + +// MissingKeysInMap identifies which keys from a list of required keys are not in a map. +func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string { + missingKeys := make([]string, 0) + for _, k := range requiredKeys { + if !MapHasKey(m, k) { + missingKeys = append(missingKeys, k) + } + } + return missingKeys +} + +// InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns. +func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string { + invalidKeys := make([]string, 0) + for _, item := range m { + itemKey, ok := item.Key.(string) + if ok { + key := itemKey + found := false + // does the key match an allowed key? + for _, allowedKey := range allowedKeys { + if key == allowedKey { + found = true + break + } + } + if !found { + // does the key match an allowed pattern? + for _, allowedPattern := range allowedPatterns { + if allowedPattern.MatchString(key) { + found = true + break + } + } + if !found { + invalidKeys = append(invalidKeys, key) + } + } + } + } + return invalidKeys +} + +// DescribeMap describes a map (for debugging purposes). +func DescribeMap(in interface{}, indent string) string { + description := "" + m, ok := in.(map[string]interface{}) + if ok { + keys := make([]string, 0) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + v := m[k] + description += fmt.Sprintf("%s%s:\n", indent, k) + description += DescribeMap(v, indent+" ") + } + return description + } + a, ok := in.([]interface{}) + if ok { + for i, v := range a { + description += fmt.Sprintf("%s%d:\n", indent, i) + description += DescribeMap(v, indent+" ") + } + return description + } + description += fmt.Sprintf("%s%+v\n", indent, in) + return description +} + +// PluralProperties returns the string "properties" pluralized. +func PluralProperties(count int) string { + if count == 1 { + return "property" + } + return "properties" +} + +// StringArrayContainsValue returns true if a string array contains a specified value. +func StringArrayContainsValue(array []string, value string) bool { + for _, item := range array { + if item == value { + return true + } + } + return false +} + +// StringArrayContainsValues returns true if a string array contains all of a list of specified values. 
+func StringArrayContainsValues(array []string, values []string) bool { + for _, value := range values { + if !StringArrayContainsValue(array, value) { + return false + } + } + return true +} + +// StringValue returns the string value of an item. +func StringValue(item interface{}) (value string, ok bool) { + value, ok = item.(string) + if ok { + return value, ok + } + intValue, ok := item.(int) + if ok { + return strconv.Itoa(intValue), true + } + return "", false +} diff --git a/extender/code/vendor/github.com/googleapis/gnostic/compiler/main.go b/extender/code/vendor/github.com/googleapis/gnostic/compiler/main.go new file mode 100644 index 000000000..9713a21cc --- /dev/null +++ b/extender/code/vendor/github.com/googleapis/gnostic/compiler/main.go @@ -0,0 +1,16 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package compiler provides support functions to generated compiler code. +package compiler diff --git a/extender/code/vendor/github.com/googleapis/gnostic/compiler/reader.go b/extender/code/vendor/github.com/googleapis/gnostic/compiler/reader.go new file mode 100644 index 000000000..955b985b8 --- /dev/null +++ b/extender/code/vendor/github.com/googleapis/gnostic/compiler/reader.go @@ -0,0 +1,241 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package compiler + +import ( + "errors" + "fmt" + "io/ioutil" + "log" + "net/http" + "net/url" + "path/filepath" + "strings" + + yaml "gopkg.in/yaml.v2" +) + +var fileCache map[string][]byte +var infoCache map[string]interface{} +var count int64 + +var verboseReader = false +var fileCacheEnable = true +var infoCacheEnable = true + +func initializeFileCache() { + if fileCache == nil { + fileCache = make(map[string][]byte, 0) + } +} + +func initializeInfoCache() { + if infoCache == nil { + infoCache = make(map[string]interface{}, 0) + } +} + +func EnableFileCache() { + fileCacheEnable = true +} + +func EnableInfoCache() { + infoCacheEnable = true +} + +func DisableFileCache() { + fileCacheEnable = false +} + +func DisableInfoCache() { + infoCacheEnable = false +} + +func RemoveFromFileCache(fileurl string) { + if !fileCacheEnable { + return + } + initializeFileCache() + delete(fileCache, fileurl) +} + +func RemoveFromInfoCache(filename string) { + if !infoCacheEnable { + return + } + initializeInfoCache() + delete(infoCache, filename) +} + +func GetInfoCache() map[string]interface{} { + if infoCache == nil { + initializeInfoCache() + } + return infoCache +} + +func ClearFileCache() { + fileCache = make(map[string][]byte, 0) +} + +func ClearInfoCache() { + infoCache = make(map[string]interface{}) +} + +func ClearCaches() { + ClearFileCache() + ClearInfoCache() +} + +// FetchFile gets a specified file from the local filesystem or a remote location. +func FetchFile(fileurl string) ([]byte, error) { + var bytes []byte + initializeFileCache() + if fileCacheEnable { + bytes, ok := fileCache[fileurl] + if ok { + if verboseReader { + log.Printf("Cache hit %s", fileurl) + } + return bytes, nil + } + if verboseReader { + log.Printf("Fetching %s", fileurl) + } + } + response, err := http.Get(fileurl) + if err != nil { + return nil, err + } + defer response.Body.Close() + if response.StatusCode != 200 { + return nil, errors.New(fmt.Sprintf("Error downloading %s: %s", fileurl, response.Status)) + } + bytes, err = ioutil.ReadAll(response.Body) + if fileCacheEnable && err == nil { + fileCache[fileurl] = bytes + } + return bytes, err +} + +// ReadBytesForFile reads the bytes of a file. +func ReadBytesForFile(filename string) ([]byte, error) { + // is the filename a url? + fileurl, _ := url.Parse(filename) + if fileurl.Scheme != "" { + // yes, fetch it + bytes, err := FetchFile(filename) + if err != nil { + return nil, err + } + return bytes, nil + } + // no, it's a local filename + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + return bytes, nil +} + +// ReadInfoFromBytes unmarshals a file as a yaml.MapSlice. +func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) { + initializeInfoCache() + if infoCacheEnable { + cachedInfo, ok := infoCache[filename] + if ok { + if verboseReader { + log.Printf("Cache hit info for file %s", filename) + } + return cachedInfo, nil + } + if verboseReader { + log.Printf("Reading info for file %s", filename) + } + } + var info yaml.MapSlice + err := yaml.Unmarshal(bytes, &info) + if err != nil { + return nil, err + } + if infoCacheEnable && len(filename) > 0 { + infoCache[filename] = info + } + return info, nil +} + +// ReadInfoForRef reads a file and returns the fragment needed to resolve a $ref.
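+// The ref has the form "file#/path/to/node": a non-empty part before '#' names the (possibly relative) file, an empty part means the base file itself, and the part after '#' is a '/'-separated path into the parsed YAML document.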
+func ReadInfoForRef(basefile string, ref string) (interface{}, error) { + initializeInfoCache() + if infoCacheEnable { + info, ok := infoCache[ref] + if ok { + if verboseReader { + log.Printf("Cache hit for ref %s#%s", basefile, ref) + } + return info, nil + } + if verboseReader { + log.Printf("Reading info for ref %s#%s", basefile, ref) + } + } + count = count + 1 + basedir, _ := filepath.Split(basefile) + parts := strings.Split(ref, "#") + var filename string + if parts[0] != "" { + filename = parts[0] + if _, err := url.ParseRequestURI(parts[0]); err != nil { + // It is not an URL, so the file is local + filename = basedir + parts[0] + } + } else { + filename = basefile + } + bytes, err := ReadBytesForFile(filename) + if err != nil { + return nil, err + } + info, err := ReadInfoFromBytes(filename, bytes) + if err != nil { + log.Printf("File error: %v\n", err) + } else { + if len(parts) > 1 { + path := strings.Split(parts[1], "/") + for i, key := range path { + if i > 0 { + m, ok := info.(yaml.MapSlice) + if ok { + found := false + for _, section := range m { + if section.Key == key { + info = section.Value + found = true + } + } + if !found { + infoCache[ref] = nil + return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref)) + } + } + } + } + } + } + if infoCacheEnable { + infoCache[ref] = info + } + return info, nil +} diff --git a/extender/code/vendor/github.com/googleapis/gnostic/extensions/README.md b/extender/code/vendor/github.com/googleapis/gnostic/extensions/README.md new file mode 100644 index 000000000..ff1c2eb1e --- /dev/null +++ b/extender/code/vendor/github.com/googleapis/gnostic/extensions/README.md @@ -0,0 +1,5 @@ +# Extensions + +This directory contains support code for building Gnostic extensions and associated examples. + +Extensions are used to compile vendor or specification extensions into protocol buffer structures. diff --git a/extender/code/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/extender/code/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go new file mode 100644 index 000000000..432dc06e6 --- /dev/null +++ b/extender/code/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go @@ -0,0 +1,300 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: extensions/extension.proto + +package openapiextension_v1 + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// The version number of OpenAPI compiler. +type Version struct { + Major int32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` + Minor int32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` + Patch int32 `protobuf:"varint,3,opt,name=patch,proto3" json:"patch,omitempty"` + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. 
+ Suffix string `protobuf:"bytes,4,opt,name=suffix,proto3" json:"suffix,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { + return fileDescriptor_661e47e790f76671, []int{0} +} + +func (m *Version) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Version.Unmarshal(m, b) +} +func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Version.Marshal(b, m, deterministic) +} +func (m *Version) XXX_Merge(src proto.Message) { + xxx_messageInfo_Version.Merge(m, src) +} +func (m *Version) XXX_Size() int { + return xxx_messageInfo_Version.Size(m) +} +func (m *Version) XXX_DiscardUnknown() { + xxx_messageInfo_Version.DiscardUnknown(m) +} + +var xxx_messageInfo_Version proto.InternalMessageInfo + +func (m *Version) GetMajor() int32 { + if m != nil { + return m.Major + } + return 0 +} + +func (m *Version) GetMinor() int32 { + if m != nil { + return m.Minor + } + return 0 +} + +func (m *Version) GetPatch() int32 { + if m != nil { + return m.Patch + } + return 0 +} + +func (m *Version) GetSuffix() string { + if m != nil { + return m.Suffix + } + return "" +} + +// An encoded Request is written to the ExtensionHandler's stdin. +type ExtensionHandlerRequest struct { + // The OpenAPI descriptions that were explicitly listed on the command line. + // The specifications will appear in the order they are specified to gnostic. + Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper,proto3" json:"wrapper,omitempty"` + // The version number of openapi compiler. + CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion,proto3" json:"compiler_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionHandlerRequest) Reset() { *m = ExtensionHandlerRequest{} } +func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) } +func (*ExtensionHandlerRequest) ProtoMessage() {} +func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_661e47e790f76671, []int{1} +} + +func (m *ExtensionHandlerRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionHandlerRequest.Unmarshal(m, b) +} +func (m *ExtensionHandlerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionHandlerRequest.Marshal(b, m, deterministic) +} +func (m *ExtensionHandlerRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionHandlerRequest.Merge(m, src) +} +func (m *ExtensionHandlerRequest) XXX_Size() int { + return xxx_messageInfo_ExtensionHandlerRequest.Size(m) +} +func (m *ExtensionHandlerRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionHandlerRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionHandlerRequest proto.InternalMessageInfo + +func (m *ExtensionHandlerRequest) GetWrapper() *Wrapper { + if m != nil { + return m.Wrapper + } + return nil +} + +func (m *ExtensionHandlerRequest) GetCompilerVersion() *Version { + if m != nil { + return m.CompilerVersion + } + return nil +} + +// The extension writes an encoded ExtensionHandlerResponse to stdout.
+type ExtensionHandlerResponse struct { + // true if the extension is handled by the extension handler; false otherwise + Handled bool `protobuf:"varint,1,opt,name=handled,proto3" json:"handled,omitempty"` + // Error message. If non-empty, the extension handling failed. + // The extension handler process should exit with status code zero + // even if it reports an error in this way. + // + // This should be used to indicate errors which prevent the extension from + // operating as intended. Errors which indicate a problem in gnostic + // itself -- such as the input Document being unparseable -- should be + // reported by writing a message to stderr and exiting with a non-zero + // status code. + Error []string `protobuf:"bytes,2,rep,name=error,proto3" json:"error,omitempty"` + // text output + Value *any.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionHandlerResponse) Reset() { *m = ExtensionHandlerResponse{} } +func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) } +func (*ExtensionHandlerResponse) ProtoMessage() {} +func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_661e47e790f76671, []int{2} +} + +func (m *ExtensionHandlerResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionHandlerResponse.Unmarshal(m, b) +} +func (m *ExtensionHandlerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionHandlerResponse.Marshal(b, m, deterministic) +} +func (m *ExtensionHandlerResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionHandlerResponse.Merge(m, src) +} +func (m *ExtensionHandlerResponse) XXX_Size() int { + return xxx_messageInfo_ExtensionHandlerResponse.Size(m) +} +func (m *ExtensionHandlerResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionHandlerResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionHandlerResponse proto.InternalMessageInfo + +func (m *ExtensionHandlerResponse) GetHandled() bool { + if m != nil { + return m.Handled + } + return false +} + +func (m *ExtensionHandlerResponse) GetError() []string { + if m != nil { + return m.Error + } + return nil +} + +func (m *ExtensionHandlerResponse) GetValue() *any.Any { + if m != nil { + return m.Value + } + return nil +} + +type Wrapper struct { + // version of the OpenAPI specification in which this extension was written. 
+ Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Name of the extension + ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName,proto3" json:"extension_name,omitempty"` + // Must be a valid yaml for the proto + Yaml string `protobuf:"bytes,3,opt,name=yaml,proto3" json:"yaml,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Wrapper) Reset() { *m = Wrapper{} } +func (m *Wrapper) String() string { return proto.CompactTextString(m) } +func (*Wrapper) ProtoMessage() {} +func (*Wrapper) Descriptor() ([]byte, []int) { + return fileDescriptor_661e47e790f76671, []int{3} +} + +func (m *Wrapper) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Wrapper.Unmarshal(m, b) +} +func (m *Wrapper) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Wrapper.Marshal(b, m, deterministic) +} +func (m *Wrapper) XXX_Merge(src proto.Message) { + xxx_messageInfo_Wrapper.Merge(m, src) +} +func (m *Wrapper) XXX_Size() int { + return xxx_messageInfo_Wrapper.Size(m) +} +func (m *Wrapper) XXX_DiscardUnknown() { + xxx_messageInfo_Wrapper.DiscardUnknown(m) +} + +var xxx_messageInfo_Wrapper proto.InternalMessageInfo + +func (m *Wrapper) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *Wrapper) GetExtensionName() string { + if m != nil { + return m.ExtensionName + } + return "" +} + +func (m *Wrapper) GetYaml() string { + if m != nil { + return m.Yaml + } + return "" +} + +func init() { + proto.RegisterType((*Version)(nil), "openapiextension.v1.Version") + proto.RegisterType((*ExtensionHandlerRequest)(nil), "openapiextension.v1.ExtensionHandlerRequest") + proto.RegisterType((*ExtensionHandlerResponse)(nil), "openapiextension.v1.ExtensionHandlerResponse") + proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper") +} + +func init() { proto.RegisterFile("extensions/extension.proto", fileDescriptor_661e47e790f76671) } + +var fileDescriptor_661e47e790f76671 = []byte{ + // 362 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xeb, 0x40, + 0x18, 0x85, 0x49, 0xbf, 0x72, 0x33, 0x97, 0xdb, 0x2b, 0x63, 0xd1, 0x58, 0x5c, 0x94, 0x80, 0x50, + 0x44, 0xa6, 0x54, 0xc1, 0x7d, 0x0b, 0x45, 0xdd, 0xd8, 0x32, 0x8b, 0xba, 0xb3, 0x4c, 0xd3, 0xb7, + 0x69, 0x24, 0x99, 0x19, 0x27, 0x1f, 0xb6, 0x7f, 0xc5, 0xa5, 0xbf, 0x54, 0x32, 0x93, 0xc4, 0x85, + 0xba, 0x9b, 0xf3, 0x70, 0xda, 0xf7, 0x9c, 0x13, 0xd4, 0x87, 0x7d, 0x0a, 0x3c, 0x09, 0x05, 0x4f, + 0x46, 0xf5, 0x93, 0x48, 0x25, 0x52, 0x81, 0x8f, 0x85, 0x04, 0xce, 0x64, 0xf8, 0xc5, 0xf3, 0x71, + 0xff, 0x2c, 0x10, 0x22, 0x88, 0x60, 0xa4, 0x2d, 0xeb, 0x6c, 0x3b, 0x62, 0xfc, 0x60, 0xfc, 0x9e, + 0x8f, 0xec, 0x25, 0xa8, 0xc2, 0x88, 0x7b, 0xa8, 0x1d, 0xb3, 0x17, 0xa1, 0x5c, 0x6b, 0x60, 0x0d, + 0xdb, 0xd4, 0x08, 0x4d, 0x43, 0x2e, 0x94, 0xdb, 0x28, 0x69, 0x21, 0x0a, 0x2a, 0x59, 0xea, 0xef, + 0xdc, 0xa6, 0xa1, 0x5a, 0xe0, 0x13, 0xd4, 0x49, 0xb2, 0xed, 0x36, 0xdc, 0xbb, 0xad, 0x81, 0x35, + 0x74, 0x68, 0xa9, 0xbc, 0x77, 0x0b, 0x9d, 0xce, 0xaa, 0x40, 0xf7, 0x8c, 0x6f, 0x22, 0x50, 0x14, + 0x5e, 0x33, 0x48, 0x52, 0x7c, 0x8b, 0xec, 0x37, 0xc5, 0xa4, 0x04, 0x73, 0xf7, 0xef, 0xf5, 0x39, + 0xf9, 0xa1, 0x02, 0x79, 0x32, 0x1e, 0x5a, 0x99, 0xf1, 0x1d, 0x3a, 0xf2, 0x45, 0x2c, 0xc3, 0x08, + 0xd4, 0x2a, 0x37, 0x0d, 0x74, 0x98, 0xdf, 0xfe, 0xa0, 0x6c, 0x49, 0xff, 0x57, 0xbf, 0x2a, 0x81, + 0x97, 0x23, 0xf7, 0x7b, 
0xb6, 0x44, 0x0a, 0x9e, 0x00, 0x76, 0x91, 0xbd, 0xd3, 0x68, 0xa3, 0xc3, + 0xfd, 0xa1, 0x95, 0x2c, 0x06, 0x00, 0xa5, 0xf4, 0x2c, 0xcd, 0xa1, 0x43, 0x8d, 0xc0, 0x97, 0xa8, + 0x9d, 0xb3, 0x28, 0x83, 0x32, 0x49, 0x8f, 0x98, 0xe1, 0x49, 0x35, 0x3c, 0x99, 0xf0, 0x03, 0x35, + 0x16, 0xef, 0x19, 0xd9, 0x65, 0xa9, 0xe2, 0x4c, 0x55, 0xc1, 0xd2, 0xc3, 0x55, 0x12, 0x5f, 0xa0, + 0x6e, 0xdd, 0x62, 0xc5, 0x59, 0x0c, 0xfa, 0x33, 0x38, 0xf4, 0x5f, 0x4d, 0x1f, 0x59, 0x0c, 0x18, + 0xa3, 0xd6, 0x81, 0xc5, 0x91, 0x3e, 0xeb, 0x50, 0xfd, 0x9e, 0x5e, 0xa1, 0xae, 0x50, 0x01, 0x09, + 0xb8, 0x48, 0xd2, 0xd0, 0x27, 0xf9, 0x78, 0x8a, 0xe7, 0x12, 0xf8, 0x64, 0xf1, 0x50, 0xd7, 0x5d, + 0x8e, 0x17, 0xd6, 0x47, 0xa3, 0x39, 0x9f, 0xcc, 0xd6, 0x1d, 0x1d, 0xf1, 0xe6, 0x33, 0x00, 0x00, + 0xff, 0xff, 0xeb, 0xf3, 0xfa, 0x65, 0x5c, 0x02, 0x00, 0x00, +} diff --git a/extender/code/vendor/github.com/googleapis/gnostic/extensions/extension.proto b/extender/code/vendor/github.com/googleapis/gnostic/extensions/extension.proto new file mode 100644 index 000000000..04856f913 --- /dev/null +++ b/extender/code/vendor/github.com/googleapis/gnostic/extensions/extension.proto @@ -0,0 +1,93 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +import "google/protobuf/any.proto"; +package openapiextension.v1; + +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one level of name nesting and being +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; + +// The Java outer classname should be the filename in UpperCamelCase. This +// class is only used to hold the proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "OpenAPIExtensionV1"; + +// The Java package name must be the proto package name with a proper prefix. +option java_package = "org.gnostic.v1"; + +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +// +option objc_class_prefix = "OAE"; // "OpenAPI Extension" + +// The version number of OpenAPI compiler. +message Version { + int32 major = 1; + int32 minor = 2; + int32 patch = 3; + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + string suffix = 4; +} + +// An encoded Request is written to the ExtensionHandler's stdin. +message ExtensionHandlerRequest { + + // The OpenAPI descriptions that were explicitly listed on the command line. + // The specifications will appear in the order they are specified to gnostic.
+ Wrapper wrapper = 1; + + // The version number of openapi compiler. + Version compiler_version = 3; +} + +// The extension handler writes an encoded ExtensionHandlerResponse to stdout. +message ExtensionHandlerResponse { + + // true if the extension is handled by the extension handler; false otherwise + bool handled = 1; + + // Error message. If non-empty, the extension handling failed. + // The extension handler process should exit with status code zero + // even if it reports an error in this way. + // + // This should be used to indicate errors which prevent the extension from + // operating as intended. Errors which indicate a problem in gnostic + // itself -- such as the input Document being unparseable -- should be + // reported by writing a message to stderr and exiting with a non-zero + // status code. + repeated string error = 2; + + // text output + google.protobuf.Any value = 3; +} + +message Wrapper { + // version of the OpenAPI specification in which this extension was written. + string version = 1; + + // Name of the extension + string extension_name = 2; + + // Must be a valid yaml for the proto + string yaml = 3; +} diff --git a/extender/code/vendor/github.com/googleapis/gnostic/extensions/extensions.go b/extender/code/vendor/github.com/googleapis/gnostic/extensions/extensions.go new file mode 100644 index 000000000..94a8e62a7 --- /dev/null +++ b/extender/code/vendor/github.com/googleapis/gnostic/extensions/extensions.go @@ -0,0 +1,82 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openapiextension_v1 + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" +) + +type documentHandler func(version string, extensionName string, document string) +type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error) + +func forInputYamlFromOpenapic(handler documentHandler) { + data, err := ioutil.ReadAll(os.Stdin) + if err != nil { + fmt.Println("File error:", err.Error()) + os.Exit(1) + } + if len(data) == 0 { + fmt.Println("No input data.") + os.Exit(1) + } + request := &ExtensionHandlerRequest{} + err = proto.Unmarshal(data, request) + if err != nil { + fmt.Println("Input error:", err.Error()) + os.Exit(1) + } + handler(request.Wrapper.Version, request.Wrapper.ExtensionName, request.Wrapper.Yaml) +} + +// ProcessExtension calls the handler for a specified extension.
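+// +// For illustration, a handler binary might use this package roughly as in the sketch below; the extension name "x-sample" and the constructor NewSampleExtension are hypothetical stand-ins for whatever an extension's schema generates: +// +//	func main() { +//		openapiextension_v1.ProcessExtension(func(name string, yamlInput string) (bool, proto.Message, error) { +//			if name != "x-sample" { +//				return false, nil, nil // unhandled: gnostic receives an empty response +//			} +//			ext, err := NewSampleExtension(yamlInput) // hypothetical parser for this extension's schema +//			return true, ext, err +//		}) +//	}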
+func ProcessExtension(handleExtension extensionHandler) { + response := &ExtensionHandlerResponse{} + forInputYamlFromOpenapic( + func(version string, extensionName string, yamlInput string) { + var newObject proto.Message + var err error + + handled, newObject, err := handleExtension(extensionName, yamlInput) + if !handled { + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) + os.Exit(0) + } + + // If we reach here, then the extension is handled + response.Handled = true + if err != nil { + response.Error = append(response.Error, err.Error()) + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) + os.Exit(0) + } + response.Value, err = ptypes.MarshalAny(newObject) + if err != nil { + response.Error = append(response.Error, err.Error()) + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) + os.Exit(0) + } + }) + + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) +} diff --git a/extender/code/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.go b/extender/code/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.go new file mode 100644 index 000000000..4fd44c45e --- /dev/null +++ b/extender/code/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.go @@ -0,0 +1,8847 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. + +package openapi_v2 + +import ( + "fmt" + "github.com/googleapis/gnostic/compiler" + "gopkg.in/yaml.v2" + "regexp" + "strings" +) + +// Version returns the package name (and OpenAPI version). +func Version() string { + return "openapi_v2" +} + +// NewAdditionalPropertiesItem creates an object of type AdditionalPropertiesItem if possible, returning an error if not. +func NewAdditionalPropertiesItem(in interface{}, context *compiler.Context) (*AdditionalPropertiesItem, error) { + errors := make([]error, 0) + x := &AdditionalPropertiesItem{} + matched := false + // Schema schema = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewSchema(m, compiler.NewContext("schema", context)) + if matchingError == nil { + x.Oneof = &AdditionalPropertiesItem_Schema{Schema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // bool boolean = 2; + boolValue, ok := in.(bool) + if ok { + x.Oneof = &AdditionalPropertiesItem_Boolean{Boolean: boolValue} + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewAny creates an object of type Any if possible, returning an error if not. 
+func NewAny(in interface{}, context *compiler.Context) (*Any, error) { + errors := make([]error, 0) + x := &Any{} + bytes, _ := yaml.Marshal(in) + x.Yaml = string(bytes) + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewApiKeySecurity creates an object of type ApiKeySecurity if possible, returning an error if not. +func NewApiKeySecurity(in interface{}, context *compiler.Context) (*ApiKeySecurity, error) { + errors := make([]error, 0) + x := &ApiKeySecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "in", "name", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [apiKey] + if ok && !compiler.StringArrayContainsValue([]string{"apiKey"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 2; + v2 := compiler.MapValueForKey(m, "name") + if v2 != nil { + x.Name, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 3; + v3 := compiler.MapValueForKey(m, "in") + if v3 != nil { + x.In, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [header query] + if ok && !compiler.StringArrayContainsValue([]string{"header", "query"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 4; + v4 := compiler.MapValueForKey(m, "description") + if v4 != nil { + x.Description, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 5; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value 
= resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewBasicAuthenticationSecurity creates an object of type BasicAuthenticationSecurity if possible, returning an error if not. +func NewBasicAuthenticationSecurity(in interface{}, context *compiler.Context) (*BasicAuthenticationSecurity, error) { + errors := make([]error, 0) + x := &BasicAuthenticationSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [basic] + if ok && !compiler.StringArrayContainsValue([]string{"basic"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewBodyParameter creates an object of type BodyParameter if possible, returning an error if not. 
+func NewBodyParameter(in interface{}, context *compiler.Context) (*BodyParameter, error) { + errors := make([]error, 0) + x := &BodyParameter{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "schema"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "in", "name", "required", "schema"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 2; + v2 := compiler.MapValueForKey(m, "name") + if v2 != nil { + x.Name, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 3; + v3 := compiler.MapValueForKey(m, "in") + if v3 != nil { + x.In, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [body] + if ok && !compiler.StringArrayContainsValue([]string{"body"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool required = 4; + v4 := compiler.MapValueForKey(m, "required") + if v4 != nil { + x.Required, ok = v4.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Schema schema = 5; + v5 := compiler.MapValueForKey(m, "schema") + if v5 != nil { + var err error + x.Schema, err = NewSchema(v5, compiler.NewContext("schema", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewContact creates an object of type Contact if 
possible, returning an error if not. +func NewContact(in interface{}, context *compiler.Context) (*Contact, error) { + errors := make([]error, 0) + x := &Contact{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"email", "name", "url"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string email = 3; + v3 := compiler.MapValueForKey(m, "email") + if v3 != nil { + x.Email, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for email: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 4; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDefault creates an object of type Default if possible, returning an error if not. 
+func NewDefault(in interface{}, context *compiler.Context) (*Default, error) { + errors := make([]error, 0) + x := &Default{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDefinitions creates an object of type Definitions if possible, returning an error if not. +func NewDefinitions(in interface{}, context *compiler.Context) (*Definitions, error) { + errors := make([]error, 0) + x := &Definitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSchema additional_properties = 1; + // MAP: Schema + x.AdditionalProperties = make([]*NamedSchema, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedSchema{} + pair.Name = k + var err error + pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDocument creates an object of type Document if possible, returning an error if not. 
+func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { + errors := make([]error, 0) + x := &Document{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"info", "paths", "swagger"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"basePath", "consumes", "definitions", "externalDocs", "host", "info", "parameters", "paths", "produces", "responses", "schemes", "security", "securityDefinitions", "swagger", "tags"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string swagger = 1; + v1 := compiler.MapValueForKey(m, "swagger") + if v1 != nil { + x.Swagger, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [2.0] + if ok && !compiler.StringArrayContainsValue([]string{"2.0"}, x.Swagger) { + message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Info info = 2; + v2 := compiler.MapValueForKey(m, "info") + if v2 != nil { + var err error + x.Info, err = NewInfo(v2, compiler.NewContext("info", context)) + if err != nil { + errors = append(errors, err) + } + } + // string host = 3; + v3 := compiler.MapValueForKey(m, "host") + if v3 != nil { + x.Host, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for host: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string base_path = 4; + v4 := compiler.MapValueForKey(m, "basePath") + if v4 != nil { + x.BasePath, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for basePath: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string schemes = 5; + v5 := compiler.MapValueForKey(m, "schemes") + if v5 != nil { + v, ok := v5.([]interface{}) + if ok { + x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [http https ws wss] + if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { + message := fmt.Sprintf("has unexpected value for schemes: %+v", v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string consumes = 6; + v6 := compiler.MapValueForKey(m, "consumes") + if v6 != nil { + v, ok := v6.([]interface{}) + if ok { + x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // 
repeated string produces = 7; + v7 := compiler.MapValueForKey(m, "produces") + if v7 != nil { + v, ok := v7.([]interface{}) + if ok { + x.Produces = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Paths paths = 8; + v8 := compiler.MapValueForKey(m, "paths") + if v8 != nil { + var err error + x.Paths, err = NewPaths(v8, compiler.NewContext("paths", context)) + if err != nil { + errors = append(errors, err) + } + } + // Definitions definitions = 9; + v9 := compiler.MapValueForKey(m, "definitions") + if v9 != nil { + var err error + x.Definitions, err = NewDefinitions(v9, compiler.NewContext("definitions", context)) + if err != nil { + errors = append(errors, err) + } + } + // ParameterDefinitions parameters = 10; + v10 := compiler.MapValueForKey(m, "parameters") + if v10 != nil { + var err error + x.Parameters, err = NewParameterDefinitions(v10, compiler.NewContext("parameters", context)) + if err != nil { + errors = append(errors, err) + } + } + // ResponseDefinitions responses = 11; + v11 := compiler.MapValueForKey(m, "responses") + if v11 != nil { + var err error + x.Responses, err = NewResponseDefinitions(v11, compiler.NewContext("responses", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated SecurityRequirement security = 12; + v12 := compiler.MapValueForKey(m, "security") + if v12 != nil { + // repeated SecurityRequirement + x.Security = make([]*SecurityRequirement, 0) + a, ok := v12.([]interface{}) + if ok { + for _, item := range a { + y, err := NewSecurityRequirement(item, compiler.NewContext("security", context)) + if err != nil { + errors = append(errors, err) + } + x.Security = append(x.Security, y) + } + } + } + // SecurityDefinitions security_definitions = 13; + v13 := compiler.MapValueForKey(m, "securityDefinitions") + if v13 != nil { + var err error + x.SecurityDefinitions, err = NewSecurityDefinitions(v13, compiler.NewContext("securityDefinitions", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated Tag tags = 14; + v14 := compiler.MapValueForKey(m, "tags") + if v14 != nil { + // repeated Tag + x.Tags = make([]*Tag, 0) + a, ok := v14.([]interface{}) + if ok { + for _, item := range a { + y, err := NewTag(item, compiler.NewContext("tags", context)) + if err != nil { + errors = append(errors, err) + } + x.Tags = append(x.Tags, y) + } + } + } + // ExternalDocs external_docs = 15; + v15 := compiler.MapValueForKey(m, "externalDocs") + if v15 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v15, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 16; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + 
return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewExamples creates an object of type Examples if possible, returning an error if not. +func NewExamples(in interface{}, context *compiler.Context) (*Examples, error) { + errors := make([]error, 0) + x := &Examples{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewExternalDocs creates an object of type ExternalDocs if possible, returning an error if not. +func NewExternalDocs(in interface{}, context *compiler.Context) (*ExternalDocs, error) { + errors := make([]error, 0) + x := &ExternalDocs{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"url"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "url"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, 
compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewFileSchema creates an object of type FileSchema if possible, returning an error if not. +func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, error) { + errors := make([]error, 0) + x := &FileSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"default", "description", "example", "externalDocs", "format", "readOnly", "required", "title", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string format = 1; + v1 := compiler.MapValueForKey(m, "format") + if v1 != nil { + x.Format, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string title = 2; + v2 := compiler.MapValueForKey(m, "title") + if v2 != nil { + x.Title, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 4; + v4 := compiler.MapValueForKey(m, "default") + if v4 != nil { + var err error + x.Default, err = NewAny(v4, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated string required = 5; + v5 := compiler.MapValueForKey(m, "required") + if v5 != nil { + v, ok := v5.([]interface{}) + if ok { + x.Required = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [file] + if ok && !compiler.StringArrayContainsValue([]string{"file"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool read_only = 7; + v7 := compiler.MapValueForKey(m, "readOnly") + if v7 != nil { + x.ReadOnly, ok = v7.(bool) + if !ok { + message := fmt.Sprintf("has 
unexpected value for readOnly: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 8; + v8 := compiler.MapValueForKey(m, "externalDocs") + if v8 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v8, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // Any example = 9; + v9 := compiler.MapValueForKey(m, "example") + if v9 != nil { + var err error + x.Example, err = NewAny(v9, compiler.NewContext("example", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 10; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewFormDataParameterSubSchema creates an object of type FormDataParameterSubSchema if possible, returning an error if not. +func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (*FormDataParameterSubSchema, error) { + errors := make([]error, 0) + x := &FormDataParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = v1.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [formData] + if ok && !compiler.StringArrayContainsValue([]string{"formData"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has 
unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_empty_value = 5; + v5 := compiler.MapValueForKey(m, "allowEmptyValue") + if v5 != nil { + x.AllowEmptyValue, ok = v5.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array file] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array", "file"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 7; + v7 := compiler.MapValueForKey(m, "format") + if v7 != nil { + x.Format, ok = v7.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 8; + v8 := compiler.MapValueForKey(m, "items") + if v8 != nil { + var err error + x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 9; + v9 := compiler.MapValueForKey(m, "collectionFormat") + if v9 != nil { + x.CollectionFormat, ok = v9.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes multi] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 10; + v10 := compiler.MapValueForKey(m, "default") + if v10 != nil { + var err error + x.Default, err = NewAny(v10, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 11; + v11 := compiler.MapValueForKey(m, "maximum") + if v11 != nil { + switch v11 := v11.(type) { + case float64: + x.Maximum = v11 + case float32: + x.Maximum = float64(v11) + case uint64: + x.Maximum = float64(v11) + case uint32: + x.Maximum = float64(v11) + case int64: + x.Maximum = float64(v11) + case int32: + x.Maximum = float64(v11) + case int: + x.Maximum = float64(v11) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 12; + v12 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v12 != nil { + x.ExclusiveMaximum, ok = v12.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12) + errors = 
append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 13; + v13 := compiler.MapValueForKey(m, "minimum") + if v13 != nil { + switch v13 := v13.(type) { + case float64: + x.Minimum = v13 + case float32: + x.Minimum = float64(v13) + case uint64: + x.Minimum = float64(v13) + case uint32: + x.Minimum = float64(v13) + case int64: + x.Minimum = float64(v13) + case int32: + x.Minimum = float64(v13) + case int: + x.Minimum = float64(v13) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 14; + v14 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v14 != nil { + x.ExclusiveMinimum, ok = v14.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 15; + v15 := compiler.MapValueForKey(m, "maxLength") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 16; + v16 := compiler.MapValueForKey(m, "minLength") + if v16 != nil { + t, ok := v16.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 17; + v17 := compiler.MapValueForKey(m, "pattern") + if v17 != nil { + x.Pattern, ok = v17.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 18; + v18 := compiler.MapValueForKey(m, "maxItems") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 19; + v19 := compiler.MapValueForKey(m, "minItems") + if v19 != nil { + t, ok := v19.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 20; + v20 := compiler.MapValueForKey(m, "uniqueItems") + if v20 != nil { + x.UniqueItems, ok = v20.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 21; + v21 := compiler.MapValueForKey(m, "enum") + if v21 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v21.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 22; + v22 := compiler.MapValueForKey(m, "multipleOf") + if v22 != nil { + switch v22 := v22.(type) { + case float64: + x.MultipleOf = v22 + case float32: + x.MultipleOf = float64(v22) + case uint64: + x.MultipleOf = float64(v22) + case uint32: + x.MultipleOf = float64(v22) + case int64: + x.MultipleOf = float64(v22) + case int32: + x.MultipleOf = float64(v22) + case int: + 
x.MultipleOf = float64(v22) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 23; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeader creates an object of type Header if possible, returning an error if not. +func NewHeader(in interface{}, context *compiler.Context) (*Header, error) { + errors := make([]error, 0) + x := &Header{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number integer boolean array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 3; + v3 := compiler.MapValueForKey(m, "items") + if v3 != nil { + var err error + x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 4; + v4 := compiler.MapValueForKey(m, "collectionFormat") + if v4 
!= nil { + x.CollectionFormat, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 6; + v6 := compiler.MapValueForKey(m, "maximum") + if v6 != nil { + switch v6 := v6.(type) { + case float64: + x.Maximum = v6 + case float32: + x.Maximum = float64(v6) + case uint64: + x.Maximum = float64(v6) + case uint32: + x.Maximum = float64(v6) + case int64: + x.Maximum = float64(v6) + case int32: + x.Maximum = float64(v6) + case int: + x.Maximum = float64(v6) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 7; + v7 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v7 != nil { + x.ExclusiveMaximum, ok = v7.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 8; + v8 := compiler.MapValueForKey(m, "minimum") + if v8 != nil { + switch v8 := v8.(type) { + case float64: + x.Minimum = v8 + case float32: + x.Minimum = float64(v8) + case uint64: + x.Minimum = float64(v8) + case uint32: + x.Minimum = float64(v8) + case int64: + x.Minimum = float64(v8) + case int32: + x.Minimum = float64(v8) + case int: + x.Minimum = float64(v8) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 9; + v9 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v9 != nil { + x.ExclusiveMinimum, ok = v9.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 10; + v10 := compiler.MapValueForKey(m, "maxLength") + if v10 != nil { + t, ok := v10.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 11; + v11 := compiler.MapValueForKey(m, "minLength") + if v11 != nil { + t, ok := v11.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 12; + v12 := compiler.MapValueForKey(m, "pattern") + if v12 != nil { + x.Pattern, ok = v12.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 13; + v13 := compiler.MapValueForKey(m, "maxItems") + if v13 != nil { + t, ok := v13.(int) + if ok { + x.MaxItems = 
int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 14; + v14 := compiler.MapValueForKey(m, "minItems") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 15; + v15 := compiler.MapValueForKey(m, "uniqueItems") + if v15 != nil { + x.UniqueItems, ok = v15.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 16; + v16 := compiler.MapValueForKey(m, "enum") + if v16 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v16.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 17; + v17 := compiler.MapValueForKey(m, "multipleOf") + if v17 != nil { + switch v17 := v17.(type) { + case float64: + x.MultipleOf = v17 + case float32: + x.MultipleOf = float64(v17) + case uint64: + x.MultipleOf = float64(v17) + case uint32: + x.MultipleOf = float64(v17) + case int64: + x.MultipleOf = float64(v17) + case int32: + x.MultipleOf = float64(v17) + case int: + x.MultipleOf = float64(v17) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 18; + v18 := compiler.MapValueForKey(m, "description") + if v18 != nil { + x.Description, ok = v18.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 19; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeaderParameterSubSchema creates an object of type HeaderParameterSubSchema if possible, returning an error if not. 
+func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*HeaderParameterSubSchema, error) { + errors := make([]error, 0) + x := &HeaderParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = v1.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [header] + if ok && !compiler.StringArrayContainsValue([]string{"header"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 5; + v5 := compiler.MapValueForKey(m, "type") + if v5 != nil { + x.Type, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 6; + v6 := compiler.MapValueForKey(m, "format") + if v6 != nil { + x.Format, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 7; + v7 := compiler.MapValueForKey(m, "items") + if v7 != nil { + var err error + x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 8; + v8 := compiler.MapValueForKey(m, 
"collectionFormat") + if v8 != nil { + x.CollectionFormat, ok = v8.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 9; + v9 := compiler.MapValueForKey(m, "default") + if v9 != nil { + var err error + x.Default, err = NewAny(v9, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 10; + v10 := compiler.MapValueForKey(m, "maximum") + if v10 != nil { + switch v10 := v10.(type) { + case float64: + x.Maximum = v10 + case float32: + x.Maximum = float64(v10) + case uint64: + x.Maximum = float64(v10) + case uint32: + x.Maximum = float64(v10) + case int64: + x.Maximum = float64(v10) + case int32: + x.Maximum = float64(v10) + case int: + x.Maximum = float64(v10) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 11; + v11 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v11 != nil { + x.ExclusiveMaximum, ok = v11.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 12; + v12 := compiler.MapValueForKey(m, "minimum") + if v12 != nil { + switch v12 := v12.(type) { + case float64: + x.Minimum = v12 + case float32: + x.Minimum = float64(v12) + case uint64: + x.Minimum = float64(v12) + case uint32: + x.Minimum = float64(v12) + case int64: + x.Minimum = float64(v12) + case int32: + x.Minimum = float64(v12) + case int: + x.Minimum = float64(v12) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 13; + v13 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v13 != nil { + x.ExclusiveMinimum, ok = v13.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 14; + v14 := compiler.MapValueForKey(m, "maxLength") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 15; + v15 := compiler.MapValueForKey(m, "minLength") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 16; + v16 := compiler.MapValueForKey(m, "pattern") + if v16 != nil { + x.Pattern, ok = v16.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 17; + v17 := compiler.MapValueForKey(m, "maxItems") + 
if v17 != nil { + t, ok := v17.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 18; + v18 := compiler.MapValueForKey(m, "minItems") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 19; + v19 := compiler.MapValueForKey(m, "uniqueItems") + if v19 != nil { + x.UniqueItems, ok = v19.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v20.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 21; + v21 := compiler.MapValueForKey(m, "multipleOf") + if v21 != nil { + switch v21 := v21.(type) { + case float64: + x.MultipleOf = v21 + case float32: + x.MultipleOf = float64(v21) + case uint64: + x.MultipleOf = float64(v21) + case uint32: + x.MultipleOf = float64(v21) + case int64: + x.MultipleOf = float64(v21) + case int32: + x.MultipleOf = float64(v21) + case int: + x.MultipleOf = float64(v21) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 22; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeaders creates an object of type Headers if possible, returning an error if not. 
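Reviewer note: every New* constructor in this file ends with the same vendor-extension sweep: keys matching ^x- are accepted unconditionally and wrapped as NamedAny pairs, with compiler.HandleExtension given first refusal at the value. A simplified sketch of just the selection step, assuming a plain map[string]interface{} rather than the ordered map representation the compiler package actually unpacks:

package main

import (
	"fmt"
	"strings"
)

// collectExtensions keeps only the keys the OpenAPI v2 schema treats as
// vendor extensions (pattern ^x-), preserving their raw values.
func collectExtensions(m map[string]interface{}) map[string]interface{} {
	out := make(map[string]interface{})
	for k, v := range m {
		if strings.HasPrefix(k, "x-") {
			out[k] = v
		}
	}
	return out
}

func main() {
	doc := map[string]interface{}{
		"title":      "Example",
		"x-logo":     "logo.png",
		"x-internal": true,
	}
	fmt.Println(collectExtensions(doc)) // only the x- keys survive
}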
+func NewHeaders(in interface{}, context *compiler.Context) (*Headers, error) { + errors := make([]error, 0) + x := &Headers{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedHeader additional_properties = 1; + // MAP: Header + x.AdditionalProperties = make([]*NamedHeader, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedHeader{} + pair.Name = k + var err error + pair.Value, err = NewHeader(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewInfo creates an object of type Info if possible, returning an error if not. +func NewInfo(in interface{}, context *compiler.Context) (*Info, error) { + errors := make([]error, 0) + x := &Info{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"title", "version"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"contact", "description", "license", "termsOfService", "title", "version"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string title = 1; + v1 := compiler.MapValueForKey(m, "title") + if v1 != nil { + x.Title, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string version = 2; + v2 := compiler.MapValueForKey(m, "version") + if v2 != nil { + x.Version, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for version: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string terms_of_service = 4; + v4 := compiler.MapValueForKey(m, "termsOfService") + if v4 != nil { + x.TermsOfService, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for termsOfService: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Contact contact = 5; + v5 := compiler.MapValueForKey(m, "contact") + if v5 != nil { + var err error + x.Contact, err = NewContact(v5, compiler.NewContext("contact", context)) + if err != nil { + errors = append(errors, err) + } + } + // License license = 6; + v6 := compiler.MapValueForKey(m, "license") + if v6 != nil { + var err error + x.License, err = NewLicense(v6, 
compiler.NewContext("license", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 7; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewItemsItem creates an object of type ItemsItem if possible, returning an error if not. +func NewItemsItem(in interface{}, context *compiler.Context) (*ItemsItem, error) { + errors := make([]error, 0) + x := &ItemsItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value for item array: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + x.Schema = make([]*Schema, 0) + y, err := NewSchema(m, compiler.NewContext("<array>", context)) + if err != nil { + return nil, err + } + x.Schema = append(x.Schema, y) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewJsonReference creates an object of type JsonReference if possible, returning an error if not. +func NewJsonReference(in interface{}, context *compiler.Context) (*JsonReference, error) { + errors := make([]error, 0) + x := &JsonReference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"$ref"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"$ref", "description"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewLicense creates an object of type License if possible, returning an error if not. 
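Reviewer note: NewInfo and NewJsonReference above (and NewLicense just below) open with the same two checks: every required key must be present, and any present key outside the allowed list that does not match the ^x- extension pattern is invalid. A standalone sketch of that validation, with hypothetical helpers standing in for compiler.MissingKeysInMap and compiler.InvalidKeysInMap:

package main

import (
	"fmt"
	"regexp"
)

var extension = regexp.MustCompile(`^x-`)

// validateKeys reports which required keys are absent and which present
// keys are neither in the allowed list nor vendor extensions.
func validateKeys(m map[string]interface{}, required, allowed []string) (missing, invalid []string) {
	for _, k := range required {
		if _, ok := m[k]; !ok {
			missing = append(missing, k)
		}
	}
	allowedSet := make(map[string]bool)
	for _, k := range allowed {
		allowedSet[k] = true
	}
	for k := range m {
		if !allowedSet[k] && !extension.MatchString(k) {
			invalid = append(invalid, k)
		}
	}
	return missing, invalid
}

func main() {
	license := map[string]interface{}{"url": "https://example.com", "x-spdx": "MIT"}
	missing, invalid := validateKeys(license, []string{"name"}, []string{"name", "url"})
	fmt.Println(missing, invalid) // [name] [] — "x-spdx" passes via the pattern
}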
+func NewLicense(in interface{}, context *compiler.Context) (*License, error) { + errors := make([]error, 0) + x := &License{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"name"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"name", "url"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedAny creates an object of type NamedAny if possible, returning an error if not. 
+func NewNamedAny(in interface{}, context *compiler.Context) (*NamedAny, error) { + errors := make([]error, 0) + x := &NamedAny{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewAny(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedHeader creates an object of type NamedHeader if possible, returning an error if not. +func NewNamedHeader(in interface{}, context *compiler.Context) (*NamedHeader, error) { + errors := make([]error, 0) + x := &NamedHeader{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Header value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewHeader(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedParameter creates an object of type NamedParameter if possible, returning an error if not. 
+func NewNamedParameter(in interface{}, context *compiler.Context) (*NamedParameter, error) { + errors := make([]error, 0) + x := &NamedParameter{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Parameter value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewParameter(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedPathItem creates an object of type NamedPathItem if possible, returning an error if not. +func NewNamedPathItem(in interface{}, context *compiler.Context) (*NamedPathItem, error) { + errors := make([]error, 0) + x := &NamedPathItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PathItem value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewPathItem(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedResponse creates an object of type NamedResponse if possible, returning an error if not. 
+func NewNamedResponse(in interface{}, context *compiler.Context) (*NamedResponse, error) { + errors := make([]error, 0) + x := &NamedResponse{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Response value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewResponse(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedResponseValue creates an object of type NamedResponseValue if possible, returning an error if not. +func NewNamedResponseValue(in interface{}, context *compiler.Context) (*NamedResponseValue, error) { + errors := make([]error, 0) + x := &NamedResponseValue{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ResponseValue value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewResponseValue(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedSchema creates an object of type NamedSchema if possible, returning an error if not. 
+func NewNamedSchema(in interface{}, context *compiler.Context) (*NamedSchema, error) { + errors := make([]error, 0) + x := &NamedSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Schema value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewSchema(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedSecurityDefinitionsItem creates an object of type NamedSecurityDefinitionsItem if possible, returning an error if not. +func NewNamedSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*NamedSecurityDefinitionsItem, error) { + errors := make([]error, 0) + x := &NamedSecurityDefinitionsItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // SecurityDefinitionsItem value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewSecurityDefinitionsItem(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedString creates an object of type NamedString if possible, returning an error if not. 
+func NewNamedString(in interface{}, context *compiler.Context) (*NamedString, error) { + errors := make([]error, 0) + x := &NamedString{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + x.Value, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for value: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedStringArray creates an object of type NamedStringArray if possible, returning an error if not. +func NewNamedStringArray(in interface{}, context *compiler.Context) (*NamedStringArray, error) { + errors := make([]error, 0) + x := &NamedStringArray{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // StringArray value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewStringArray(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNonBodyParameter creates an object of type NonBodyParameter if possible, returning an error if not. 
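Reviewer note: the run of NewNamed* constructors above (NamedAny through NamedStringArray) all share one generated shape: a {name, value} pair type used in place of a protobuf map so that the key order of the source document is preserved. A compact generic sketch of that shared shape; it assumes Go 1.18+ for type parameters, which the vendored file itself predates:

package main

import "fmt"

// Named pairs a key from the source document with its decoded value,
// mirroring the generated NamedHeader/NamedParameter/... types.
type Named[T any] struct {
	Name  string
	Value T
}

// decodeNamed builds ordered Named pairs from parallel key/value slices,
// applying a per-value decoder just as NewNamedHeader applies NewHeader.
func decodeNamed[T any](keys []string, values []interface{}, decode func(interface{}) (T, error)) ([]Named[T], error) {
	out := make([]Named[T], 0, len(keys))
	for i, k := range keys {
		v, err := decode(values[i])
		if err != nil {
			return nil, err
		}
		out = append(out, Named[T]{Name: k, Value: v})
	}
	return out, nil
}

func main() {
	pairs, _ := decodeNamed([]string{"a", "b"}, []interface{}{1, 2}, func(v interface{}) (int, error) {
		return v.(int), nil
	})
	fmt.Println(pairs) // source key order is preserved: [{a 1} {b 2}]
}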
+func NewNonBodyParameter(in interface{}, context *compiler.Context) (*NonBodyParameter, error) { + errors := make([]error, 0) + x := &NonBodyParameter{} + matched := false + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // HeaderParameterSubSchema header_parameter_sub_schema = 1; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewHeaderParameterSubSchema(m, compiler.NewContext("headerParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_HeaderParameterSubSchema{HeaderParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + // FormDataParameterSubSchema form_data_parameter_sub_schema = 2; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewFormDataParameterSubSchema(m, compiler.NewContext("formDataParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_FormDataParameterSubSchema{FormDataParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + // QueryParameterSubSchema query_parameter_sub_schema = 3; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewQueryParameterSubSchema(m, compiler.NewContext("queryParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_QueryParameterSubSchema{QueryParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + // PathParameterSubSchema path_parameter_sub_schema = 4; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewPathParameterSubSchema(m, compiler.NewContext("pathParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_PathParameterSubSchema{PathParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2AccessCodeSecurity creates an object of type Oauth2AccessCodeSecurity if possible, returning an error if not. 
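Reviewer note: NewNonBodyParameter above resolves a protobuf oneof by trial: every candidate sub-schema constructor is attempted against the same map, each success overwrites x.Oneof (so the last successful match wins), and the accumulated mismatch errors are discarded once anything matched. A sketch of that strategy; tryOneof is a hypothetical name, and errors.Join assumes Go 1.20+:

package main

import (
	"errors"
	"fmt"
)

// tryOneof mirrors the generated oneof resolution: run every candidate
// decoder, let each success overwrite the result (last match wins), and
// drop the collected errors if anything matched at all.
func tryOneof(in interface{}, candidates []func(interface{}) (interface{}, error)) (interface{}, error) {
	var result interface{}
	var errs []error
	matched := false
	for _, decode := range candidates {
		if v, err := decode(in); err == nil {
			result = v
			matched = true
		} else {
			errs = append(errs, err)
		}
	}
	if matched {
		return result, nil
	}
	return nil, errors.Join(errs...)
}

func main() {
	asInt := func(in interface{}) (interface{}, error) {
		if n, ok := in.(int); ok {
			return n, nil
		}
		return nil, errors.New("not an int")
	}
	asString := func(in interface{}) (interface{}, error) {
		if s, ok := in.(string); ok {
			return s, nil
		}
		return nil, errors.New("not a string")
	}
	v, err := tryOneof("header", []func(interface{}) (interface{}, error){asInt, asString})
	fmt.Println(v, err) // "header" <nil> — the int mismatch is discarded
}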
+func NewOauth2AccessCodeSecurity(in interface{}, context *compiler.Context) (*Oauth2AccessCodeSecurity, error) { + errors := make([]error, 0) + x := &Oauth2AccessCodeSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"authorizationUrl", "flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [accessCode] + if ok && !compiler.StringArrayContainsValue([]string{"accessCode"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string authorization_url = 4; + v4 := compiler.MapValueForKey(m, "authorizationUrl") + if v4 != nil { + x.AuthorizationUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string token_url = 5; + v5 := compiler.MapValueForKey(m, "tokenUrl") + if v5 != nil { + x.TokenUrl, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 6; + v6 := compiler.MapValueForKey(m, "description") + if v6 != nil { + x.Description, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 7; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := 
compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2ApplicationSecurity creates an object of type Oauth2ApplicationSecurity if possible, returning an error if not. +func NewOauth2ApplicationSecurity(in interface{}, context *compiler.Context) (*Oauth2ApplicationSecurity, error) { + errors := make([]error, 0) + x := &Oauth2ApplicationSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [application] + if ok && !compiler.StringArrayContainsValue([]string{"application"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string token_url = 4; + v4 := compiler.MapValueForKey(m, "tokenUrl") + if v4 != nil { + x.TokenUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string 
description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2ImplicitSecurity creates an object of type Oauth2ImplicitSecurity if possible, returning an error if not. +func NewOauth2ImplicitSecurity(in interface{}, context *compiler.Context) (*Oauth2ImplicitSecurity, error) { + errors := make([]error, 0) + x := &Oauth2ImplicitSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"authorizationUrl", "flow", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [implicit] + if ok && !compiler.StringArrayContainsValue([]string{"implicit"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = 
NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string authorization_url = 4; + v4 := compiler.MapValueForKey(m, "authorizationUrl") + if v4 != nil { + x.AuthorizationUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2PasswordSecurity creates an object of type Oauth2PasswordSecurity if possible, returning an error if not. +func NewOauth2PasswordSecurity(in interface{}, context *compiler.Context) (*Oauth2PasswordSecurity, error) { + errors := make([]error, 0) + x := &Oauth2PasswordSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, 
compiler.NewError(context, message)) + } + // check for valid enum values + // [password] + if ok && !compiler.StringArrayContainsValue([]string{"password"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string token_url = 4; + v4 := compiler.MapValueForKey(m, "tokenUrl") + if v4 != nil { + x.TokenUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2Scopes creates an object of type Oauth2Scopes if possible, returning an error if not. +func NewOauth2Scopes(in interface{}, context *compiler.Context) (*Oauth2Scopes, error) { + errors := make([]error, 0) + x := &Oauth2Scopes{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedString additional_properties = 1; + // MAP: string + x.AdditionalProperties = make([]*NamedString, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedString{} + pair.Name = k + pair.Value = v.(string) + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOperation creates an object of type Operation if possible, returning an error if not. 
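Reviewer note: the four Oauth2*Security constructors above differ only in their required keys and in the single value each accepts for "flow" (accessCode, application, implicit, password); the enum check they all share is a linear scan over the allowed values. A minimal sketch, with a hypothetical containsValue helper standing in for compiler.StringArrayContainsValue:

package main

import "fmt"

// containsValue reports whether value is one of the allowed enum values,
// as the generated code checks the "type" and "flow" fields.
func containsValue(allowed []string, value string) bool {
	for _, a := range allowed {
		if a == value {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(containsValue([]string{"accessCode", "application", "implicit", "password"}, "implicit")) // true
	fmt.Println(containsValue([]string{"oauth2"}, "basic"))                                               // false
}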
+func NewOperation(in interface{}, context *compiler.Context) (*Operation, error) { + errors := make([]error, 0) + x := &Operation{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"responses"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"consumes", "deprecated", "description", "externalDocs", "operationId", "parameters", "produces", "responses", "schemes", "security", "summary", "tags"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated string tags = 1; + v1 := compiler.MapValueForKey(m, "tags") + if v1 != nil { + v, ok := v1.([]interface{}) + if ok { + x.Tags = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for tags: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string summary = 2; + v2 := compiler.MapValueForKey(m, "summary") + if v2 != nil { + x.Summary, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for summary: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 4; + v4 := compiler.MapValueForKey(m, "externalDocs") + if v4 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v4, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // string operation_id = 5; + v5 := compiler.MapValueForKey(m, "operationId") + if v5 != nil { + x.OperationId, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for operationId: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string produces = 6; + v6 := compiler.MapValueForKey(m, "produces") + if v6 != nil { + v, ok := v6.([]interface{}) + if ok { + x.Produces = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string consumes = 7; + v7 := compiler.MapValueForKey(m, "consumes") + if v7 != nil { + v, ok := v7.([]interface{}) + if ok { + x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated ParametersItem parameters = 8; + v8 := compiler.MapValueForKey(m, "parameters") + if v8 != nil { + // repeated ParametersItem + x.Parameters = 
make([]*ParametersItem, 0) + a, ok := v8.([]interface{}) + if ok { + for _, item := range a { + y, err := NewParametersItem(item, compiler.NewContext("parameters", context)) + if err != nil { + errors = append(errors, err) + } + x.Parameters = append(x.Parameters, y) + } + } + } + // Responses responses = 9; + v9 := compiler.MapValueForKey(m, "responses") + if v9 != nil { + var err error + x.Responses, err = NewResponses(v9, compiler.NewContext("responses", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated string schemes = 10; + v10 := compiler.MapValueForKey(m, "schemes") + if v10 != nil { + v, ok := v10.([]interface{}) + if ok { + x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [http https ws wss] + if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { + message := fmt.Sprintf("has unexpected value for schemes: %+v", v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool deprecated = 11; + v11 := compiler.MapValueForKey(m, "deprecated") + if v11 != nil { + x.Deprecated, ok = v11.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for deprecated: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated SecurityRequirement security = 12; + v12 := compiler.MapValueForKey(m, "security") + if v12 != nil { + // repeated SecurityRequirement + x.Security = make([]*SecurityRequirement, 0) + a, ok := v12.([]interface{}) + if ok { + for _, item := range a { + y, err := NewSecurityRequirement(item, compiler.NewContext("security", context)) + if err != nil { + errors = append(errors, err) + } + x.Security = append(x.Security, y) + } + } + } + // repeated NamedAny vendor_extension = 13; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParameter creates an object of type Parameter if possible, returning an error if not. 
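Reviewer note: NewOperation above validates "schemes" as a whole-array enum: every element must come from {http, https, ws, wss}, which is what compiler.StringArrayContainsValues (plural) checks. A sketch of that subset test, again with hypothetical helper names, redeclared here so the sketch stands alone:

package main

import "fmt"

func containsValue(allowed []string, value string) bool {
	for _, a := range allowed {
		if a == value {
			return true
		}
	}
	return false
}

// containsValues reports whether every element of values is allowed,
// mirroring the schemes check in NewOperation.
func containsValues(allowed, values []string) bool {
	for _, v := range values {
		if !containsValue(allowed, v) {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(containsValues([]string{"http", "https", "ws", "wss"}, []string{"https", "wss"})) // true
	fmt.Println(containsValues([]string{"http", "https", "ws", "wss"}, []string{"ftp"}))          // false
}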
+func NewParameter(in interface{}, context *compiler.Context) (*Parameter, error) {
+	errors := make([]error, 0)
+	x := &Parameter{}
+	matched := false
+	// BodyParameter body_parameter = 1;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewBodyParameter(m, compiler.NewContext("bodyParameter", context))
+			if matchingError == nil {
+				x.Oneof = &Parameter_BodyParameter{BodyParameter: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// NonBodyParameter non_body_parameter = 2;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewNonBodyParameter(m, compiler.NewContext("nonBodyParameter", context))
+			if matchingError == nil {
+				x.Oneof = &Parameter_NonBodyParameter{NonBodyParameter: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	if matched {
+		// since the oneof matched one of its possibilities, discard any matching errors
+		errors = make([]error, 0)
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewParameterDefinitions creates an object of type ParameterDefinitions if possible, returning an error if not.
+func NewParameterDefinitions(in interface{}, context *compiler.Context) (*ParameterDefinitions, error) {
+	errors := make([]error, 0)
+	x := &ParameterDefinitions{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedParameter additional_properties = 1;
+		// MAP: Parameter
+		x.AdditionalProperties = make([]*NamedParameter, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				pair := &NamedParameter{}
+				pair.Name = k
+				var err error
+				pair.Value, err = NewParameter(v, compiler.NewContext(k, context))
+				if err != nil {
+					errors = append(errors, err)
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewParametersItem creates an object of type ParametersItem if possible, returning an error if not.
+func NewParametersItem(in interface{}, context *compiler.Context) (*ParametersItem, error) {
+	errors := make([]error, 0)
+	x := &ParametersItem{}
+	matched := false
+	// Parameter parameter = 1;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewParameter(m, compiler.NewContext("parameter", context))
+			if matchingError == nil {
+				x.Oneof = &ParametersItem_Parameter{Parameter: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// JsonReference json_reference = 2;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context))
+			if matchingError == nil {
+				x.Oneof = &ParametersItem_JsonReference{JsonReference: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	if matched {
+		// since the oneof matched one of its possibilities, discard any matching errors
+		errors = make([]error, 0)
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewPathItem creates an object of type PathItem if possible, returning an error if not.
+func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) {
+	errors := make([]error, 0)
+	x := &PathItem{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"$ref", "delete", "get", "head", "options", "parameters", "patch", "post", "put"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string _ref = 1;
+		v1 := compiler.MapValueForKey(m, "$ref")
+		if v1 != nil {
+			x.XRef, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Operation get = 2;
+		v2 := compiler.MapValueForKey(m, "get")
+		if v2 != nil {
+			var err error
+			x.Get, err = NewOperation(v2, compiler.NewContext("get", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Operation put = 3;
+		v3 := compiler.MapValueForKey(m, "put")
+		if v3 != nil {
+			var err error
+			x.Put, err = NewOperation(v3, compiler.NewContext("put", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Operation post = 4;
+		v4 := compiler.MapValueForKey(m, "post")
+		if v4 != nil {
+			var err error
+			x.Post, err = NewOperation(v4, compiler.NewContext("post", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Operation delete = 5;
+		v5 := compiler.MapValueForKey(m, "delete")
+		if v5 != nil {
+			var err error
+			x.Delete, err = NewOperation(v5, compiler.NewContext("delete", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Operation options = 6;
+		v6 := compiler.MapValueForKey(m, "options")
+		if v6 != nil {
+			var err error
+			x.Options, err = NewOperation(v6, compiler.NewContext("options", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Operation head = 7;
+		v7 := compiler.MapValueForKey(m, "head")
+		if v7 != nil {
+			var err error
+			x.Head, err = NewOperation(v7, compiler.NewContext("head", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Operation patch = 8;
+		v8 := compiler.MapValueForKey(m, "patch")
+		if v8 != nil {
+			var err error
+			x.Patch, err = NewOperation(v8, compiler.NewContext("patch", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated ParametersItem parameters = 9;
+		v9 := compiler.MapValueForKey(m, "parameters")
+		if v9 != nil {
+			// repeated ParametersItem
+			x.Parameters = make([]*ParametersItem, 0)
+			a, ok := v9.([]interface{})
+			if ok {
+				for _, item := range a {
+					y, err := NewParametersItem(item, compiler.NewContext("parameters", context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Parameters = append(x.Parameters, y)
+				}
+			}
+		}
+		// repeated NamedAny vendor_extension = 10;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewPathParameterSubSchema creates an object of type PathParameterSubSchema if possible, returning an error if not.
+func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*PathParameterSubSchema, error) {
+	errors := make([]error, 0)
+	x := &PathParameterSubSchema{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"required"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// bool required = 1;
+		v1 := compiler.MapValueForKey(m, "required")
+		if v1 != nil {
+			x.Required, ok = v1.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string in = 2;
+		v2 := compiler.MapValueForKey(m, "in")
+		if v2 != nil {
+			x.In, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [path]
+			if ok && !compiler.StringArrayContainsValue([]string{"path"}, x.In) {
+				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 3;
+		v3 := compiler.MapValueForKey(m, "description")
+		if v3 != nil {
+			x.Description, ok = v3.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string name = 4;
+		v4 := compiler.MapValueForKey(m, "name")
+		if v4 != nil {
+			x.Name, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string type = 5;
+		v5 := compiler.MapValueForKey(m, "type")
+		if v5 != nil {
+			x.Type, ok = v5.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [string number boolean integer array]
+			if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string format = 6;
+		v6 := compiler.MapValueForKey(m, "format")
+		if v6 != nil {
+			x.Format, ok = v6.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// PrimitivesItems items = 7;
+		v7 := compiler.MapValueForKey(m, "items")
+		if v7 != nil {
+			var err error
+			x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string collection_format = 8;
+		v8 := compiler.MapValueForKey(m, "collectionFormat")
+		if v8 != nil {
+			x.CollectionFormat, ok = v8.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [csv ssv tsv pipes]
+			if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) {
+				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Any default = 9;
+		v9 := compiler.MapValueForKey(m, "default")
+		if v9 != nil {
+			var err error
+			x.Default, err = NewAny(v9, compiler.NewContext("default", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// float maximum = 10;
+		v10 := compiler.MapValueForKey(m, "maximum")
+		if v10 != nil {
+			switch v10 := v10.(type) {
+			case float64:
+				x.Maximum = v10
+			case float32:
+				x.Maximum = float64(v10)
+			case uint64:
+				x.Maximum = float64(v10)
+			case uint32:
+				x.Maximum = float64(v10)
+			case int64:
+				x.Maximum = float64(v10)
+			case int32:
+				x.Maximum = float64(v10)
+			case int:
+				x.Maximum = float64(v10)
+			default:
+				message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_maximum = 11;
+		v11 := compiler.MapValueForKey(m, "exclusiveMaximum")
+		if v11 != nil {
+			x.ExclusiveMaximum, ok = v11.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// float minimum = 12;
+		v12 := compiler.MapValueForKey(m, "minimum")
+		if v12 != nil {
+			switch v12 := v12.(type) {
+			case float64:
+				x.Minimum = v12
+			case float32:
+				x.Minimum = float64(v12)
+			case uint64:
+				x.Minimum = float64(v12)
+			case uint32:
+				x.Minimum = float64(v12)
+			case int64:
+				x.Minimum = float64(v12)
+			case int32:
+				x.Minimum = float64(v12)
+			case int:
+				x.Minimum = float64(v12)
+			default:
+				message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_minimum = 13;
+		v13 := compiler.MapValueForKey(m, "exclusiveMinimum")
+		if v13 != nil {
+			x.ExclusiveMinimum, ok = v13.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_length = 14;
+		v14 := compiler.MapValueForKey(m, "maxLength")
+		if v14 != nil {
+			t, ok := v14.(int)
+			if ok {
+				x.MaxLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_length = 15;
+		v15 := compiler.MapValueForKey(m, "minLength")
+		if v15 != nil {
+			t, ok := v15.(int)
+			if ok {
+				x.MinLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string pattern = 16;
+		v16 := compiler.MapValueForKey(m, "pattern")
+		if v16 != nil {
+			x.Pattern, ok = v16.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_items = 17;
+		v17 := compiler.MapValueForKey(m, "maxItems")
+		if v17 != nil {
+			t, ok := v17.(int)
+			if ok {
+				x.MaxItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_items = 18;
+		v18 := compiler.MapValueForKey(m, "minItems")
+		if v18 != nil {
+			t, ok := v18.(int)
+			if ok {
+				x.MinItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool unique_items = 19;
+		v19 := compiler.MapValueForKey(m, "uniqueItems")
+		if v19 != nil {
+			x.UniqueItems, ok = v19.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated Any enum = 20;
+		v20 := compiler.MapValueForKey(m, "enum")
+		if v20 != nil {
+			// repeated Any
+			x.Enum = make([]*Any, 0)
+			a, ok := v20.([]interface{})
+			if ok {
+				for _, item := range a {
+					y, err := NewAny(item, compiler.NewContext("enum", context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Enum = append(x.Enum, y)
+				}
+			}
+		}
+		// float multiple_of = 21;
+		v21 := compiler.MapValueForKey(m, "multipleOf")
+		if v21 != nil {
+			switch v21 := v21.(type) {
+			case float64:
+				x.MultipleOf = v21
+			case float32:
+				x.MultipleOf = float64(v21)
+			case uint64:
+				x.MultipleOf = float64(v21)
+			case uint32:
+				x.MultipleOf = float64(v21)
+			case int64:
+				x.MultipleOf = float64(v21)
+			case int32:
+				x.MultipleOf = float64(v21)
+			case int:
+				x.MultipleOf = float64(v21)
+			default:
+				message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 22;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewPaths creates an object of type Paths if possible, returning an error if not.
+func NewPaths(in interface{}, context *compiler.Context) (*Paths, error) {
+	errors := make([]error, 0)
+	x := &Paths{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{}
+		allowedPatterns := []*regexp.Regexp{pattern0, pattern1}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// repeated NamedAny vendor_extension = 1;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+		// repeated NamedPathItem path = 2;
+		// MAP: PathItem ^/
+		x.Path = make([]*NamedPathItem, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "/") {
+					pair := &NamedPathItem{}
+					pair.Name = k
+					var err error
+					pair.Value, err = NewPathItem(v, compiler.NewContext(k, context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Path = append(x.Path, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewPrimitivesItems creates an object of type PrimitivesItems if possible, returning an error if not.
+func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesItems, error) { + errors := make([]error, 0) + x := &PrimitivesItems{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"collectionFormat", "default", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number integer boolean array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 3; + v3 := compiler.MapValueForKey(m, "items") + if v3 != nil { + var err error + x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 4; + v4 := compiler.MapValueForKey(m, "collectionFormat") + if v4 != nil { + x.CollectionFormat, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 6; + v6 := compiler.MapValueForKey(m, "maximum") + if v6 != nil { + switch v6 := v6.(type) { + case float64: + x.Maximum = v6 + case float32: + x.Maximum = float64(v6) + case uint64: + x.Maximum = float64(v6) + case uint32: + x.Maximum = float64(v6) + case int64: + x.Maximum = float64(v6) + case int32: + x.Maximum = float64(v6) + case int: + x.Maximum = float64(v6) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 7; + v7 := compiler.MapValueForKey(m, "exclusiveMaximum") + 
if v7 != nil { + x.ExclusiveMaximum, ok = v7.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 8; + v8 := compiler.MapValueForKey(m, "minimum") + if v8 != nil { + switch v8 := v8.(type) { + case float64: + x.Minimum = v8 + case float32: + x.Minimum = float64(v8) + case uint64: + x.Minimum = float64(v8) + case uint32: + x.Minimum = float64(v8) + case int64: + x.Minimum = float64(v8) + case int32: + x.Minimum = float64(v8) + case int: + x.Minimum = float64(v8) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 9; + v9 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v9 != nil { + x.ExclusiveMinimum, ok = v9.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 10; + v10 := compiler.MapValueForKey(m, "maxLength") + if v10 != nil { + t, ok := v10.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 11; + v11 := compiler.MapValueForKey(m, "minLength") + if v11 != nil { + t, ok := v11.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 12; + v12 := compiler.MapValueForKey(m, "pattern") + if v12 != nil { + x.Pattern, ok = v12.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 13; + v13 := compiler.MapValueForKey(m, "maxItems") + if v13 != nil { + t, ok := v13.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 14; + v14 := compiler.MapValueForKey(m, "minItems") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 15; + v15 := compiler.MapValueForKey(m, "uniqueItems") + if v15 != nil { + x.UniqueItems, ok = v15.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 16; + v16 := compiler.MapValueForKey(m, "enum") + if v16 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v16.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 17; + v17 := compiler.MapValueForKey(m, "multipleOf") + if v17 != nil { + switch v17 := v17.(type) { + case float64: + x.MultipleOf = v17 + case float32: + x.MultipleOf = float64(v17) + case uint64: + x.MultipleOf = float64(v17) + case uint32: + 
x.MultipleOf = float64(v17) + case int64: + x.MultipleOf = float64(v17) + case int32: + x.MultipleOf = float64(v17) + case int: + x.MultipleOf = float64(v17) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 18; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewProperties creates an object of type Properties if possible, returning an error if not. +func NewProperties(in interface{}, context *compiler.Context) (*Properties, error) { + errors := make([]error, 0) + x := &Properties{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSchema additional_properties = 1; + // MAP: Schema + x.AdditionalProperties = make([]*NamedSchema, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedSchema{} + pair.Name = k + var err error + pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewQueryParameterSubSchema creates an object of type QueryParameterSubSchema if possible, returning an error if not. 
+func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*QueryParameterSubSchema, error) { + errors := make([]error, 0) + x := &QueryParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = v1.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [query] + if ok && !compiler.StringArrayContainsValue([]string{"query"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_empty_value = 5; + v5 := compiler.MapValueForKey(m, "allowEmptyValue") + if v5 != nil { + x.AllowEmptyValue, ok = v5.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 7; + v7 := compiler.MapValueForKey(m, "format") + if v7 != nil { + x.Format, ok = v7.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + 
} + // PrimitivesItems items = 8; + v8 := compiler.MapValueForKey(m, "items") + if v8 != nil { + var err error + x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 9; + v9 := compiler.MapValueForKey(m, "collectionFormat") + if v9 != nil { + x.CollectionFormat, ok = v9.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes multi] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 10; + v10 := compiler.MapValueForKey(m, "default") + if v10 != nil { + var err error + x.Default, err = NewAny(v10, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 11; + v11 := compiler.MapValueForKey(m, "maximum") + if v11 != nil { + switch v11 := v11.(type) { + case float64: + x.Maximum = v11 + case float32: + x.Maximum = float64(v11) + case uint64: + x.Maximum = float64(v11) + case uint32: + x.Maximum = float64(v11) + case int64: + x.Maximum = float64(v11) + case int32: + x.Maximum = float64(v11) + case int: + x.Maximum = float64(v11) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 12; + v12 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v12 != nil { + x.ExclusiveMaximum, ok = v12.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 13; + v13 := compiler.MapValueForKey(m, "minimum") + if v13 != nil { + switch v13 := v13.(type) { + case float64: + x.Minimum = v13 + case float32: + x.Minimum = float64(v13) + case uint64: + x.Minimum = float64(v13) + case uint32: + x.Minimum = float64(v13) + case int64: + x.Minimum = float64(v13) + case int32: + x.Minimum = float64(v13) + case int: + x.Minimum = float64(v13) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 14; + v14 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v14 != nil { + x.ExclusiveMinimum, ok = v14.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 15; + v15 := compiler.MapValueForKey(m, "maxLength") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 16; + v16 := compiler.MapValueForKey(m, "minLength") + if v16 != nil { + t, ok := v16.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 17; 
+ v17 := compiler.MapValueForKey(m, "pattern") + if v17 != nil { + x.Pattern, ok = v17.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 18; + v18 := compiler.MapValueForKey(m, "maxItems") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 19; + v19 := compiler.MapValueForKey(m, "minItems") + if v19 != nil { + t, ok := v19.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 20; + v20 := compiler.MapValueForKey(m, "uniqueItems") + if v20 != nil { + x.UniqueItems, ok = v20.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 21; + v21 := compiler.MapValueForKey(m, "enum") + if v21 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v21.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 22; + v22 := compiler.MapValueForKey(m, "multipleOf") + if v22 != nil { + switch v22 := v22.(type) { + case float64: + x.MultipleOf = v22 + case float32: + x.MultipleOf = float64(v22) + case uint64: + x.MultipleOf = float64(v22) + case uint32: + x.MultipleOf = float64(v22) + case int64: + x.MultipleOf = float64(v22) + case int32: + x.MultipleOf = float64(v22) + case int: + x.MultipleOf = float64(v22) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 23; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponse creates an object of type Response if possible, returning an error if not. 
+func NewResponse(in interface{}, context *compiler.Context) (*Response, error) {
+	errors := make([]error, 0)
+	x := &Response{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"description"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"description", "examples", "headers", "schema"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string description = 1;
+		v1 := compiler.MapValueForKey(m, "description")
+		if v1 != nil {
+			x.Description, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// SchemaItem schema = 2;
+		v2 := compiler.MapValueForKey(m, "schema")
+		if v2 != nil {
+			var err error
+			x.Schema, err = NewSchemaItem(v2, compiler.NewContext("schema", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Headers headers = 3;
+		v3 := compiler.MapValueForKey(m, "headers")
+		if v3 != nil {
+			var err error
+			x.Headers, err = NewHeaders(v3, compiler.NewContext("headers", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Examples examples = 4;
+		v4 := compiler.MapValueForKey(m, "examples")
+		if v4 != nil {
+			var err error
+			x.Examples, err = NewExamples(v4, compiler.NewContext("examples", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated NamedAny vendor_extension = 5;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewResponseDefinitions creates an object of type ResponseDefinitions if possible, returning an error if not.
+func NewResponseDefinitions(in interface{}, context *compiler.Context) (*ResponseDefinitions, error) {
+	errors := make([]error, 0)
+	x := &ResponseDefinitions{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedResponse additional_properties = 1;
+		// MAP: Response
+		x.AdditionalProperties = make([]*NamedResponse, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				pair := &NamedResponse{}
+				pair.Name = k
+				var err error
+				pair.Value, err = NewResponse(v, compiler.NewContext(k, context))
+				if err != nil {
+					errors = append(errors, err)
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewResponseValue creates an object of type ResponseValue if possible, returning an error if not.
+func NewResponseValue(in interface{}, context *compiler.Context) (*ResponseValue, error) {
+	errors := make([]error, 0)
+	x := &ResponseValue{}
+	matched := false
+	// Response response = 1;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewResponse(m, compiler.NewContext("response", context))
+			if matchingError == nil {
+				x.Oneof = &ResponseValue_Response{Response: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// JsonReference json_reference = 2;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context))
+			if matchingError == nil {
+				x.Oneof = &ResponseValue_JsonReference{JsonReference: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	if matched {
+		// since the oneof matched one of its possibilities, discard any matching errors
+		errors = make([]error, 0)
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewResponses creates an object of type Responses if possible, returning an error if not.
+func NewResponses(in interface{}, context *compiler.Context) (*Responses, error) {
+	errors := make([]error, 0)
+	x := &Responses{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{}
+		allowedPatterns := []*regexp.Regexp{pattern2, pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// repeated NamedResponseValue response_code = 1;
+		// MAP: ResponseValue ^([0-9]{3})$|^(default)$
+		x.ResponseCode = make([]*NamedResponseValue, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if pattern2.MatchString(k) {
+					pair := &NamedResponseValue{}
+					pair.Name = k
+					var err error
+					pair.Value, err = NewResponseValue(v, compiler.NewContext(k, context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.ResponseCode = append(x.ResponseCode, pair)
+				}
+			}
+		}
+		// repeated NamedAny vendor_extension = 2;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewSchema creates an object of type Schema if possible, returning an error if not.
+func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { + errors := make([]error, 0) + x := &Schema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"$ref", "additionalProperties", "allOf", "default", "description", "discriminator", "enum", "example", "exclusiveMaximum", "exclusiveMinimum", "externalDocs", "format", "items", "maxItems", "maxLength", "maxProperties", "maximum", "minItems", "minLength", "minProperties", "minimum", "multipleOf", "pattern", "properties", "readOnly", "required", "title", "type", "uniqueItems", "xml"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string title = 3; + v3 := compiler.MapValueForKey(m, "title") + if v3 != nil { + x.Title, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 4; + v4 := compiler.MapValueForKey(m, "description") + if v4 != nil { + x.Description, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float multiple_of = 6; + v6 := compiler.MapValueForKey(m, "multipleOf") + if v6 != nil { + switch v6 := v6.(type) { + case float64: + x.MultipleOf = v6 + case float32: + x.MultipleOf = float64(v6) + case uint64: + x.MultipleOf = float64(v6) + case uint32: + x.MultipleOf = float64(v6) + case int64: + x.MultipleOf = float64(v6) + case int32: + x.MultipleOf = float64(v6) + case int: + x.MultipleOf = float64(v6) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float maximum = 7; + v7 := compiler.MapValueForKey(m, "maximum") + if v7 != nil { + switch v7 := v7.(type) { + case float64: + x.Maximum = v7 + case float32: + x.Maximum = float64(v7) + case uint64: + x.Maximum = float64(v7) + case uint32: + x.Maximum = float64(v7) + case int64: + x.Maximum = float64(v7) + case int32: + x.Maximum = float64(v7) + case int: + x.Maximum = float64(v7) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool 
exclusive_maximum = 8; + v8 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v8 != nil { + x.ExclusiveMaximum, ok = v8.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 9; + v9 := compiler.MapValueForKey(m, "minimum") + if v9 != nil { + switch v9 := v9.(type) { + case float64: + x.Minimum = v9 + case float32: + x.Minimum = float64(v9) + case uint64: + x.Minimum = float64(v9) + case uint32: + x.Minimum = float64(v9) + case int64: + x.Minimum = float64(v9) + case int32: + x.Minimum = float64(v9) + case int: + x.Minimum = float64(v9) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 10; + v10 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v10 != nil { + x.ExclusiveMinimum, ok = v10.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 11; + v11 := compiler.MapValueForKey(m, "maxLength") + if v11 != nil { + t, ok := v11.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 12; + v12 := compiler.MapValueForKey(m, "minLength") + if v12 != nil { + t, ok := v12.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 13; + v13 := compiler.MapValueForKey(m, "pattern") + if v13 != nil { + x.Pattern, ok = v13.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 14; + v14 := compiler.MapValueForKey(m, "maxItems") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 15; + v15 := compiler.MapValueForKey(m, "minItems") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 16; + v16 := compiler.MapValueForKey(m, "uniqueItems") + if v16 != nil { + x.UniqueItems, ok = v16.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_properties = 17; + v17 := compiler.MapValueForKey(m, "maxProperties") + if v17 != nil { + t, ok := v17.(int) + if ok { + x.MaxProperties = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxProperties: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_properties = 18; + v18 := compiler.MapValueForKey(m, "minProperties") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MinProperties = int64(t) + } else { + message := fmt.Sprintf("has 
unexpected value for minProperties: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string required = 19; + v19 := compiler.MapValueForKey(m, "required") + if v19 != nil { + v, ok := v19.([]interface{}) + if ok { + x.Required = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v20.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // AdditionalPropertiesItem additional_properties = 21; + v21 := compiler.MapValueForKey(m, "additionalProperties") + if v21 != nil { + var err error + x.AdditionalProperties, err = NewAdditionalPropertiesItem(v21, compiler.NewContext("additionalProperties", context)) + if err != nil { + errors = append(errors, err) + } + } + // TypeItem type = 22; + v22 := compiler.MapValueForKey(m, "type") + if v22 != nil { + var err error + x.Type, err = NewTypeItem(v22, compiler.NewContext("type", context)) + if err != nil { + errors = append(errors, err) + } + } + // ItemsItem items = 23; + v23 := compiler.MapValueForKey(m, "items") + if v23 != nil { + var err error + x.Items, err = NewItemsItem(v23, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated Schema all_of = 24; + v24 := compiler.MapValueForKey(m, "allOf") + if v24 != nil { + // repeated Schema + x.AllOf = make([]*Schema, 0) + a, ok := v24.([]interface{}) + if ok { + for _, item := range a { + y, err := NewSchema(item, compiler.NewContext("allOf", context)) + if err != nil { + errors = append(errors, err) + } + x.AllOf = append(x.AllOf, y) + } + } + } + // Properties properties = 25; + v25 := compiler.MapValueForKey(m, "properties") + if v25 != nil { + var err error + x.Properties, err = NewProperties(v25, compiler.NewContext("properties", context)) + if err != nil { + errors = append(errors, err) + } + } + // string discriminator = 26; + v26 := compiler.MapValueForKey(m, "discriminator") + if v26 != nil { + x.Discriminator, ok = v26.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for discriminator: %+v (%T)", v26, v26) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool read_only = 27; + v27 := compiler.MapValueForKey(m, "readOnly") + if v27 != nil { + x.ReadOnly, ok = v27.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for readOnly: %+v (%T)", v27, v27) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Xml xml = 28; + v28 := compiler.MapValueForKey(m, "xml") + if v28 != nil { + var err error + x.Xml, err = NewXml(v28, compiler.NewContext("xml", context)) + if err != nil { + errors = append(errors, err) + } + } + // ExternalDocs external_docs = 29; + v29 := compiler.MapValueForKey(m, "externalDocs") + if v29 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v29, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // Any example = 30; + v30 := compiler.MapValueForKey(m, "example") + if v30 != nil { + var err error + x.Example, err = NewAny(v30, compiler.NewContext("example", context)) + 
if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 31; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSchemaItem creates an object of type SchemaItem if possible, returning an error if not. +func NewSchemaItem(in interface{}, context *compiler.Context) (*SchemaItem, error) { + errors := make([]error, 0) + x := &SchemaItem{} + matched := false + // Schema schema = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewSchema(m, compiler.NewContext("schema", context)) + if matchingError == nil { + x.Oneof = &SchemaItem_Schema{Schema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // FileSchema file_schema = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewFileSchema(m, compiler.NewContext("fileSchema", context)) + if matchingError == nil { + x.Oneof = &SchemaItem_FileSchema{FileSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecurityDefinitions creates an object of type SecurityDefinitions if possible, returning an error if not. +func NewSecurityDefinitions(in interface{}, context *compiler.Context) (*SecurityDefinitions, error) { + errors := make([]error, 0) + x := &SecurityDefinitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSecurityDefinitionsItem additional_properties = 1; + // MAP: SecurityDefinitionsItem + x.AdditionalProperties = make([]*NamedSecurityDefinitionsItem, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedSecurityDefinitionsItem{} + pair.Name = k + var err error + pair.Value, err = NewSecurityDefinitionsItem(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecurityDefinitionsItem creates an object of type SecurityDefinitionsItem if possible, returning an error if not. 
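// Each oneof variant below is probed independently: a successful probe sets
// x.Oneof and matched (a later variant that also matched would overwrite an
// earlier one), and once any variant has matched, the errors gathered from the
// probes that failed are discarded, since they only mean "wrong subtype".
// For illustration, with a hypothetical input not taken from this file, a map
// along the lines of
//
//	type: apiKey
//	name: api_key
//	in: header
//
// should be claimed by the ApiKeySecurity variant, leaving x.Oneof holding a
// *SecurityDefinitionsItem_ApiKeySecurity wrapper.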
+func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*SecurityDefinitionsItem, error) { + errors := make([]error, 0) + x := &SecurityDefinitionsItem{} + matched := false + // BasicAuthenticationSecurity basic_authentication_security = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewBasicAuthenticationSecurity(m, compiler.NewContext("basicAuthenticationSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{BasicAuthenticationSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // ApiKeySecurity api_key_security = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewApiKeySecurity(m, compiler.NewContext("apiKeySecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{ApiKeySecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Oauth2ImplicitSecurity oauth2_implicit_security = 3; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewOauth2ImplicitSecurity(m, compiler.NewContext("oauth2ImplicitSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{Oauth2ImplicitSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Oauth2PasswordSecurity oauth2_password_security = 4; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewOauth2PasswordSecurity(m, compiler.NewContext("oauth2PasswordSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{Oauth2PasswordSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Oauth2ApplicationSecurity oauth2_application_security = 5; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewOauth2ApplicationSecurity(m, compiler.NewContext("oauth2ApplicationSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{Oauth2ApplicationSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Oauth2AccessCodeSecurity oauth2_access_code_security = 6; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewOauth2AccessCodeSecurity(m, compiler.NewContext("oauth2AccessCodeSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{Oauth2AccessCodeSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecurityRequirement creates an object of type SecurityRequirement if possible, returning an error if not. 
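// The body below follows the generated pattern for map-shaped messages: each
// key of the unpacked map becomes a Named* pair (here a NamedStringArray), a
// repeated field that preserves the source document's key order instead of
// using an unordered Go map. A hypothetical OpenAPI 2.0 fragment such as
//
//	petstore_auth:
//	- write:pets
//	- read:pets
//
// would become a single NamedStringArray with Name "petstore_auth" whose
// StringArray value holds the two scope strings.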
+func NewSecurityRequirement(in interface{}, context *compiler.Context) (*SecurityRequirement, error) { + errors := make([]error, 0) + x := &SecurityRequirement{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedStringArray additional_properties = 1; + // MAP: StringArray + x.AdditionalProperties = make([]*NamedStringArray, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedStringArray{} + pair.Name = k + var err error + pair.Value, err = NewStringArray(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewStringArray creates an object of type StringArray if possible, returning an error if not. +func NewStringArray(in interface{}, context *compiler.Context) (*StringArray, error) { + errors := make([]error, 0) + x := &StringArray{} + a, ok := in.([]interface{}) + if !ok { + message := fmt.Sprintf("has unexpected value for StringArray: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + x.Value = make([]string, 0) + for _, s := range a { + x.Value = append(x.Value, s.(string)) + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewTag creates an object of type Tag if possible, returning an error if not. +func NewTag(in interface{}, context *compiler.Context) (*Tag, error) { + errors := make([]error, 0) + x := &Tag{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"name"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "externalDocs", "name"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 3; + v3 := compiler.MapValueForKey(m, "externalDocs") + if v3 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v3, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 4; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := 
range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewTypeItem creates an object of type TypeItem if possible, returning an error if not. +func NewTypeItem(in interface{}, context *compiler.Context) (*TypeItem, error) { + errors := make([]error, 0) + x := &TypeItem{} + switch in := in.(type) { + case string: + x.Value = make([]string, 0) + x.Value = append(x.Value, in) + case []interface{}: + x.Value = make([]string, 0) + for _, v := range in { + value, ok := v.(string) + if ok { + x.Value = append(x.Value, value) + } else { + message := fmt.Sprintf("has unexpected value for string array element: %+v (%T)", value, value) + errors = append(errors, compiler.NewError(context, message)) + } + } + default: + message := fmt.Sprintf("has unexpected value for string array: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewVendorExtension creates an object of type VendorExtension if possible, returning an error if not. +func NewVendorExtension(in interface{}, context *compiler.Context) (*VendorExtension, error) { + errors := make([]error, 0) + x := &VendorExtension{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewXml creates an object of type Xml if possible, returning an error if not. 
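// Like NewTag and NewVendorExtension above, NewXml accepts any key matching
// pattern0 (the ^x- vendor-extension pattern) in addition to its fixed allowed
// keys: compiler.HandleExtension is given the first chance to interpret such a
// value, and otherwise the raw value is wrapped with NewAny and stored as a
// NamedAny. So a hypothetical input like
//
//	name: animal
//	x-vendor-hint: flatten
//
// passes key validation and yields one VendorExtension entry named
// "x-vendor-hint" alongside Name "animal".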
+func NewXml(in interface{}, context *compiler.Context) (*Xml, error) { + errors := make([]error, 0) + x := &Xml{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"attribute", "name", "namespace", "prefix", "wrapped"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string namespace = 2; + v2 := compiler.MapValueForKey(m, "namespace") + if v2 != nil { + x.Namespace, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for namespace: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string prefix = 3; + v3 := compiler.MapValueForKey(m, "prefix") + if v3 != nil { + x.Prefix, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for prefix: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool attribute = 4; + v4 := compiler.MapValueForKey(m, "attribute") + if v4 != nil { + x.Attribute, ok = v4.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for attribute: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool wrapped = 5; + v5 := compiler.MapValueForKey(m, "wrapped") + if v5 != nil { + x.Wrapped, ok = v5.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for wrapped: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside AdditionalPropertiesItem objects. +func (m *AdditionalPropertiesItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*AdditionalPropertiesItem_Schema) + if ok { + _, err := p.Schema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Any objects. 
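// Any is a leaf type, so there is nothing to traverse; the method exists only
// so every generated type satisfies the same ResolveReferences shape. The
// composite types that follow all share one schematic form: recurse into each
// non-nil child and each element of a repeated field, collect child errors,
// and fold them with compiler.NewErrorGroupOrNil, roughly
//
//	if m.Child != nil { // "Child" stands in for any message-typed field
//		if _, err := m.Child.ResolveReferences(root); err != nil {
//			errors = append(errors, err)
//		}
//	}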
+func (m *Any) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ApiKeySecurity objects. +func (m *ApiKeySecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside BasicAuthenticationSecurity objects. +func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside BodyParameter objects. +func (m *BodyParameter) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Schema != nil { + _, err := m.Schema.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Contact objects. +func (m *Contact) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Default objects. +func (m *Default) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Definitions objects. +func (m *Definitions) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Document objects. 
+func (m *Document) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Info != nil { + _, err := m.Info.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Paths != nil { + _, err := m.Paths.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Definitions != nil { + _, err := m.Definitions.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Parameters != nil { + _, err := m.Parameters.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Responses != nil { + _, err := m.Responses.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Security { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.SecurityDefinitions != nil { + _, err := m.SecurityDefinitions.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Tags { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Examples objects. +func (m *Examples) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ExternalDocs objects. +func (m *ExternalDocs) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside FileSchema objects. +func (m *FileSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Example != nil { + _, err := m.Example.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside FormDataParameterSubSchema objects. 
+func (m *FormDataParameterSubSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Header objects. +func (m *Header) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside HeaderParameterSubSchema objects. +func (m *HeaderParameterSubSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Headers objects. +func (m *Headers) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Info objects. +func (m *Info) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Contact != nil { + _, err := m.Contact.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.License != nil { + _, err := m.License.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ItemsItem objects. 
+func (m *ItemsItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.Schema { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside JsonReference objects. +func (m *JsonReference) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewJsonReference(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside License objects. +func (m *License) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedAny objects. +func (m *NamedAny) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedHeader objects. +func (m *NamedHeader) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedParameter objects. +func (m *NamedParameter) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedPathItem objects. +func (m *NamedPathItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedResponse objects. +func (m *NamedResponse) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedResponseValue objects. +func (m *NamedResponseValue) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedSchema objects. 
+func (m *NamedSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedSecurityDefinitionsItem objects. +func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedString objects. +func (m *NamedString) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedStringArray objects. +func (m *NamedStringArray) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NonBodyParameter objects. +func (m *NonBodyParameter) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*NonBodyParameter_HeaderParameterSubSchema) + if ok { + _, err := p.HeaderParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*NonBodyParameter_FormDataParameterSubSchema) + if ok { + _, err := p.FormDataParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*NonBodyParameter_QueryParameterSubSchema) + if ok { + _, err := p.QueryParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*NonBodyParameter_PathParameterSubSchema) + if ok { + _, err := p.PathParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2AccessCodeSecurity objects. +func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2ApplicationSecurity objects. +func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2ImplicitSecurity objects. 
+func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2PasswordSecurity objects. +func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2Scopes objects. +func (m *Oauth2Scopes) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Operation objects. +func (m *Operation) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Parameters { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.Responses != nil { + _, err := m.Responses.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Security { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Parameter objects. +func (m *Parameter) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*Parameter_BodyParameter) + if ok { + _, err := p.BodyParameter.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*Parameter_NonBodyParameter) + if ok { + _, err := p.NonBodyParameter.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ParameterDefinitions objects. +func (m *ParameterDefinitions) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ParametersItem objects. 
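// ParametersItem is one of the reference-bearing oneofs: when the
// JsonReference variant is present, the referenced fragment is loaded
// (ultimately via compiler.ReadInfoForRef, as in JsonReference.ResolveReferences
// above), a fresh ParametersItem is built from it, and *m is overwritten in
// place so callers end up holding the resolved node. Condensed, the swap below
// amounts to
//
//	if info, _ := p.JsonReference.ResolveReferences(root); info != nil {
//		if n, err := NewParametersItem(info, nil); err == nil && n != nil {
//			*m = *n
//		}
//	}
//
// The same in-place replacement appears in PathItem, Schema, and ResponseValue.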
+func (m *ParametersItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*ParametersItem_Parameter) + if ok { + _, err := p.Parameter.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*ParametersItem_JsonReference) + if ok { + info, err := p.JsonReference.ResolveReferences(root) + if err != nil { + return nil, err + } else if info != nil { + n, err := NewParametersItem(info, nil) + if err != nil { + return nil, err + } else if n != nil { + *m = *n + return nil, nil + } + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside PathItem objects. +func (m *PathItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewPathItem(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + if m.Get != nil { + _, err := m.Get.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Put != nil { + _, err := m.Put.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Post != nil { + _, err := m.Post.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Delete != nil { + _, err := m.Delete.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Options != nil { + _, err := m.Options.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Head != nil { + _, err := m.Head.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Patch != nil { + _, err := m.Patch.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Parameters { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside PathParameterSubSchema objects. +func (m *PathParameterSubSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Paths objects. 
+func (m *Paths) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.Path { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside PrimitivesItems objects. +func (m *PrimitivesItems) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Properties objects. +func (m *Properties) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside QueryParameterSubSchema objects. +func (m *QueryParameterSubSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Response objects. +func (m *Response) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Schema != nil { + _, err := m.Schema.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Headers != nil { + _, err := m.Headers.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Examples != nil { + _, err := m.Examples.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ResponseDefinitions objects. 
+func (m *ResponseDefinitions) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ResponseValue objects. +func (m *ResponseValue) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*ResponseValue_Response) + if ok { + _, err := p.Response.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*ResponseValue_JsonReference) + if ok { + info, err := p.JsonReference.ResolveReferences(root) + if err != nil { + return nil, err + } else if info != nil { + n, err := NewResponseValue(info, nil) + if err != nil { + return nil, err + } else if n != nil { + *m = *n + return nil, nil + } + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Responses objects. +func (m *Responses) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.ResponseCode { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Schema objects. +func (m *Schema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewSchema(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.AdditionalProperties != nil { + _, err := m.AdditionalProperties.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Type != nil { + _, err := m.Type.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.AllOf { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.Properties != nil { + _, err := m.Properties.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Xml != nil { + _, err := m.Xml.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Example != nil { + _, err := m.Example.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = 
append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SchemaItem objects. +func (m *SchemaItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*SchemaItem_Schema) + if ok { + _, err := p.Schema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SchemaItem_FileSchema) + if ok { + _, err := p.FileSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecurityDefinitions objects. +func (m *SecurityDefinitions) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecurityDefinitionsItem objects. +func (m *SecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_BasicAuthenticationSecurity) + if ok { + _, err := p.BasicAuthenticationSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_ApiKeySecurity) + if ok { + _, err := p.ApiKeySecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ImplicitSecurity) + if ok { + _, err := p.Oauth2ImplicitSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2PasswordSecurity) + if ok { + _, err := p.Oauth2PasswordSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ApplicationSecurity) + if ok { + _, err := p.Oauth2ApplicationSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) + if ok { + _, err := p.Oauth2AccessCodeSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecurityRequirement objects. +func (m *SecurityRequirement) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside StringArray objects. +func (m *StringArray) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Tag objects. 
+func (m *Tag) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside TypeItem objects. +func (m *TypeItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside VendorExtension objects. +func (m *VendorExtension) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Xml objects. +func (m *Xml) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ToRawInfo returns a description of AdditionalPropertiesItem suitable for JSON or YAML export. +func (m *AdditionalPropertiesItem) ToRawInfo() interface{} { + // ONE OF WRAPPER + // AdditionalPropertiesItem + // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetSchema() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:boolean Type:bool StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if v1, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { + return v1.Boolean + } + return nil +} + +// ToRawInfo returns a description of Any suitable for JSON or YAML export. +func (m *Any) ToRawInfo() interface{} { + var err error + var info1 []yaml.MapSlice + err = yaml.Unmarshal([]byte(m.Yaml), &info1) + if err == nil { + return info1 + } + var info2 yaml.MapSlice + err = yaml.Unmarshal([]byte(m.Yaml), &info2) + if err == nil { + return info2 + } + var info3 interface{} + err = yaml.Unmarshal([]byte(m.Yaml), &info3) + if err == nil { + return info3 + } + return nil +} + +// ToRawInfo returns a description of ApiKeySecurity suitable for JSON or YAML export. +func (m *ApiKeySecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of BasicAuthenticationSecurity suitable for JSON or YAML export. +func (m *BasicAuthenticationSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of BodyParameter suitable for JSON or YAML export. +func (m *BodyParameter) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + if m.Required != false { + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()}) + // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Contact suitable for JSON or YAML export. +func (m *Contact) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + if m.Url != "" { + info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) + } + if m.Email != "" { + info = append(info, yaml.MapItem{Key: "email", Value: m.Email}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Default suitable for JSON or YAML export. 
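// The ToRawInfo methods rebuild an ordered yaml.MapSlice instead of serializing
// the proto directly: required fields are always emitted, optional scalars only
// when they differ from their zero value, and vendor extensions are appended
// last under their original x- keys. Assuming the usual entry point for this
// generated package (a sketch, not code from this file), a round trip looks like
//
//	doc, err := NewDocument(info, compiler.NewContext("$root", nil))
//	if err == nil {
//		raw := doc.ToRawInfo() // yaml.MapSlice mirroring the input's key order
//		_ = raw
//	}
//
// Map-shaped types such as Default below simply flatten their Named* pairs.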
+func (m *Default) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:false Description:} + return info +} + +// ToRawInfo returns a description of Definitions suitable for JSON or YAML export. +func (m *Definitions) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Document suitable for JSON or YAML export. +func (m *Document) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "swagger", Value: m.Swagger}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "info", Value: m.Info.ToRawInfo()}) + // &{Name:info Type:Info StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Host != "" { + info = append(info, yaml.MapItem{Key: "host", Value: m.Host}) + } + if m.BasePath != "" { + info = append(info, yaml.MapItem{Key: "basePath", Value: m.BasePath}) + } + if len(m.Schemes) != 0 { + info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes}) + } + if len(m.Consumes) != 0 { + info = append(info, yaml.MapItem{Key: "consumes", Value: m.Consumes}) + } + if len(m.Produces) != 0 { + info = append(info, yaml.MapItem{Key: "produces", Value: m.Produces}) + } + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "paths", Value: m.Paths.ToRawInfo()}) + // &{Name:paths Type:Paths StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Definitions != nil { + info = append(info, yaml.MapItem{Key: "definitions", Value: m.Definitions.ToRawInfo()}) + } + // &{Name:definitions Type:Definitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Parameters != nil { + info = append(info, yaml.MapItem{Key: "parameters", Value: m.Parameters.ToRawInfo()}) + } + // &{Name:parameters Type:ParameterDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Responses != nil { + info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()}) + } + // &{Name:responses Type:ResponseDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Security) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Security { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "security", Value: items}) + } + // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.SecurityDefinitions != nil { + info = append(info, yaml.MapItem{Key: "securityDefinitions", Value: m.SecurityDefinitions.ToRawInfo()}) + } + // &{Name:securityDefinitions Type:SecurityDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Tags) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Tags { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "tags", Value: items}) + } + // &{Name:tags Type:Tag StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Examples suitable for JSON or YAML export. +func (m *Examples) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ExternalDocs suitable for JSON or YAML export. +func (m *ExternalDocs) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of FileSchema suitable for JSON or YAML export. +func (m *FileSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Format != "" { + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + } + if m.Title != "" { + info = append(info, yaml.MapItem{Key: "title", Value: m.Title}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Required) != 0 { + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + if m.ReadOnly != false { + info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly}) + } + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Example != nil { + info = append(info, yaml.MapItem{Key: "example", Value: m.Example.ToRawInfo()}) + } + // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of FormDataParameterSubSchema suitable for JSON or YAML export. 
+func (m *FormDataParameterSubSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Required != false { + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + } + if m.In != "" { + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + if m.AllowEmptyValue != false { + info = append(info, yaml.MapItem{Key: "allowEmptyValue", Value: m.AllowEmptyValue}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "enum", Value: items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Header suitable for JSON or YAML export. +func (m *Header) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + if m.Format != "" { + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "enum", Value: items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of HeaderParameterSubSchema suitable for JSON or YAML export. 
+func (m *HeaderParameterSubSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Required != false { + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + } + if m.In != "" { + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "enum", Value: items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Headers suitable for JSON or YAML export. 
+func (m *Headers) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedHeader StringEnumValues:[] MapType:Header Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Info suitable for JSON or YAML export. +func (m *Info) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "title", Value: m.Title}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "version", Value: m.Version}) + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.TermsOfService != "" { + info = append(info, yaml.MapItem{Key: "termsOfService", Value: m.TermsOfService}) + } + if m.Contact != nil { + info = append(info, yaml.MapItem{Key: "contact", Value: m.Contact.ToRawInfo()}) + } + // &{Name:contact Type:Contact StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.License != nil { + info = append(info, yaml.MapItem{Key: "license", Value: m.License.ToRawInfo()}) + } + // &{Name:license Type:License StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ItemsItem suitable for JSON or YAML export. +func (m *ItemsItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if len(m.Schema) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Schema { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "schema", Value: items}) + } + // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + return info +} + +// ToRawInfo returns a description of JsonReference suitable for JSON or YAML export. +func (m *JsonReference) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + return info +} + +// ToRawInfo returns a description of License suitable for JSON or YAML export. +func (m *License) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + if m.Url != "" { + info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of NamedAny suitable for JSON or YAML export. +func (m *NamedAny) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + // &{Name:value Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedHeader suitable for JSON or YAML export. +func (m *NamedHeader) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + // &{Name:value Type:Header StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedParameter suitable for JSON or YAML export. +func (m *NamedParameter) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + // &{Name:value Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedPathItem suitable for JSON or YAML export. +func (m *NamedPathItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + // &{Name:value Type:PathItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedResponse suitable for JSON or YAML export. +func (m *NamedResponse) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + // &{Name:value Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedResponseValue suitable for JSON or YAML export. +func (m *NamedResponseValue) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + // &{Name:value Type:ResponseValue StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedSchema suitable for JSON or YAML export. 
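+// (Note: as with the other Named* wrappers in this file, only the Name is
+// emitted here; the mapped Value is serialized by the enclosing container,
+// e.g. the AdditionalProperties loops in Definitions.ToRawInfo above.)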
+func (m *NamedSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + // &{Name:value Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedSecurityDefinitionsItem suitable for JSON or YAML export. +func (m *NamedSecurityDefinitionsItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + // &{Name:value Type:SecurityDefinitionsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedString suitable for JSON or YAML export. +func (m *NamedString) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + if m.Value != "" { + info = append(info, yaml.MapItem{Key: "value", Value: m.Value}) + } + return info +} + +// ToRawInfo returns a description of NamedStringArray suitable for JSON or YAML export. +func (m *NamedStringArray) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + // &{Name:value Type:StringArray StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NonBodyParameter suitable for JSON or YAML export. +func (m *NonBodyParameter) ToRawInfo() interface{} { + // ONE OF WRAPPER + // NonBodyParameter + // {Name:headerParameterSubSchema Type:HeaderParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetHeaderParameterSubSchema() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:formDataParameterSubSchema Type:FormDataParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetFormDataParameterSubSchema() + if v1 != nil { + return v1.ToRawInfo() + } + // {Name:queryParameterSubSchema Type:QueryParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v2 := m.GetQueryParameterSubSchema() + if v2 != nil { + return v2.ToRawInfo() + } + // {Name:pathParameterSubSchema Type:PathParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v3 := m.GetPathParameterSubSchema() + if v3 != nil { + return v3.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of Oauth2AccessCodeSecurity suitable for JSON or YAML export. +func (m *Oauth2AccessCodeSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) + if m.Scopes != nil { + info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) + } + // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Oauth2ApplicationSecurity suitable for JSON or YAML export. +func (m *Oauth2ApplicationSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) + if m.Scopes != nil { + info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) + } + // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + // always include this required field. + info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Oauth2ImplicitSecurity suitable for JSON or YAML export. +func (m *Oauth2ImplicitSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) + if m.Scopes != nil { + info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) + } + // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + // always include this required field. + info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl}) + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Oauth2PasswordSecurity suitable for JSON or YAML export. +func (m *Oauth2PasswordSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) + if m.Scopes != nil { + info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) + } + // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + // always include this required field. + info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Oauth2Scopes suitable for JSON or YAML export. +func (m *Oauth2Scopes) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Operation suitable for JSON or YAML export. +func (m *Operation) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if len(m.Tags) != 0 { + info = append(info, yaml.MapItem{Key: "tags", Value: m.Tags}) + } + if m.Summary != "" { + info = append(info, yaml.MapItem{Key: "summary", Value: m.Summary}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.OperationId != "" { + info = append(info, yaml.MapItem{Key: "operationId", Value: m.OperationId}) + } + if len(m.Produces) != 0 { + info = append(info, yaml.MapItem{Key: "produces", Value: m.Produces}) + } + if len(m.Consumes) != 0 { + info = append(info, yaml.MapItem{Key: "consumes", Value: m.Consumes}) + } + if len(m.Parameters) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Parameters { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "parameters", Value: items}) + } + // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()}) + // &{Name:responses Type:Responses StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Schemes) != 0 { + info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes}) + } + if m.Deprecated != false { + info = append(info, yaml.MapItem{Key: "deprecated", Value: m.Deprecated}) + } + if len(m.Security) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Security { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "security", Value: items}) + } + // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Parameter suitable for JSON or YAML export. +func (m *Parameter) ToRawInfo() interface{} { + // ONE OF WRAPPER + // Parameter + // {Name:bodyParameter Type:BodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetBodyParameter() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:nonBodyParameter Type:NonBodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetNonBodyParameter() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of ParameterDefinitions suitable for JSON or YAML export. +func (m *ParameterDefinitions) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedParameter StringEnumValues:[] MapType:Parameter Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ParametersItem suitable for JSON or YAML export. +func (m *ParametersItem) ToRawInfo() interface{} { + // ONE OF WRAPPER + // ParametersItem + // {Name:parameter Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetParameter() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetJsonReference() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of PathItem suitable for JSON or YAML export. 
+func (m *PathItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.XRef != "" { + info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) + } + if m.Get != nil { + info = append(info, yaml.MapItem{Key: "get", Value: m.Get.ToRawInfo()}) + } + // &{Name:get Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Put != nil { + info = append(info, yaml.MapItem{Key: "put", Value: m.Put.ToRawInfo()}) + } + // &{Name:put Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Post != nil { + info = append(info, yaml.MapItem{Key: "post", Value: m.Post.ToRawInfo()}) + } + // &{Name:post Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Delete != nil { + info = append(info, yaml.MapItem{Key: "delete", Value: m.Delete.ToRawInfo()}) + } + // &{Name:delete Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Options != nil { + info = append(info, yaml.MapItem{Key: "options", Value: m.Options.ToRawInfo()}) + } + // &{Name:options Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Head != nil { + info = append(info, yaml.MapItem{Key: "head", Value: m.Head.ToRawInfo()}) + } + // &{Name:head Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Patch != nil { + info = append(info, yaml.MapItem{Key: "patch", Value: m.Patch.ToRawInfo()}) + } + // &{Name:patch Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Parameters) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Parameters { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "parameters", Value: items}) + } + // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of PathParameterSubSchema suitable for JSON or YAML export. +func (m *PathParameterSubSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + if m.In != "" { + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "enum", Value: items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Paths suitable for JSON or YAML export. 
+func (m *Paths) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + if m.Path != nil { + for _, item := range m.Path { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:Path Type:NamedPathItem StringEnumValues:[] MapType:PathItem Repeated:true Pattern:^/ Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of PrimitivesItems suitable for JSON or YAML export. +func (m *PrimitivesItems) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Type != "" { + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "enum", Value: items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Properties suitable for JSON or YAML export. 
+func (m *Properties) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of QueryParameterSubSchema suitable for JSON or YAML export. +func (m *QueryParameterSubSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Required != false { + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + } + if m.In != "" { + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + if m.AllowEmptyValue != false { + info = append(info, yaml.MapItem{Key: "allowEmptyValue", Value: m.AllowEmptyValue}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "enum", Value: items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } 
+ } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Response suitable for JSON or YAML export. +func (m *Response) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + if m.Schema != nil { + info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()}) + } + // &{Name:schema Type:SchemaItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Headers != nil { + info = append(info, yaml.MapItem{Key: "headers", Value: m.Headers.ToRawInfo()}) + } + // &{Name:headers Type:Headers StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Examples != nil { + info = append(info, yaml.MapItem{Key: "examples", Value: m.Examples.ToRawInfo()}) + } + // &{Name:examples Type:Examples StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ResponseDefinitions suitable for JSON or YAML export. +func (m *ResponseDefinitions) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedResponse StringEnumValues:[] MapType:Response Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ResponseValue suitable for JSON or YAML export. +func (m *ResponseValue) ToRawInfo() interface{} { + // ONE OF WRAPPER + // ResponseValue + // {Name:response Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetResponse() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetJsonReference() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of Responses suitable for JSON or YAML export. +func (m *Responses) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.ResponseCode != nil { + for _, item := range m.ResponseCode { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:ResponseCode Type:NamedResponseValue StringEnumValues:[] MapType:ResponseValue Repeated:true Pattern:^([0-9]{3})$|^(default)$ Implicit:true Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Schema suitable for JSON or YAML export. 
+func (m *Schema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.XRef != "" { + info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + } + if m.Title != "" { + info = append(info, yaml.MapItem{Key: "title", Value: m.Title}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + } + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + } + if m.MaxProperties != 0 { + info = append(info, yaml.MapItem{Key: "maxProperties", Value: m.MaxProperties}) + } + if m.MinProperties != 0 { + info = append(info, yaml.MapItem{Key: "minProperties", Value: m.MinProperties}) + } + if len(m.Required) != 0 { + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "enum", Value: items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.AdditionalProperties != nil { + info = append(info, yaml.MapItem{Key: "additionalProperties", Value: m.AdditionalProperties.ToRawInfo()}) + } + // &{Name:additionalProperties Type:AdditionalPropertiesItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Type != nil { + if len(m.Type.Value) == 1 { + info = append(info, yaml.MapItem{Key: "type", Value: m.Type.Value[0]}) + } else { + info = append(info, yaml.MapItem{Key: "type", Value: m.Type.Value}) + } + } + // &{Name:type Type:TypeItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Items != nil { + items := make([]interface{}, 0) + for _, item := range m.Items.Schema { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "items", Value: items[0]}) + } + // &{Name:items Type:ItemsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.AllOf) != 0 { + items := make([]interface{}, 0) + for _, item := range m.AllOf { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "allOf", Value: items}) + } + // &{Name:allOf Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.Properties != nil { + info = append(info, yaml.MapItem{Key: "properties", Value: m.Properties.ToRawInfo()}) + } + // &{Name:properties Type:Properties StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Discriminator != "" { + info = append(info, yaml.MapItem{Key: "discriminator", Value: m.Discriminator}) + } + if m.ReadOnly != false { + info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly}) + } + if m.Xml != nil { + info = append(info, yaml.MapItem{Key: "xml", Value: m.Xml.ToRawInfo()}) + } + // &{Name:xml Type:Xml StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Example != nil { + info = append(info, yaml.MapItem{Key: "example", Value: m.Example.ToRawInfo()}) + } + // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of SchemaItem suitable for JSON or YAML export. +func (m *SchemaItem) ToRawInfo() interface{} { + // ONE OF WRAPPER + // SchemaItem + // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetSchema() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:fileSchema Type:FileSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetFileSchema() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of SecurityDefinitions suitable for JSON or YAML export. +func (m *SecurityDefinitions) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedSecurityDefinitionsItem StringEnumValues:[] MapType:SecurityDefinitionsItem Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of SecurityDefinitionsItem suitable for JSON or YAML export.
+func (m *SecurityDefinitionsItem) ToRawInfo() interface{} { + // ONE OF WRAPPER + // SecurityDefinitionsItem + // {Name:basicAuthenticationSecurity Type:BasicAuthenticationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetBasicAuthenticationSecurity() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:apiKeySecurity Type:ApiKeySecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetApiKeySecurity() + if v1 != nil { + return v1.ToRawInfo() + } + // {Name:oauth2ImplicitSecurity Type:Oauth2ImplicitSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v2 := m.GetOauth2ImplicitSecurity() + if v2 != nil { + return v2.ToRawInfo() + } + // {Name:oauth2PasswordSecurity Type:Oauth2PasswordSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v3 := m.GetOauth2PasswordSecurity() + if v3 != nil { + return v3.ToRawInfo() + } + // {Name:oauth2ApplicationSecurity Type:Oauth2ApplicationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v4 := m.GetOauth2ApplicationSecurity() + if v4 != nil { + return v4.ToRawInfo() + } + // {Name:oauth2AccessCodeSecurity Type:Oauth2AccessCodeSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v5 := m.GetOauth2AccessCodeSecurity() + if v5 != nil { + return v5.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export. +func (m *SecurityRequirement) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedStringArray StringEnumValues:[] MapType:StringArray Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of StringArray suitable for JSON or YAML export. +func (m *StringArray) ToRawInfo() interface{} { + return m.Value +} + +// ToRawInfo returns a description of Tag suitable for JSON or YAML export. +func (m *Tag) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of TypeItem suitable for JSON or YAML export. +func (m *TypeItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if len(m.Value) != 0 { + info = append(info, yaml.MapItem{Key: "value", Value: m.Value}) + } + return info +} + +// ToRawInfo returns a description of VendorExtension suitable for JSON or YAML export. 
+func (m *VendorExtension) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Xml suitable for JSON or YAML export. +func (m *Xml) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + if m.Namespace != "" { + info = append(info, yaml.MapItem{Key: "namespace", Value: m.Namespace}) + } + if m.Prefix != "" { + info = append(info, yaml.MapItem{Key: "prefix", Value: m.Prefix}) + } + if m.Attribute != false { + info = append(info, yaml.MapItem{Key: "attribute", Value: m.Attribute}) + } + if m.Wrapped != false { + info = append(info, yaml.MapItem{Key: "wrapped", Value: m.Wrapped}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +var ( + pattern0 = regexp.MustCompile("^x-") + pattern1 = regexp.MustCompile("^/") + pattern2 = regexp.MustCompile("^([0-9]{3})$|^(default)$") +) diff --git a/extender/code/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.pb.go b/extender/code/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.pb.go new file mode 100644 index 000000000..55a6cb516 --- /dev/null +++ b/extender/code/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.pb.go @@ -0,0 +1,5226 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: openapiv2/OpenAPIv2.proto + +package openapi_v2 + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type AdditionalPropertiesItem struct { + // Types that are valid to be assigned to Oneof: + // *AdditionalPropertiesItem_Schema + // *AdditionalPropertiesItem_Boolean + Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AdditionalPropertiesItem) Reset() { *m = AdditionalPropertiesItem{} } +func (m *AdditionalPropertiesItem) String() string { return proto.CompactTextString(m) } +func (*AdditionalPropertiesItem) ProtoMessage() {} +func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{0} +} + +func (m *AdditionalPropertiesItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AdditionalPropertiesItem.Unmarshal(m, b) +} +func (m *AdditionalPropertiesItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AdditionalPropertiesItem.Marshal(b, m, deterministic) +} +func (m *AdditionalPropertiesItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdditionalPropertiesItem.Merge(m, src) +} +func (m *AdditionalPropertiesItem) XXX_Size() int { + return xxx_messageInfo_AdditionalPropertiesItem.Size(m) +} +func (m *AdditionalPropertiesItem) XXX_DiscardUnknown() { + xxx_messageInfo_AdditionalPropertiesItem.DiscardUnknown(m) +} + +var xxx_messageInfo_AdditionalPropertiesItem proto.InternalMessageInfo + +type isAdditionalPropertiesItem_Oneof interface { + isAdditionalPropertiesItem_Oneof() +} + +type AdditionalPropertiesItem_Schema struct { + Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"` +} + +type AdditionalPropertiesItem_Boolean struct { + Boolean bool `protobuf:"varint,2,opt,name=boolean,proto3,oneof"` +} + +func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {} + +func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {} + +func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *AdditionalPropertiesItem) GetSchema() *Schema { + if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Schema); ok { + return x.Schema + } + return nil +} + +func (m *AdditionalPropertiesItem) GetBoolean() bool { + if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { + return x.Boolean + } + return false +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
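+// Ordinary callers, by contrast, populate the oneof through the wrapper
+// types above; an illustrative sketch, not generated code:
+//
+//	item := &AdditionalPropertiesItem{
+//		Oneof: &AdditionalPropertiesItem_Boolean{Boolean: true},
+//	}
+//	item.GetBoolean() // true
+//	item.GetSchema()  // nil: the Schema branch is unset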
+func (*AdditionalPropertiesItem) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*AdditionalPropertiesItem_Schema)(nil), + (*AdditionalPropertiesItem_Boolean)(nil), + } +} + +type Any struct { + Value *any.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + Yaml string `protobuf:"bytes,2,opt,name=yaml,proto3" json:"yaml,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{1} +} + +func (m *Any) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Any.Unmarshal(m, b) +} +func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Any.Marshal(b, m, deterministic) +} +func (m *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(m, src) +} +func (m *Any) XXX_Size() int { + return xxx_messageInfo_Any.Size(m) +} +func (m *Any) XXX_DiscardUnknown() { + xxx_messageInfo_Any.DiscardUnknown(m) +} + +var xxx_messageInfo_Any proto.InternalMessageInfo + +func (m *Any) GetValue() *any.Any { + if m != nil { + return m.Value + } + return nil +} + +func (m *Any) GetYaml() string { + if m != nil { + return m.Yaml + } + return "" +} + +type ApiKeySecurity struct { + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApiKeySecurity) Reset() { *m = ApiKeySecurity{} } +func (m *ApiKeySecurity) String() string { return proto.CompactTextString(m) } +func (*ApiKeySecurity) ProtoMessage() {} +func (*ApiKeySecurity) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{2} +} + +func (m *ApiKeySecurity) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApiKeySecurity.Unmarshal(m, b) +} +func (m *ApiKeySecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApiKeySecurity.Marshal(b, m, deterministic) +} +func (m *ApiKeySecurity) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApiKeySecurity.Merge(m, src) +} +func (m *ApiKeySecurity) XXX_Size() int { + return xxx_messageInfo_ApiKeySecurity.Size(m) +} +func (m *ApiKeySecurity) XXX_DiscardUnknown() { + xxx_messageInfo_ApiKeySecurity.DiscardUnknown(m) +} + +var xxx_messageInfo_ApiKeySecurity proto.InternalMessageInfo + +func (m *ApiKeySecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *ApiKeySecurity) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ApiKeySecurity) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *ApiKeySecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *ApiKeySecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type BasicAuthenticationSecurity struct { + 
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BasicAuthenticationSecurity) Reset() { *m = BasicAuthenticationSecurity{} } +func (m *BasicAuthenticationSecurity) String() string { return proto.CompactTextString(m) } +func (*BasicAuthenticationSecurity) ProtoMessage() {} +func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{3} +} + +func (m *BasicAuthenticationSecurity) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BasicAuthenticationSecurity.Unmarshal(m, b) +} +func (m *BasicAuthenticationSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BasicAuthenticationSecurity.Marshal(b, m, deterministic) +} +func (m *BasicAuthenticationSecurity) XXX_Merge(src proto.Message) { + xxx_messageInfo_BasicAuthenticationSecurity.Merge(m, src) +} +func (m *BasicAuthenticationSecurity) XXX_Size() int { + return xxx_messageInfo_BasicAuthenticationSecurity.Size(m) +} +func (m *BasicAuthenticationSecurity) XXX_DiscardUnknown() { + xxx_messageInfo_BasicAuthenticationSecurity.DiscardUnknown(m) +} + +var xxx_messageInfo_BasicAuthenticationSecurity proto.InternalMessageInfo + +func (m *BasicAuthenticationSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *BasicAuthenticationSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type BodyParameter struct { + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // The name of the parameter. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"` + // Determines whether or not this parameter is required or optional. 
+ Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` + Schema *Schema `protobuf:"bytes,5,opt,name=schema,proto3" json:"schema,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BodyParameter) Reset() { *m = BodyParameter{} } +func (m *BodyParameter) String() string { return proto.CompactTextString(m) } +func (*BodyParameter) ProtoMessage() {} +func (*BodyParameter) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{4} +} + +func (m *BodyParameter) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BodyParameter.Unmarshal(m, b) +} +func (m *BodyParameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BodyParameter.Marshal(b, m, deterministic) +} +func (m *BodyParameter) XXX_Merge(src proto.Message) { + xxx_messageInfo_BodyParameter.Merge(m, src) +} +func (m *BodyParameter) XXX_Size() int { + return xxx_messageInfo_BodyParameter.Size(m) +} +func (m *BodyParameter) XXX_DiscardUnknown() { + xxx_messageInfo_BodyParameter.DiscardUnknown(m) +} + +var xxx_messageInfo_BodyParameter proto.InternalMessageInfo + +func (m *BodyParameter) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *BodyParameter) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *BodyParameter) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *BodyParameter) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *BodyParameter) GetSchema() *Schema { + if m != nil { + return m.Schema + } + return nil +} + +func (m *BodyParameter) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// Contact information for the owners of the API. +type Contact struct { + // The identifying name of the contact person/organization. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The URL pointing to the contact information. + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + // The email address of the contact person/organization. 
+ Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Contact) Reset() { *m = Contact{} } +func (m *Contact) String() string { return proto.CompactTextString(m) } +func (*Contact) ProtoMessage() {} +func (*Contact) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{5} +} + +func (m *Contact) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Contact.Unmarshal(m, b) +} +func (m *Contact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Contact.Marshal(b, m, deterministic) +} +func (m *Contact) XXX_Merge(src proto.Message) { + xxx_messageInfo_Contact.Merge(m, src) +} +func (m *Contact) XXX_Size() int { + return xxx_messageInfo_Contact.Size(m) +} +func (m *Contact) XXX_DiscardUnknown() { + xxx_messageInfo_Contact.DiscardUnknown(m) +} + +var xxx_messageInfo_Contact proto.InternalMessageInfo + +func (m *Contact) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Contact) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *Contact) GetEmail() string { + if m != nil { + return m.Email + } + return "" +} + +func (m *Contact) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Default struct { + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Default) Reset() { *m = Default{} } +func (m *Default) String() string { return proto.CompactTextString(m) } +func (*Default) ProtoMessage() {} +func (*Default) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{6} +} + +func (m *Default) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Default.Unmarshal(m, b) +} +func (m *Default) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Default.Marshal(b, m, deterministic) +} +func (m *Default) XXX_Merge(src proto.Message) { + xxx_messageInfo_Default.Merge(m, src) +} +func (m *Default) XXX_Size() int { + return xxx_messageInfo_Default.Size(m) +} +func (m *Default) XXX_DiscardUnknown() { + xxx_messageInfo_Default.DiscardUnknown(m) +} + +var xxx_messageInfo_Default proto.InternalMessageInfo + +func (m *Default) GetAdditionalProperties() []*NamedAny { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +// One or more JSON objects describing the schemas being consumed and produced by the API. 
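+// As an illustrative sketch (not generated code), a definitions block such as
+//
+//	definitions:
+//	  Pet:   {...}
+//	  Error: {...}
+//
+// is carried as ordered (name, value) pairs rather than as a Go map:
+//
+//	defs := &Definitions{AdditionalProperties: []*NamedSchema{
+//		{Name: "Pet", Value: &Schema{}},
+//		{Name: "Error", Value: &Schema{}},
+//	}}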
+type Definitions struct { + AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Definitions) Reset() { *m = Definitions{} } +func (m *Definitions) String() string { return proto.CompactTextString(m) } +func (*Definitions) ProtoMessage() {} +func (*Definitions) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{7} +} + +func (m *Definitions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Definitions.Unmarshal(m, b) +} +func (m *Definitions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Definitions.Marshal(b, m, deterministic) +} +func (m *Definitions) XXX_Merge(src proto.Message) { + xxx_messageInfo_Definitions.Merge(m, src) +} +func (m *Definitions) XXX_Size() int { + return xxx_messageInfo_Definitions.Size(m) +} +func (m *Definitions) XXX_DiscardUnknown() { + xxx_messageInfo_Definitions.DiscardUnknown(m) +} + +var xxx_messageInfo_Definitions proto.InternalMessageInfo + +func (m *Definitions) GetAdditionalProperties() []*NamedSchema { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type Document struct { + // The Swagger version of this document. + Swagger string `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"` + Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` + // The host (name or ip) of the API. Example: 'swagger.io' + Host string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"` + // The base path to the API. Example: '/api'. + BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"` + // The transfer protocol of the API. + Schemes []string `protobuf:"bytes,5,rep,name=schemes,proto3" json:"schemes,omitempty"` + // A list of MIME types accepted by the API. + Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"` + // A list of MIME types the API can produce. 
+ Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"` + Paths *Paths `protobuf:"bytes,8,opt,name=paths,proto3" json:"paths,omitempty"` + Definitions *Definitions `protobuf:"bytes,9,opt,name=definitions,proto3" json:"definitions,omitempty"` + Parameters *ParameterDefinitions `protobuf:"bytes,10,opt,name=parameters,proto3" json:"parameters,omitempty"` + Responses *ResponseDefinitions `protobuf:"bytes,11,opt,name=responses,proto3" json:"responses,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` + SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"` + Tags []*Tag `protobuf:"bytes,14,rep,name=tags,proto3" json:"tags,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Document) Reset() { *m = Document{} } +func (m *Document) String() string { return proto.CompactTextString(m) } +func (*Document) ProtoMessage() {} +func (*Document) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{8} +} + +func (m *Document) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Document.Unmarshal(m, b) +} +func (m *Document) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Document.Marshal(b, m, deterministic) +} +func (m *Document) XXX_Merge(src proto.Message) { + xxx_messageInfo_Document.Merge(m, src) +} +func (m *Document) XXX_Size() int { + return xxx_messageInfo_Document.Size(m) +} +func (m *Document) XXX_DiscardUnknown() { + xxx_messageInfo_Document.DiscardUnknown(m) +} + +var xxx_messageInfo_Document proto.InternalMessageInfo + +func (m *Document) GetSwagger() string { + if m != nil { + return m.Swagger + } + return "" +} + +func (m *Document) GetInfo() *Info { + if m != nil { + return m.Info + } + return nil +} + +func (m *Document) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *Document) GetBasePath() string { + if m != nil { + return m.BasePath + } + return "" +} + +func (m *Document) GetSchemes() []string { + if m != nil { + return m.Schemes + } + return nil +} + +func (m *Document) GetConsumes() []string { + if m != nil { + return m.Consumes + } + return nil +} + +func (m *Document) GetProduces() []string { + if m != nil { + return m.Produces + } + return nil +} + +func (m *Document) GetPaths() *Paths { + if m != nil { + return m.Paths + } + return nil +} + +func (m *Document) GetDefinitions() *Definitions { + if m != nil { + return m.Definitions + } + return nil +} + +func (m *Document) GetParameters() *ParameterDefinitions { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *Document) GetResponses() *ResponseDefinitions { + if m != nil { + return m.Responses + } + return nil +} + +func (m *Document) GetSecurity() []*SecurityRequirement { + if m != nil { + return m.Security + } + return nil +} + +func (m *Document) GetSecurityDefinitions() *SecurityDefinitions { + if m != nil { + return m.SecurityDefinitions + } + return nil +} + +func (m *Document) GetTags() []*Tag { + if m != nil { + return m.Tags + } + return nil 
+} + +func (m *Document) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Document) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Examples struct { + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Examples) Reset() { *m = Examples{} } +func (m *Examples) String() string { return proto.CompactTextString(m) } +func (*Examples) ProtoMessage() {} +func (*Examples) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{9} +} + +func (m *Examples) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Examples.Unmarshal(m, b) +} +func (m *Examples) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Examples.Marshal(b, m, deterministic) +} +func (m *Examples) XXX_Merge(src proto.Message) { + xxx_messageInfo_Examples.Merge(m, src) +} +func (m *Examples) XXX_Size() int { + return xxx_messageInfo_Examples.Size(m) +} +func (m *Examples) XXX_DiscardUnknown() { + xxx_messageInfo_Examples.DiscardUnknown(m) +} + +var xxx_messageInfo_Examples proto.InternalMessageInfo + +func (m *Examples) GetAdditionalProperties() []*NamedAny { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +// information about external documentation +type ExternalDocs struct { + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExternalDocs) Reset() { *m = ExternalDocs{} } +func (m *ExternalDocs) String() string { return proto.CompactTextString(m) } +func (*ExternalDocs) ProtoMessage() {} +func (*ExternalDocs) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{10} +} + +func (m *ExternalDocs) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExternalDocs.Unmarshal(m, b) +} +func (m *ExternalDocs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExternalDocs.Marshal(b, m, deterministic) +} +func (m *ExternalDocs) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalDocs.Merge(m, src) +} +func (m *ExternalDocs) XXX_Size() int { + return xxx_messageInfo_ExternalDocs.Size(m) +} +func (m *ExternalDocs) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalDocs.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalDocs proto.InternalMessageInfo + +func (m *ExternalDocs) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *ExternalDocs) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *ExternalDocs) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// A deterministic version of a JSON Schema object. 
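+// As an illustrative sketch (not generated code), a response schema for a
+// raw file payload might be built as:
+//
+//	fs := &FileSchema{Type: "file", Title: "Attachment", ReadOnly: true}
+//	_ = fs.GetType() // "file"
+//
+// where fs and its field values are hypothetical.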
+type FileSchema struct { + Format string `protobuf:"bytes,1,opt,name=format,proto3" json:"format,omitempty"` + Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + Default *Any `protobuf:"bytes,4,opt,name=default,proto3" json:"default,omitempty"` + Required []string `protobuf:"bytes,5,rep,name=required,proto3" json:"required,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` + ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + Example *Any `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileSchema) Reset() { *m = FileSchema{} } +func (m *FileSchema) String() string { return proto.CompactTextString(m) } +func (*FileSchema) ProtoMessage() {} +func (*FileSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{11} +} + +func (m *FileSchema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileSchema.Unmarshal(m, b) +} +func (m *FileSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileSchema.Marshal(b, m, deterministic) +} +func (m *FileSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileSchema.Merge(m, src) +} +func (m *FileSchema) XXX_Size() int { + return xxx_messageInfo_FileSchema.Size(m) +} +func (m *FileSchema) XXX_DiscardUnknown() { + xxx_messageInfo_FileSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_FileSchema proto.InternalMessageInfo + +func (m *FileSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *FileSchema) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *FileSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *FileSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *FileSchema) GetRequired() []string { + if m != nil { + return m.Required + } + return nil +} + +func (m *FileSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *FileSchema) GetReadOnly() bool { + if m != nil { + return m.ReadOnly + } + return false +} + +func (m *FileSchema) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *FileSchema) GetExample() *Any { + if m != nil { + return m.Example + } + return nil +} + +func (m *FileSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type FormDataParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. 
+ Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // The name of the parameter. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // allows sending a parameter by name only or with an empty value. + AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FormDataParameterSubSchema) Reset() { *m = FormDataParameterSubSchema{} } +func (m *FormDataParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*FormDataParameterSubSchema) ProtoMessage() {} +func (*FormDataParameterSubSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{12} +} + +func (m *FormDataParameterSubSchema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FormDataParameterSubSchema.Unmarshal(m, b) +} +func (m *FormDataParameterSubSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FormDataParameterSubSchema.Marshal(b, m, deterministic) +} +func (m *FormDataParameterSubSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_FormDataParameterSubSchema.Merge(m, src) +} +func (m *FormDataParameterSubSchema) XXX_Size() int { + return xxx_messageInfo_FormDataParameterSubSchema.Size(m) +} +func (m *FormDataParameterSubSchema) XXX_DiscardUnknown() { + xxx_messageInfo_FormDataParameterSubSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_FormDataParameterSubSchema 
proto.InternalMessageInfo + +func (m *FormDataParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *FormDataParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *FormDataParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *FormDataParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *FormDataParameterSubSchema) GetAllowEmptyValue() bool { + if m != nil { + return m.AllowEmptyValue + } + return false +} + +func (m *FormDataParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *FormDataParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *FormDataParameterSubSchema) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *FormDataParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *FormDataParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *FormDataParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *FormDataParameterSubSchema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *FormDataParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *FormDataParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *FormDataParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *FormDataParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Header struct { + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 
`protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + Description string `protobuf:"bytes,18,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Header) Reset() { *m = Header{} } +func (m *Header) String() string { return proto.CompactTextString(m) } +func (*Header) ProtoMessage() {} +func (*Header) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{13} +} + +func (m *Header) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Header.Unmarshal(m, b) +} +func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Header.Marshal(b, m, deterministic) +} +func (m *Header) XXX_Merge(src proto.Message) { + xxx_messageInfo_Header.Merge(m, src) +} +func (m *Header) XXX_Size() int { + return xxx_messageInfo_Header.Size(m) +} +func (m *Header) XXX_DiscardUnknown() { + xxx_messageInfo_Header.DiscardUnknown(m) +} + +var xxx_messageInfo_Header proto.InternalMessageInfo + +func (m *Header) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Header) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *Header) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *Header) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *Header) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *Header) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *Header) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *Header) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *Header) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *Header) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *Header) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *Header) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *Header) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *Header) GetMinItems() 
int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *Header) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *Header) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *Header) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *Header) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Header) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type HeaderParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // The name of the parameter. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HeaderParameterSubSchema) Reset() { *m = HeaderParameterSubSchema{} } +func (m *HeaderParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*HeaderParameterSubSchema) ProtoMessage() {} +func (*HeaderParameterSubSchema) Descriptor() ([]byte, []int) { + return 
fileDescriptor_a43d10d209cd31c2, []int{14} +} + +func (m *HeaderParameterSubSchema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HeaderParameterSubSchema.Unmarshal(m, b) +} +func (m *HeaderParameterSubSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HeaderParameterSubSchema.Marshal(b, m, deterministic) +} +func (m *HeaderParameterSubSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_HeaderParameterSubSchema.Merge(m, src) +} +func (m *HeaderParameterSubSchema) XXX_Size() int { + return xxx_messageInfo_HeaderParameterSubSchema.Size(m) +} +func (m *HeaderParameterSubSchema) XXX_DiscardUnknown() { + xxx_messageInfo_HeaderParameterSubSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_HeaderParameterSubSchema proto.InternalMessageInfo + +func (m *HeaderParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *HeaderParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *HeaderParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *HeaderParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *HeaderParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *HeaderParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *HeaderParameterSubSchema) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *HeaderParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *HeaderParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *HeaderParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *HeaderParameterSubSchema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *HeaderParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *HeaderParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *HeaderParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *HeaderParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Headers struct { + AdditionalProperties []*NamedHeader `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" 
json:"additional_properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Headers) Reset() { *m = Headers{} } +func (m *Headers) String() string { return proto.CompactTextString(m) } +func (*Headers) ProtoMessage() {} +func (*Headers) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{15} +} + +func (m *Headers) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Headers.Unmarshal(m, b) +} +func (m *Headers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Headers.Marshal(b, m, deterministic) +} +func (m *Headers) XXX_Merge(src proto.Message) { + xxx_messageInfo_Headers.Merge(m, src) +} +func (m *Headers) XXX_Size() int { + return xxx_messageInfo_Headers.Size(m) +} +func (m *Headers) XXX_DiscardUnknown() { + xxx_messageInfo_Headers.DiscardUnknown(m) +} + +var xxx_messageInfo_Headers proto.InternalMessageInfo + +func (m *Headers) GetAdditionalProperties() []*NamedHeader { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +// General information about the API. +type Info struct { + // A unique and precise title of the API. + Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` + // A semantic version number of the API. + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // The terms of service for the API. + TermsOfService string `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"` + Contact *Contact `protobuf:"bytes,5,opt,name=contact,proto3" json:"contact,omitempty"` + License *License `protobuf:"bytes,6,opt,name=license,proto3" json:"license,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Info) Reset() { *m = Info{} } +func (m *Info) String() string { return proto.CompactTextString(m) } +func (*Info) ProtoMessage() {} +func (*Info) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{16} +} + +func (m *Info) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Info.Unmarshal(m, b) +} +func (m *Info) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Info.Marshal(b, m, deterministic) +} +func (m *Info) XXX_Merge(src proto.Message) { + xxx_messageInfo_Info.Merge(m, src) +} +func (m *Info) XXX_Size() int { + return xxx_messageInfo_Info.Size(m) +} +func (m *Info) XXX_DiscardUnknown() { + xxx_messageInfo_Info.DiscardUnknown(m) +} + +var xxx_messageInfo_Info proto.InternalMessageInfo + +func (m *Info) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *Info) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *Info) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Info) GetTermsOfService() string { + if m != nil { + return m.TermsOfService + } + return "" +} + +func (m *Info) GetContact() *Contact { + if m != nil { + return m.Contact + } + return nil +} + +func (m *Info) GetLicense() *License 
{ + if m != nil { + return m.License + } + return nil +} + +func (m *Info) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type ItemsItem struct { + Schema []*Schema `protobuf:"bytes,1,rep,name=schema,proto3" json:"schema,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ItemsItem) Reset() { *m = ItemsItem{} } +func (m *ItemsItem) String() string { return proto.CompactTextString(m) } +func (*ItemsItem) ProtoMessage() {} +func (*ItemsItem) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{17} +} + +func (m *ItemsItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ItemsItem.Unmarshal(m, b) +} +func (m *ItemsItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ItemsItem.Marshal(b, m, deterministic) +} +func (m *ItemsItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_ItemsItem.Merge(m, src) +} +func (m *ItemsItem) XXX_Size() int { + return xxx_messageInfo_ItemsItem.Size(m) +} +func (m *ItemsItem) XXX_DiscardUnknown() { + xxx_messageInfo_ItemsItem.DiscardUnknown(m) +} + +var xxx_messageInfo_ItemsItem proto.InternalMessageInfo + +func (m *ItemsItem) GetSchema() []*Schema { + if m != nil { + return m.Schema + } + return nil +} + +type JsonReference struct { + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *JsonReference) Reset() { *m = JsonReference{} } +func (m *JsonReference) String() string { return proto.CompactTextString(m) } +func (*JsonReference) ProtoMessage() {} +func (*JsonReference) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{18} +} + +func (m *JsonReference) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_JsonReference.Unmarshal(m, b) +} +func (m *JsonReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_JsonReference.Marshal(b, m, deterministic) +} +func (m *JsonReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_JsonReference.Merge(m, src) +} +func (m *JsonReference) XXX_Size() int { + return xxx_messageInfo_JsonReference.Size(m) +} +func (m *JsonReference) XXX_DiscardUnknown() { + xxx_messageInfo_JsonReference.DiscardUnknown(m) +} + +var xxx_messageInfo_JsonReference proto.InternalMessageInfo + +func (m *JsonReference) GetXRef() string { + if m != nil { + return m.XRef + } + return "" +} + +func (m *JsonReference) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +type License struct { + // The name of the license type. It's encouraged to use an OSI compatible license. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The URL pointing to the license. 
+ Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *License) Reset() { *m = License{} } +func (m *License) String() string { return proto.CompactTextString(m) } +func (*License) ProtoMessage() {} +func (*License) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{19} +} + +func (m *License) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_License.Unmarshal(m, b) +} +func (m *License) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_License.Marshal(b, m, deterministic) +} +func (m *License) XXX_Merge(src proto.Message) { + xxx_messageInfo_License.Merge(m, src) +} +func (m *License) XXX_Size() int { + return xxx_messageInfo_License.Size(m) +} +func (m *License) XXX_DiscardUnknown() { + xxx_messageInfo_License.DiscardUnknown(m) +} + +var xxx_messageInfo_License proto.InternalMessageInfo + +func (m *License) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *License) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *License) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. +type NamedAny struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NamedAny) Reset() { *m = NamedAny{} } +func (m *NamedAny) String() string { return proto.CompactTextString(m) } +func (*NamedAny) ProtoMessage() {} +func (*NamedAny) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{20} +} + +func (m *NamedAny) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NamedAny.Unmarshal(m, b) +} +func (m *NamedAny) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NamedAny.Marshal(b, m, deterministic) +} +func (m *NamedAny) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedAny.Merge(m, src) +} +func (m *NamedAny) XXX_Size() int { + return xxx_messageInfo_NamedAny.Size(m) +} +func (m *NamedAny) XXX_DiscardUnknown() { + xxx_messageInfo_NamedAny.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedAny proto.InternalMessageInfo + +func (m *NamedAny) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedAny) GetValue() *Any { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. 
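+// As with NamedAny above, an illustrative sketch (not generated code) of how
+// these ordered pairs are traversed:
+//
+//	for _, e := range doc.GetVendorExtension() { // []*NamedAny, source order preserved
+//		fmt.Printf("%s: %s\n", e.GetName(), e.GetValue().GetYaml())
+//	}
+//
+// where doc is any message carrying vendor extensions, e.g. a *Document.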
+type NamedHeader struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *Header `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NamedHeader) Reset() { *m = NamedHeader{} } +func (m *NamedHeader) String() string { return proto.CompactTextString(m) } +func (*NamedHeader) ProtoMessage() {} +func (*NamedHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{21} +} + +func (m *NamedHeader) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NamedHeader.Unmarshal(m, b) +} +func (m *NamedHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NamedHeader.Marshal(b, m, deterministic) +} +func (m *NamedHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedHeader.Merge(m, src) +} +func (m *NamedHeader) XXX_Size() int { + return xxx_messageInfo_NamedHeader.Size(m) +} +func (m *NamedHeader) XXX_DiscardUnknown() { + xxx_messageInfo_NamedHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedHeader proto.InternalMessageInfo + +func (m *NamedHeader) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedHeader) GetValue() *Header { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. +type NamedParameter struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *Parameter `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NamedParameter) Reset() { *m = NamedParameter{} } +func (m *NamedParameter) String() string { return proto.CompactTextString(m) } +func (*NamedParameter) ProtoMessage() {} +func (*NamedParameter) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{22} +} + +func (m *NamedParameter) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NamedParameter.Unmarshal(m, b) +} +func (m *NamedParameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NamedParameter.Marshal(b, m, deterministic) +} +func (m *NamedParameter) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedParameter.Merge(m, src) +} +func (m *NamedParameter) XXX_Size() int { + return xxx_messageInfo_NamedParameter.Size(m) +} +func (m *NamedParameter) XXX_DiscardUnknown() { + xxx_messageInfo_NamedParameter.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedParameter proto.InternalMessageInfo + +func (m *NamedParameter) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedParameter) GetValue() *Parameter { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
+type NamedPathItem struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *PathItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NamedPathItem) Reset() { *m = NamedPathItem{} } +func (m *NamedPathItem) String() string { return proto.CompactTextString(m) } +func (*NamedPathItem) ProtoMessage() {} +func (*NamedPathItem) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{23} +} + +func (m *NamedPathItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NamedPathItem.Unmarshal(m, b) +} +func (m *NamedPathItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NamedPathItem.Marshal(b, m, deterministic) +} +func (m *NamedPathItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedPathItem.Merge(m, src) +} +func (m *NamedPathItem) XXX_Size() int { + return xxx_messageInfo_NamedPathItem.Size(m) +} +func (m *NamedPathItem) XXX_DiscardUnknown() { + xxx_messageInfo_NamedPathItem.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedPathItem proto.InternalMessageInfo + +func (m *NamedPathItem) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedPathItem) GetValue() *PathItem { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. +type NamedResponse struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *Response `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NamedResponse) Reset() { *m = NamedResponse{} } +func (m *NamedResponse) String() string { return proto.CompactTextString(m) } +func (*NamedResponse) ProtoMessage() {} +func (*NamedResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{24} +} + +func (m *NamedResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NamedResponse.Unmarshal(m, b) +} +func (m *NamedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NamedResponse.Marshal(b, m, deterministic) +} +func (m *NamedResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedResponse.Merge(m, src) +} +func (m *NamedResponse) XXX_Size() int { + return xxx_messageInfo_NamedResponse.Size(m) +} +func (m *NamedResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NamedResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedResponse proto.InternalMessageInfo + +func (m *NamedResponse) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedResponse) GetValue() *Response { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. 
+type NamedResponseValue struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *ResponseValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NamedResponseValue) Reset() { *m = NamedResponseValue{} } +func (m *NamedResponseValue) String() string { return proto.CompactTextString(m) } +func (*NamedResponseValue) ProtoMessage() {} +func (*NamedResponseValue) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{25} +} + +func (m *NamedResponseValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NamedResponseValue.Unmarshal(m, b) +} +func (m *NamedResponseValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NamedResponseValue.Marshal(b, m, deterministic) +} +func (m *NamedResponseValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedResponseValue.Merge(m, src) +} +func (m *NamedResponseValue) XXX_Size() int { + return xxx_messageInfo_NamedResponseValue.Size(m) +} +func (m *NamedResponseValue) XXX_DiscardUnknown() { + xxx_messageInfo_NamedResponseValue.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedResponseValue proto.InternalMessageInfo + +func (m *NamedResponseValue) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedResponseValue) GetValue() *ResponseValue { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. +type NamedSchema struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *Schema `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NamedSchema) Reset() { *m = NamedSchema{} } +func (m *NamedSchema) String() string { return proto.CompactTextString(m) } +func (*NamedSchema) ProtoMessage() {} +func (*NamedSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{26} +} + +func (m *NamedSchema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NamedSchema.Unmarshal(m, b) +} +func (m *NamedSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NamedSchema.Marshal(b, m, deterministic) +} +func (m *NamedSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedSchema.Merge(m, src) +} +func (m *NamedSchema) XXX_Size() int { + return xxx_messageInfo_NamedSchema.Size(m) +} +func (m *NamedSchema) XXX_DiscardUnknown() { + xxx_messageInfo_NamedSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedSchema proto.InternalMessageInfo + +func (m *NamedSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedSchema) GetValue() *Schema { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. 
+type NamedSecurityDefinitionsItem struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NamedSecurityDefinitionsItem) Reset() { *m = NamedSecurityDefinitionsItem{} } +func (m *NamedSecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } +func (*NamedSecurityDefinitionsItem) ProtoMessage() {} +func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{27} +} + +func (m *NamedSecurityDefinitionsItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NamedSecurityDefinitionsItem.Unmarshal(m, b) +} +func (m *NamedSecurityDefinitionsItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NamedSecurityDefinitionsItem.Marshal(b, m, deterministic) +} +func (m *NamedSecurityDefinitionsItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedSecurityDefinitionsItem.Merge(m, src) +} +func (m *NamedSecurityDefinitionsItem) XXX_Size() int { + return xxx_messageInfo_NamedSecurityDefinitionsItem.Size(m) +} +func (m *NamedSecurityDefinitionsItem) XXX_DiscardUnknown() { + xxx_messageInfo_NamedSecurityDefinitionsItem.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedSecurityDefinitionsItem proto.InternalMessageInfo + +func (m *NamedSecurityDefinitionsItem) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedSecurityDefinitionsItem) GetValue() *SecurityDefinitionsItem { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. +type NamedString struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NamedString) Reset() { *m = NamedString{} } +func (m *NamedString) String() string { return proto.CompactTextString(m) } +func (*NamedString) ProtoMessage() {} +func (*NamedString) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{28} +} + +func (m *NamedString) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NamedString.Unmarshal(m, b) +} +func (m *NamedString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NamedString.Marshal(b, m, deterministic) +} +func (m *NamedString) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedString.Merge(m, src) +} +func (m *NamedString) XXX_Size() int { + return xxx_messageInfo_NamedString.Size(m) +} +func (m *NamedString) XXX_DiscardUnknown() { + xxx_messageInfo_NamedString.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedString proto.InternalMessageInfo + +func (m *NamedString) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedString) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. 
+type NamedStringArray struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *StringArray `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NamedStringArray) Reset() { *m = NamedStringArray{} } +func (m *NamedStringArray) String() string { return proto.CompactTextString(m) } +func (*NamedStringArray) ProtoMessage() {} +func (*NamedStringArray) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{29} +} + +func (m *NamedStringArray) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NamedStringArray.Unmarshal(m, b) +} +func (m *NamedStringArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NamedStringArray.Marshal(b, m, deterministic) +} +func (m *NamedStringArray) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedStringArray.Merge(m, src) +} +func (m *NamedStringArray) XXX_Size() int { + return xxx_messageInfo_NamedStringArray.Size(m) +} +func (m *NamedStringArray) XXX_DiscardUnknown() { + xxx_messageInfo_NamedStringArray.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedStringArray proto.InternalMessageInfo + +func (m *NamedStringArray) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedStringArray) GetValue() *StringArray { + if m != nil { + return m.Value + } + return nil +} + +type NonBodyParameter struct { + // Types that are valid to be assigned to Oneof: + // *NonBodyParameter_HeaderParameterSubSchema + // *NonBodyParameter_FormDataParameterSubSchema + // *NonBodyParameter_QueryParameterSubSchema + // *NonBodyParameter_PathParameterSubSchema + Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NonBodyParameter) Reset() { *m = NonBodyParameter{} } +func (m *NonBodyParameter) String() string { return proto.CompactTextString(m) } +func (*NonBodyParameter) ProtoMessage() {} +func (*NonBodyParameter) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{30} +} + +func (m *NonBodyParameter) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NonBodyParameter.Unmarshal(m, b) +} +func (m *NonBodyParameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NonBodyParameter.Marshal(b, m, deterministic) +} +func (m *NonBodyParameter) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonBodyParameter.Merge(m, src) +} +func (m *NonBodyParameter) XXX_Size() int { + return xxx_messageInfo_NonBodyParameter.Size(m) +} +func (m *NonBodyParameter) XXX_DiscardUnknown() { + xxx_messageInfo_NonBodyParameter.DiscardUnknown(m) +} + +var xxx_messageInfo_NonBodyParameter proto.InternalMessageInfo + +type isNonBodyParameter_Oneof interface { + isNonBodyParameter_Oneof() +} + +type NonBodyParameter_HeaderParameterSubSchema struct { + HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,proto3,oneof"` +} + +type NonBodyParameter_FormDataParameterSubSchema struct { + FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,proto3,oneof"` +} + +type NonBodyParameter_QueryParameterSubSchema struct { + QueryParameterSubSchema 
*QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,proto3,oneof"` +} + +type NonBodyParameter_PathParameterSubSchema struct { + PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,proto3,oneof"` +} + +func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof() {} + +func (*NonBodyParameter_FormDataParameterSubSchema) isNonBodyParameter_Oneof() {} + +func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof() {} + +func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof() {} + +func (m *NonBodyParameter) GetOneof() isNonBodyParameter_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *NonBodyParameter) GetHeaderParameterSubSchema() *HeaderParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_HeaderParameterSubSchema); ok { + return x.HeaderParameterSubSchema + } + return nil +} + +func (m *NonBodyParameter) GetFormDataParameterSubSchema() *FormDataParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_FormDataParameterSubSchema); ok { + return x.FormDataParameterSubSchema + } + return nil +} + +func (m *NonBodyParameter) GetQueryParameterSubSchema() *QueryParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_QueryParameterSubSchema); ok { + return x.QueryParameterSubSchema + } + return nil +} + +func (m *NonBodyParameter) GetPathParameterSubSchema() *PathParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_PathParameterSubSchema); ok { + return x.PathParameterSubSchema + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*NonBodyParameter) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*NonBodyParameter_HeaderParameterSubSchema)(nil), + (*NonBodyParameter_FormDataParameterSubSchema)(nil), + (*NonBodyParameter_QueryParameterSubSchema)(nil), + (*NonBodyParameter_PathParameterSubSchema)(nil), + } +} + +type Oauth2AccessCodeSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` + AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` + TokenUrl string `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` + Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Oauth2AccessCodeSecurity) Reset() { *m = Oauth2AccessCodeSecurity{} } +func (m *Oauth2AccessCodeSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2AccessCodeSecurity) ProtoMessage() {} +func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{31} +} + +func (m *Oauth2AccessCodeSecurity) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Oauth2AccessCodeSecurity.Unmarshal(m, b) +} +func (m *Oauth2AccessCodeSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
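The Named* wrappers exist because protobuf map fields do not preserve insertion order; modeling a map as a repeated list of (name, value) pairs keeps an OpenAPI document's keys in their original order through a round trip. A minimal sketch of that pattern, using a hand-written stand-in rather than the vendored NamedString type:

package main

import "fmt"

// NamedString is a hand-written stand-in for the generated message
// (map key in Name, mapped value in Value); it is not the vendored type.
type NamedString struct {
	Name  string
	Value string
}

func main() {
	// A Go map (or a proto3 map field) would not preserve this order;
	// a repeated list of (name, value) pairs does.
	scopes := []*NamedString{
		{Name: "read:repo", Value: "Grants read access"},
		{Name: "write:repo", Value: "Grants write access"},
	}
	for _, s := range scopes {
		fmt.Printf("%s = %s\n", s.Name, s.Value) // emitted in document order
	}
}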
[Oauth2AccessCodeSecurity, Oauth2ApplicationSecurity, Oauth2ImplicitSecurity, Oauth2PasswordSecurity: the four OAuth2 security schemes. All have Type, Flow, Scopes (*Oauth2Scopes), Description, and VendorExtension; the access-code flow additionally has AuthorizationUrl and TokenUrl, the implicit flow AuthorizationUrl, and the application and password flows TokenUrl.

Oauth2Scopes: AdditionalProperties []*NamedString.

Operation: Tags, Summary (a brief summary of the operation), Description (a longer description; GitHub Flavored Markdown is allowed), ExternalDocs, OperationId (a unique identifier of the operation), Produces and Consumes (MIME types the API can produce and consume), Parameters []*ParametersItem (the parameters needed to send a valid API call), Responses, Schemes (the transfer protocol of the API), Deprecated, Security []*SecurityRequirement, VendorExtension.

Parameter: a oneof over BodyParameter and NonBodyParameter, with the same generated oneof surface as above.]
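NonBodyParameter and Parameter both use the standard protoc-gen-go oneof layout: a small unexported interface, one exported wrapper struct per variant, and getters that return the variant or nil. A sketch with simplified stand-in types (not the generated ones) showing how callers typically type-switch on the wrapper:

package main

import "fmt"

// Hand-written stand-ins mirroring the generated oneof shape.
type BodyParameter struct{ Name string }
type NonBodyParameter struct{ Name string }

type isParameter_Oneof interface{ isParameter_Oneof() }

type Parameter_BodyParameter struct{ BodyParameter *BodyParameter }
type Parameter_NonBodyParameter struct{ NonBodyParameter *NonBodyParameter }

func (*Parameter_BodyParameter) isParameter_Oneof()    {}
func (*Parameter_NonBodyParameter) isParameter_Oneof() {}

type Parameter struct{ Oneof isParameter_Oneof }

func main() {
	p := &Parameter{Oneof: &Parameter_BodyParameter{&BodyParameter{Name: "payload"}}}
	// Type-switch on the wrapper to find out which variant is set.
	switch v := p.Oneof.(type) {
	case *Parameter_BodyParameter:
		fmt.Println("body parameter:", v.BodyParameter.Name)
	case *Parameter_NonBodyParameter:
		fmt.Println("non-body parameter:", v.NonBodyParameter.Name)
	}
}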
[ParameterDefinitions (one or more JSON representations for parameters): AdditionalProperties []*NamedParameter.

ParametersItem: a oneof over Parameter and JsonReference.

PathItem: XRef plus one *Operation per HTTP method (Get, Put, Post, Delete, Options, Head, Patch), Parameters []*ParametersItem, VendorExtension.

PathParameterSubSchema: Required (whether this parameter is required or optional), In (the location of the parameter), Description, Name, and the JSON-Schema-style validation fields (Type, Format, Items *PrimitivesItems, CollectionFormat, Default, Maximum, ExclusiveMaximum, Minimum, ExclusiveMinimum, MaxLength, MinLength, Pattern, MaxItems, MinItems, UniqueItems, Enum, MultipleOf) plus VendorExtension.]
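Each generated getter tolerates a nil receiver, so deeply nested optional fields can be read without guarding every link in the chain. A self-contained illustration of the pattern with hypothetical Doc and Info types, not the generated ones:

package main

import "fmt"

type Info struct{ Title string }
type Doc struct{ Info *Info }

// Generated-style getters: safe to call on a nil receiver.
func (d *Doc) GetInfo() *Info {
	if d != nil {
		return d.Info
	}
	return nil
}

func (i *Info) GetTitle() string {
	if i != nil {
		return i.Title
	}
	return ""
}

func main() {
	var doc *Doc // nil, e.g. because parsing failed
	// Chained getters never panic; missing links yield zero values.
	fmt.Printf("title: %q\n", doc.GetInfo().GetTitle()) // title: ""
}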
[Paths: VendorExtension plus Path []*NamedPathItem; relative paths to the individual endpoints, which must be relative to the 'basePath'.

PrimitivesItems: the same validation fields as PathParameterSubSchema, describing array items.

Properties: AdditionalProperties []*NamedSchema.

QueryParameterSubSchema: Required, In, Description, Name, AllowEmptyValue (allows sending a parameter by name only or with an empty value), and the same validation fields plus VendorExtension; its remaining generated getters continue below.]
+func (m *QueryParameterSubSchema) GetMultipleOf() float64 {
+	if m != nil {
+		return m.MultipleOf
+	}
+	return 
0 +} + +func (m *QueryParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Response struct { + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + Schema *SchemaItem `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"` + Headers *Headers `protobuf:"bytes,3,opt,name=headers,proto3" json:"headers,omitempty"` + Examples *Examples `protobuf:"bytes,4,opt,name=examples,proto3" json:"examples,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} +func (*Response) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{46} +} + +func (m *Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Response.Unmarshal(m, b) +} +func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Response.Marshal(b, m, deterministic) +} +func (m *Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_Response.Merge(m, src) +} +func (m *Response) XXX_Size() int { + return xxx_messageInfo_Response.Size(m) +} +func (m *Response) XXX_DiscardUnknown() { + xxx_messageInfo_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_Response proto.InternalMessageInfo + +func (m *Response) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Response) GetSchema() *SchemaItem { + if m != nil { + return m.Schema + } + return nil +} + +func (m *Response) GetHeaders() *Headers { + if m != nil { + return m.Headers + } + return nil +} + +func (m *Response) GetExamples() *Examples { + if m != nil { + return m.Examples + } + return nil +} + +func (m *Response) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// One or more JSON representations for parameters +type ResponseDefinitions struct { + AdditionalProperties []*NamedResponse `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResponseDefinitions) Reset() { *m = ResponseDefinitions{} } +func (m *ResponseDefinitions) String() string { return proto.CompactTextString(m) } +func (*ResponseDefinitions) ProtoMessage() {} +func (*ResponseDefinitions) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{47} +} + +func (m *ResponseDefinitions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResponseDefinitions.Unmarshal(m, b) +} +func (m *ResponseDefinitions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResponseDefinitions.Marshal(b, m, deterministic) +} +func (m *ResponseDefinitions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseDefinitions.Merge(m, src) +} +func (m *ResponseDefinitions) XXX_Size() int { + return xxx_messageInfo_ResponseDefinitions.Size(m) +} +func (m *ResponseDefinitions) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseDefinitions.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseDefinitions proto.InternalMessageInfo 
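[Note on the generated plumbing above: the per-message XXX_Unmarshal/XXX_Marshal/XXX_Merge/XXX_Size/XXX_DiscardUnknown wrappers all delegate to the proto runtime through the shared xxx_messageInfo_* values, so callers never invoke them directly. A minimal round-trip sketch, assuming this vendored package is importable at its upstream path (github.com/googleapis/gnostic/openapiv2, package openapi_v2) alongside the APIv1 runtime it was generated against:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	openapi_v2 "github.com/googleapis/gnostic/openapiv2"
)

func main() {
	// proto.Marshal routes through XXX_Marshal via proto.InternalMessageInfo.
	in := &openapi_v2.Response{Description: "OK"}
	raw, err := proto.Marshal(in)
	if err != nil {
		panic(err)
	}

	// proto.Unmarshal likewise routes through XXX_Unmarshal.
	out := &openapi_v2.Response{}
	if err := proto.Unmarshal(raw, out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetDescription()) // "OK"
}
```

This is only an illustration of how the wrappers are reached at runtime, not part of the vendored file.]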
+ +func (m *ResponseDefinitions) GetAdditionalProperties() []*NamedResponse { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type ResponseValue struct { + // Types that are valid to be assigned to Oneof: + // *ResponseValue_Response + // *ResponseValue_JsonReference + Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResponseValue) Reset() { *m = ResponseValue{} } +func (m *ResponseValue) String() string { return proto.CompactTextString(m) } +func (*ResponseValue) ProtoMessage() {} +func (*ResponseValue) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{48} +} + +func (m *ResponseValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResponseValue.Unmarshal(m, b) +} +func (m *ResponseValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResponseValue.Marshal(b, m, deterministic) +} +func (m *ResponseValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseValue.Merge(m, src) +} +func (m *ResponseValue) XXX_Size() int { + return xxx_messageInfo_ResponseValue.Size(m) +} +func (m *ResponseValue) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseValue proto.InternalMessageInfo + +type isResponseValue_Oneof interface { + isResponseValue_Oneof() +} + +type ResponseValue_Response struct { + Response *Response `protobuf:"bytes,1,opt,name=response,proto3,oneof"` +} + +type ResponseValue_JsonReference struct { + JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"` +} + +func (*ResponseValue_Response) isResponseValue_Oneof() {} + +func (*ResponseValue_JsonReference) isResponseValue_Oneof() {} + +func (m *ResponseValue) GetOneof() isResponseValue_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *ResponseValue) GetResponse() *Response { + if x, ok := m.GetOneof().(*ResponseValue_Response); ok { + return x.Response + } + return nil +} + +func (m *ResponseValue) GetJsonReference() *JsonReference { + if x, ok := m.GetOneof().(*ResponseValue_JsonReference); ok { + return x.JsonReference + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*ResponseValue) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*ResponseValue_Response)(nil), + (*ResponseValue_JsonReference)(nil), + } +} + +// Response objects names can either be any valid HTTP status code or 'default'. 
+type Responses struct { + ResponseCode []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode,proto3" json:"response_code,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Responses) Reset() { *m = Responses{} } +func (m *Responses) String() string { return proto.CompactTextString(m) } +func (*Responses) ProtoMessage() {} +func (*Responses) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{49} +} + +func (m *Responses) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Responses.Unmarshal(m, b) +} +func (m *Responses) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Responses.Marshal(b, m, deterministic) +} +func (m *Responses) XXX_Merge(src proto.Message) { + xxx_messageInfo_Responses.Merge(m, src) +} +func (m *Responses) XXX_Size() int { + return xxx_messageInfo_Responses.Size(m) +} +func (m *Responses) XXX_DiscardUnknown() { + xxx_messageInfo_Responses.DiscardUnknown(m) +} + +var xxx_messageInfo_Responses proto.InternalMessageInfo + +func (m *Responses) GetResponseCode() []*NamedResponseValue { + if m != nil { + return m.ResponseCode + } + return nil +} + +func (m *Responses) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// A deterministic version of a JSON Schema object. +type Schema struct { + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` + Title string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` + MultipleOf float64 `protobuf:"fixed64,6,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + Maximum float64 `protobuf:"fixed64,7,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,8,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,9,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,10,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,11,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,12,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,13,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,14,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,15,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,16,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + MaxProperties int64 `protobuf:"varint,17,opt,name=max_properties,json=maxProperties,proto3" json:"max_properties,omitempty"` + MinProperties int64 `protobuf:"varint,18,opt,name=min_properties,json=minProperties,proto3" json:"min_properties,omitempty"` + Required []string 
`protobuf:"bytes,19,rep,name=required,proto3" json:"required,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` + AdditionalProperties *AdditionalPropertiesItem `protobuf:"bytes,21,opt,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + Type *TypeItem `protobuf:"bytes,22,opt,name=type,proto3" json:"type,omitempty"` + Items *ItemsItem `protobuf:"bytes,23,opt,name=items,proto3" json:"items,omitempty"` + AllOf []*Schema `protobuf:"bytes,24,rep,name=all_of,json=allOf,proto3" json:"all_of,omitempty"` + Properties *Properties `protobuf:"bytes,25,opt,name=properties,proto3" json:"properties,omitempty"` + Discriminator string `protobuf:"bytes,26,opt,name=discriminator,proto3" json:"discriminator,omitempty"` + ReadOnly bool `protobuf:"varint,27,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + Xml *Xml `protobuf:"bytes,28,opt,name=xml,proto3" json:"xml,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,29,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + Example *Any `protobuf:"bytes,30,opt,name=example,proto3" json:"example,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,31,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Schema) Reset() { *m = Schema{} } +func (m *Schema) String() string { return proto.CompactTextString(m) } +func (*Schema) ProtoMessage() {} +func (*Schema) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{50} +} + +func (m *Schema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Schema.Unmarshal(m, b) +} +func (m *Schema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Schema.Marshal(b, m, deterministic) +} +func (m *Schema) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema.Merge(m, src) +} +func (m *Schema) XXX_Size() int { + return xxx_messageInfo_Schema.Size(m) +} +func (m *Schema) XXX_DiscardUnknown() { + xxx_messageInfo_Schema.DiscardUnknown(m) +} + +var xxx_messageInfo_Schema proto.InternalMessageInfo + +func (m *Schema) GetXRef() string { + if m != nil { + return m.XRef + } + return "" +} + +func (m *Schema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *Schema) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *Schema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Schema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *Schema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *Schema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *Schema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *Schema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *Schema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *Schema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *Schema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *Schema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} 
+ +func (m *Schema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *Schema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *Schema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *Schema) GetMaxProperties() int64 { + if m != nil { + return m.MaxProperties + } + return 0 +} + +func (m *Schema) GetMinProperties() int64 { + if m != nil { + return m.MinProperties + } + return 0 +} + +func (m *Schema) GetRequired() []string { + if m != nil { + return m.Required + } + return nil +} + +func (m *Schema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *Schema) GetAdditionalProperties() *AdditionalPropertiesItem { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +func (m *Schema) GetType() *TypeItem { + if m != nil { + return m.Type + } + return nil +} + +func (m *Schema) GetItems() *ItemsItem { + if m != nil { + return m.Items + } + return nil +} + +func (m *Schema) GetAllOf() []*Schema { + if m != nil { + return m.AllOf + } + return nil +} + +func (m *Schema) GetProperties() *Properties { + if m != nil { + return m.Properties + } + return nil +} + +func (m *Schema) GetDiscriminator() string { + if m != nil { + return m.Discriminator + } + return "" +} + +func (m *Schema) GetReadOnly() bool { + if m != nil { + return m.ReadOnly + } + return false +} + +func (m *Schema) GetXml() *Xml { + if m != nil { + return m.Xml + } + return nil +} + +func (m *Schema) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Schema) GetExample() *Any { + if m != nil { + return m.Example + } + return nil +} + +func (m *Schema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type SchemaItem struct { + // Types that are valid to be assigned to Oneof: + // *SchemaItem_Schema + // *SchemaItem_FileSchema + Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SchemaItem) Reset() { *m = SchemaItem{} } +func (m *SchemaItem) String() string { return proto.CompactTextString(m) } +func (*SchemaItem) ProtoMessage() {} +func (*SchemaItem) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{51} +} + +func (m *SchemaItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SchemaItem.Unmarshal(m, b) +} +func (m *SchemaItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SchemaItem.Marshal(b, m, deterministic) +} +func (m *SchemaItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_SchemaItem.Merge(m, src) +} +func (m *SchemaItem) XXX_Size() int { + return xxx_messageInfo_SchemaItem.Size(m) +} +func (m *SchemaItem) XXX_DiscardUnknown() { + xxx_messageInfo_SchemaItem.DiscardUnknown(m) +} + +var xxx_messageInfo_SchemaItem proto.InternalMessageInfo + +type isSchemaItem_Oneof interface { + isSchemaItem_Oneof() +} + +type SchemaItem_Schema struct { + Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"` +} + +type SchemaItem_FileSchema struct { + FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,proto3,oneof"` +} + +func (*SchemaItem_Schema) isSchemaItem_Oneof() {} + +func (*SchemaItem_FileSchema) isSchemaItem_Oneof() {} + +func (m *SchemaItem) GetOneof() isSchemaItem_Oneof { + if m != nil { + return m.Oneof + } + return 
nil +} + +func (m *SchemaItem) GetSchema() *Schema { + if x, ok := m.GetOneof().(*SchemaItem_Schema); ok { + return x.Schema + } + return nil +} + +func (m *SchemaItem) GetFileSchema() *FileSchema { + if x, ok := m.GetOneof().(*SchemaItem_FileSchema); ok { + return x.FileSchema + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*SchemaItem) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*SchemaItem_Schema)(nil), + (*SchemaItem_FileSchema)(nil), + } +} + +type SecurityDefinitions struct { + AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SecurityDefinitions) Reset() { *m = SecurityDefinitions{} } +func (m *SecurityDefinitions) String() string { return proto.CompactTextString(m) } +func (*SecurityDefinitions) ProtoMessage() {} +func (*SecurityDefinitions) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{52} +} + +func (m *SecurityDefinitions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SecurityDefinitions.Unmarshal(m, b) +} +func (m *SecurityDefinitions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SecurityDefinitions.Marshal(b, m, deterministic) +} +func (m *SecurityDefinitions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecurityDefinitions.Merge(m, src) +} +func (m *SecurityDefinitions) XXX_Size() int { + return xxx_messageInfo_SecurityDefinitions.Size(m) +} +func (m *SecurityDefinitions) XXX_DiscardUnknown() { + xxx_messageInfo_SecurityDefinitions.DiscardUnknown(m) +} + +var xxx_messageInfo_SecurityDefinitions proto.InternalMessageInfo + +func (m *SecurityDefinitions) GetAdditionalProperties() []*NamedSecurityDefinitionsItem { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type SecurityDefinitionsItem struct { + // Types that are valid to be assigned to Oneof: + // *SecurityDefinitionsItem_BasicAuthenticationSecurity + // *SecurityDefinitionsItem_ApiKeySecurity + // *SecurityDefinitionsItem_Oauth2ImplicitSecurity + // *SecurityDefinitionsItem_Oauth2PasswordSecurity + // *SecurityDefinitionsItem_Oauth2ApplicationSecurity + // *SecurityDefinitionsItem_Oauth2AccessCodeSecurity + Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SecurityDefinitionsItem) Reset() { *m = SecurityDefinitionsItem{} } +func (m *SecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } +func (*SecurityDefinitionsItem) ProtoMessage() {} +func (*SecurityDefinitionsItem) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{53} +} + +func (m *SecurityDefinitionsItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SecurityDefinitionsItem.Unmarshal(m, b) +} +func (m *SecurityDefinitionsItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SecurityDefinitionsItem.Marshal(b, m, deterministic) +} +func (m *SecurityDefinitionsItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecurityDefinitionsItem.Merge(m, src) +} +func (m *SecurityDefinitionsItem) XXX_Size() int { + return xxx_messageInfo_SecurityDefinitionsItem.Size(m) +} +func (m *SecurityDefinitionsItem) 
XXX_DiscardUnknown() { + xxx_messageInfo_SecurityDefinitionsItem.DiscardUnknown(m) +} + +var xxx_messageInfo_SecurityDefinitionsItem proto.InternalMessageInfo + +type isSecurityDefinitionsItem_Oneof interface { + isSecurityDefinitionsItem_Oneof() +} + +type SecurityDefinitionsItem_BasicAuthenticationSecurity struct { + BasicAuthenticationSecurity *BasicAuthenticationSecurity `protobuf:"bytes,1,opt,name=basic_authentication_security,json=basicAuthenticationSecurity,proto3,oneof"` +} + +type SecurityDefinitionsItem_ApiKeySecurity struct { + ApiKeySecurity *ApiKeySecurity `protobuf:"bytes,2,opt,name=api_key_security,json=apiKeySecurity,proto3,oneof"` +} + +type SecurityDefinitionsItem_Oauth2ImplicitSecurity struct { + Oauth2ImplicitSecurity *Oauth2ImplicitSecurity `protobuf:"bytes,3,opt,name=oauth2_implicit_security,json=oauth2ImplicitSecurity,proto3,oneof"` +} + +type SecurityDefinitionsItem_Oauth2PasswordSecurity struct { + Oauth2PasswordSecurity *Oauth2PasswordSecurity `protobuf:"bytes,4,opt,name=oauth2_password_security,json=oauth2PasswordSecurity,proto3,oneof"` +} + +type SecurityDefinitionsItem_Oauth2ApplicationSecurity struct { + Oauth2ApplicationSecurity *Oauth2ApplicationSecurity `protobuf:"bytes,5,opt,name=oauth2_application_security,json=oauth2ApplicationSecurity,proto3,oneof"` +} + +type SecurityDefinitionsItem_Oauth2AccessCodeSecurity struct { + Oauth2AccessCodeSecurity *Oauth2AccessCodeSecurity `protobuf:"bytes,6,opt,name=oauth2_access_code_security,json=oauth2AccessCodeSecurity,proto3,oneof"` +} + +func (*SecurityDefinitionsItem_BasicAuthenticationSecurity) isSecurityDefinitionsItem_Oneof() {} + +func (*SecurityDefinitionsItem_ApiKeySecurity) isSecurityDefinitionsItem_Oneof() {} + +func (*SecurityDefinitionsItem_Oauth2ImplicitSecurity) isSecurityDefinitionsItem_Oneof() {} + +func (*SecurityDefinitionsItem_Oauth2PasswordSecurity) isSecurityDefinitionsItem_Oneof() {} + +func (*SecurityDefinitionsItem_Oauth2ApplicationSecurity) isSecurityDefinitionsItem_Oneof() {} + +func (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) isSecurityDefinitionsItem_Oneof() {} + +func (m *SecurityDefinitionsItem) GetOneof() isSecurityDefinitionsItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *SecurityDefinitionsItem) GetBasicAuthenticationSecurity() *BasicAuthenticationSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_BasicAuthenticationSecurity); ok { + return x.BasicAuthenticationSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetApiKeySecurity() *ApiKeySecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_ApiKeySecurity); ok { + return x.ApiKeySecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2ImplicitSecurity() *Oauth2ImplicitSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ImplicitSecurity); ok { + return x.Oauth2ImplicitSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2PasswordSecurity() *Oauth2PasswordSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2PasswordSecurity); ok { + return x.Oauth2PasswordSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2ApplicationSecurity() *Oauth2ApplicationSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ApplicationSecurity); ok { + return x.Oauth2ApplicationSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2AccessCodeSecurity() *Oauth2AccessCodeSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity); 
ok { + return x.Oauth2AccessCodeSecurity + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*SecurityDefinitionsItem) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil), + (*SecurityDefinitionsItem_ApiKeySecurity)(nil), + (*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2PasswordSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2ApplicationSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)(nil), + } +} + +type SecurityRequirement struct { + AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SecurityRequirement) Reset() { *m = SecurityRequirement{} } +func (m *SecurityRequirement) String() string { return proto.CompactTextString(m) } +func (*SecurityRequirement) ProtoMessage() {} +func (*SecurityRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{54} +} + +func (m *SecurityRequirement) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SecurityRequirement.Unmarshal(m, b) +} +func (m *SecurityRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SecurityRequirement.Marshal(b, m, deterministic) +} +func (m *SecurityRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecurityRequirement.Merge(m, src) +} +func (m *SecurityRequirement) XXX_Size() int { + return xxx_messageInfo_SecurityRequirement.Size(m) +} +func (m *SecurityRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_SecurityRequirement.DiscardUnknown(m) +} + +var xxx_messageInfo_SecurityRequirement proto.InternalMessageInfo + +func (m *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type StringArray struct { + Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StringArray) Reset() { *m = StringArray{} } +func (m *StringArray) String() string { return proto.CompactTextString(m) } +func (*StringArray) ProtoMessage() {} +func (*StringArray) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{55} +} + +func (m *StringArray) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StringArray.Unmarshal(m, b) +} +func (m *StringArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StringArray.Marshal(b, m, deterministic) +} +func (m *StringArray) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringArray.Merge(m, src) +} +func (m *StringArray) XXX_Size() int { + return xxx_messageInfo_StringArray.Size(m) +} +func (m *StringArray) XXX_DiscardUnknown() { + xxx_messageInfo_StringArray.DiscardUnknown(m) +} + +var xxx_messageInfo_StringArray proto.InternalMessageInfo + +func (m *StringArray) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +type Tag struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + ExternalDocs *ExternalDocs 
`protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Tag) Reset() { *m = Tag{} } +func (m *Tag) String() string { return proto.CompactTextString(m) } +func (*Tag) ProtoMessage() {} +func (*Tag) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{56} +} + +func (m *Tag) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Tag.Unmarshal(m, b) +} +func (m *Tag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Tag.Marshal(b, m, deterministic) +} +func (m *Tag) XXX_Merge(src proto.Message) { + xxx_messageInfo_Tag.Merge(m, src) +} +func (m *Tag) XXX_Size() int { + return xxx_messageInfo_Tag.Size(m) +} +func (m *Tag) XXX_DiscardUnknown() { + xxx_messageInfo_Tag.DiscardUnknown(m) +} + +var xxx_messageInfo_Tag proto.InternalMessageInfo + +func (m *Tag) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Tag) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Tag) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Tag) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type TypeItem struct { + Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TypeItem) Reset() { *m = TypeItem{} } +func (m *TypeItem) String() string { return proto.CompactTextString(m) } +func (*TypeItem) ProtoMessage() {} +func (*TypeItem) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{57} +} + +func (m *TypeItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TypeItem.Unmarshal(m, b) +} +func (m *TypeItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TypeItem.Marshal(b, m, deterministic) +} +func (m *TypeItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypeItem.Merge(m, src) +} +func (m *TypeItem) XXX_Size() int { + return xxx_messageInfo_TypeItem.Size(m) +} +func (m *TypeItem) XXX_DiscardUnknown() { + xxx_messageInfo_TypeItem.DiscardUnknown(m) +} + +var xxx_messageInfo_TypeItem proto.InternalMessageInfo + +func (m *TypeItem) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +// Any property starting with x- is valid. 
+type VendorExtension struct { + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VendorExtension) Reset() { *m = VendorExtension{} } +func (m *VendorExtension) String() string { return proto.CompactTextString(m) } +func (*VendorExtension) ProtoMessage() {} +func (*VendorExtension) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{58} +} + +func (m *VendorExtension) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VendorExtension.Unmarshal(m, b) +} +func (m *VendorExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VendorExtension.Marshal(b, m, deterministic) +} +func (m *VendorExtension) XXX_Merge(src proto.Message) { + xxx_messageInfo_VendorExtension.Merge(m, src) +} +func (m *VendorExtension) XXX_Size() int { + return xxx_messageInfo_VendorExtension.Size(m) +} +func (m *VendorExtension) XXX_DiscardUnknown() { + xxx_messageInfo_VendorExtension.DiscardUnknown(m) +} + +var xxx_messageInfo_VendorExtension proto.InternalMessageInfo + +func (m *VendorExtension) GetAdditionalProperties() []*NamedAny { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type Xml struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"` + Attribute bool `protobuf:"varint,4,opt,name=attribute,proto3" json:"attribute,omitempty"` + Wrapped bool `protobuf:"varint,5,opt,name=wrapped,proto3" json:"wrapped,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Xml) Reset() { *m = Xml{} } +func (m *Xml) String() string { return proto.CompactTextString(m) } +func (*Xml) ProtoMessage() {} +func (*Xml) Descriptor() ([]byte, []int) { + return fileDescriptor_a43d10d209cd31c2, []int{59} +} + +func (m *Xml) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Xml.Unmarshal(m, b) +} +func (m *Xml) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Xml.Marshal(b, m, deterministic) +} +func (m *Xml) XXX_Merge(src proto.Message) { + xxx_messageInfo_Xml.Merge(m, src) +} +func (m *Xml) XXX_Size() int { + return xxx_messageInfo_Xml.Size(m) +} +func (m *Xml) XXX_DiscardUnknown() { + xxx_messageInfo_Xml.DiscardUnknown(m) +} + +var xxx_messageInfo_Xml proto.InternalMessageInfo + +func (m *Xml) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Xml) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *Xml) GetPrefix() string { + if m != nil { + return m.Prefix + } + return "" +} + +func (m *Xml) GetAttribute() bool { + if m != nil { + return m.Attribute + } + return false +} + +func (m *Xml) GetWrapped() bool { + if m != nil { + return m.Wrapped + } + return false +} + +func (m *Xml) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +func init() { + proto.RegisterType((*AdditionalPropertiesItem)(nil), 
"openapi.v2.AdditionalPropertiesItem") + proto.RegisterType((*Any)(nil), "openapi.v2.Any") + proto.RegisterType((*ApiKeySecurity)(nil), "openapi.v2.ApiKeySecurity") + proto.RegisterType((*BasicAuthenticationSecurity)(nil), "openapi.v2.BasicAuthenticationSecurity") + proto.RegisterType((*BodyParameter)(nil), "openapi.v2.BodyParameter") + proto.RegisterType((*Contact)(nil), "openapi.v2.Contact") + proto.RegisterType((*Default)(nil), "openapi.v2.Default") + proto.RegisterType((*Definitions)(nil), "openapi.v2.Definitions") + proto.RegisterType((*Document)(nil), "openapi.v2.Document") + proto.RegisterType((*Examples)(nil), "openapi.v2.Examples") + proto.RegisterType((*ExternalDocs)(nil), "openapi.v2.ExternalDocs") + proto.RegisterType((*FileSchema)(nil), "openapi.v2.FileSchema") + proto.RegisterType((*FormDataParameterSubSchema)(nil), "openapi.v2.FormDataParameterSubSchema") + proto.RegisterType((*Header)(nil), "openapi.v2.Header") + proto.RegisterType((*HeaderParameterSubSchema)(nil), "openapi.v2.HeaderParameterSubSchema") + proto.RegisterType((*Headers)(nil), "openapi.v2.Headers") + proto.RegisterType((*Info)(nil), "openapi.v2.Info") + proto.RegisterType((*ItemsItem)(nil), "openapi.v2.ItemsItem") + proto.RegisterType((*JsonReference)(nil), "openapi.v2.JsonReference") + proto.RegisterType((*License)(nil), "openapi.v2.License") + proto.RegisterType((*NamedAny)(nil), "openapi.v2.NamedAny") + proto.RegisterType((*NamedHeader)(nil), "openapi.v2.NamedHeader") + proto.RegisterType((*NamedParameter)(nil), "openapi.v2.NamedParameter") + proto.RegisterType((*NamedPathItem)(nil), "openapi.v2.NamedPathItem") + proto.RegisterType((*NamedResponse)(nil), "openapi.v2.NamedResponse") + proto.RegisterType((*NamedResponseValue)(nil), "openapi.v2.NamedResponseValue") + proto.RegisterType((*NamedSchema)(nil), "openapi.v2.NamedSchema") + proto.RegisterType((*NamedSecurityDefinitionsItem)(nil), "openapi.v2.NamedSecurityDefinitionsItem") + proto.RegisterType((*NamedString)(nil), "openapi.v2.NamedString") + proto.RegisterType((*NamedStringArray)(nil), "openapi.v2.NamedStringArray") + proto.RegisterType((*NonBodyParameter)(nil), "openapi.v2.NonBodyParameter") + proto.RegisterType((*Oauth2AccessCodeSecurity)(nil), "openapi.v2.Oauth2AccessCodeSecurity") + proto.RegisterType((*Oauth2ApplicationSecurity)(nil), "openapi.v2.Oauth2ApplicationSecurity") + proto.RegisterType((*Oauth2ImplicitSecurity)(nil), "openapi.v2.Oauth2ImplicitSecurity") + proto.RegisterType((*Oauth2PasswordSecurity)(nil), "openapi.v2.Oauth2PasswordSecurity") + proto.RegisterType((*Oauth2Scopes)(nil), "openapi.v2.Oauth2Scopes") + proto.RegisterType((*Operation)(nil), "openapi.v2.Operation") + proto.RegisterType((*Parameter)(nil), "openapi.v2.Parameter") + proto.RegisterType((*ParameterDefinitions)(nil), "openapi.v2.ParameterDefinitions") + proto.RegisterType((*ParametersItem)(nil), "openapi.v2.ParametersItem") + proto.RegisterType((*PathItem)(nil), "openapi.v2.PathItem") + proto.RegisterType((*PathParameterSubSchema)(nil), "openapi.v2.PathParameterSubSchema") + proto.RegisterType((*Paths)(nil), "openapi.v2.Paths") + proto.RegisterType((*PrimitivesItems)(nil), "openapi.v2.PrimitivesItems") + proto.RegisterType((*Properties)(nil), "openapi.v2.Properties") + proto.RegisterType((*QueryParameterSubSchema)(nil), "openapi.v2.QueryParameterSubSchema") + proto.RegisterType((*Response)(nil), "openapi.v2.Response") + proto.RegisterType((*ResponseDefinitions)(nil), "openapi.v2.ResponseDefinitions") + proto.RegisterType((*ResponseValue)(nil), 
"openapi.v2.ResponseValue") + proto.RegisterType((*Responses)(nil), "openapi.v2.Responses") + proto.RegisterType((*Schema)(nil), "openapi.v2.Schema") + proto.RegisterType((*SchemaItem)(nil), "openapi.v2.SchemaItem") + proto.RegisterType((*SecurityDefinitions)(nil), "openapi.v2.SecurityDefinitions") + proto.RegisterType((*SecurityDefinitionsItem)(nil), "openapi.v2.SecurityDefinitionsItem") + proto.RegisterType((*SecurityRequirement)(nil), "openapi.v2.SecurityRequirement") + proto.RegisterType((*StringArray)(nil), "openapi.v2.StringArray") + proto.RegisterType((*Tag)(nil), "openapi.v2.Tag") + proto.RegisterType((*TypeItem)(nil), "openapi.v2.TypeItem") + proto.RegisterType((*VendorExtension)(nil), "openapi.v2.VendorExtension") + proto.RegisterType((*Xml)(nil), "openapi.v2.Xml") +} + +func init() { proto.RegisterFile("openapiv2/OpenAPIv2.proto", fileDescriptor_a43d10d209cd31c2) } + +var fileDescriptor_a43d10d209cd31c2 = []byte{ + // 3130 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x3b, 0x4b, 0x73, 0x1c, 0x57, + 0xd5, 0xf3, 0x7e, 0x1c, 0x69, 0x46, 0xa3, 0x96, 0x2c, 0xb7, 0x24, 0xc7, 0x71, 0xe4, 0x3c, 0x6c, + 0xe7, 0xb3, 0x9c, 0x4f, 0x29, 0x48, 0x05, 0x2a, 0x05, 0xf2, 0xab, 0xc6, 0xc4, 0x44, 0x4a, 0xcb, + 0x0e, 0x09, 0x04, 0xba, 0xae, 0x66, 0xee, 0x48, 0x9d, 0x74, 0xf7, 0x6d, 0x77, 0xf7, 0xc8, 0x1a, + 0x16, 0x2c, 0xa0, 0x8a, 0x35, 0x50, 0x59, 0x53, 0x15, 0x16, 0x14, 0x55, 0x59, 0xb0, 0x62, 0xc5, + 0x1f, 0x60, 0xc7, 0x3f, 0x60, 0x0d, 0x5b, 0xaa, 0x58, 0x51, 0x3c, 0xea, 0xbe, 0xa6, 0x5f, 0xb7, + 0xe7, 0x61, 0xb9, 0x80, 0x02, 0xad, 0x66, 0xee, 0x3d, 0xe7, 0x9e, 0x7b, 0xfa, 0xf4, 0x79, 0xdd, + 0x73, 0x6e, 0xc3, 0x3a, 0xf1, 0xb0, 0x8b, 0x3c, 0xeb, 0x64, 0xe7, 0xd6, 0x9e, 0x87, 0xdd, 0xdd, + 0xfd, 0x07, 0x27, 0x3b, 0xdb, 0x9e, 0x4f, 0x42, 0xa2, 0x81, 0x00, 0x6d, 0x9f, 0xec, 0x6c, 0xac, + 0x1f, 0x11, 0x72, 0x64, 0xe3, 0x5b, 0x0c, 0x72, 0x38, 0x1c, 0xdc, 0x42, 0xee, 0x88, 0xa3, 0x6d, + 0x39, 0xa0, 0xef, 0xf6, 0xfb, 0x56, 0x68, 0x11, 0x17, 0xd9, 0xfb, 0x3e, 0xf1, 0xb0, 0x1f, 0x5a, + 0x38, 0x78, 0x10, 0x62, 0x47, 0xfb, 0x3f, 0xa8, 0x05, 0xbd, 0x63, 0xec, 0x20, 0xbd, 0x78, 0xa5, + 0x78, 0x6d, 0x61, 0x47, 0xdb, 0x8e, 0x68, 0x6e, 0x1f, 0x30, 0x48, 0xb7, 0x60, 0x08, 0x1c, 0x6d, + 0x03, 0xea, 0x87, 0x84, 0xd8, 0x18, 0xb9, 0x7a, 0xe9, 0x4a, 0xf1, 0x5a, 0xa3, 0x5b, 0x30, 0xe4, + 0xc4, 0xed, 0x3a, 0x54, 0x89, 0x8b, 0xc9, 0x60, 0xeb, 0x1e, 0x94, 0x77, 0xdd, 0x91, 0x76, 0x03, + 0xaa, 0x27, 0xc8, 0x1e, 0x62, 0x41, 0x78, 0x75, 0x9b, 0x33, 0xb8, 0x2d, 0x19, 0xdc, 0xde, 0x75, + 0x47, 0x06, 0x47, 0xd1, 0x34, 0xa8, 0x8c, 0x90, 0x63, 0x33, 0xa2, 0x4d, 0x83, 0xfd, 0xdf, 0xfa, + 0xa2, 0x08, 0xed, 0x5d, 0xcf, 0x7a, 0x17, 0x8f, 0x0e, 0x70, 0x6f, 0xe8, 0x5b, 0xe1, 0x88, 0xa2, + 0x85, 0x23, 0x8f, 0x53, 0x6c, 0x1a, 0xec, 0x3f, 0x9d, 0x73, 0x91, 0x83, 0xe5, 0x52, 0xfa, 0x5f, + 0x6b, 0x43, 0xc9, 0x72, 0xf5, 0x32, 0x9b, 0x29, 0x59, 0xae, 0x76, 0x05, 0x16, 0xfa, 0x38, 0xe8, + 0xf9, 0x96, 0x47, 0x65, 0xa0, 0x57, 0x18, 0x20, 0x3e, 0xa5, 0x7d, 0x0d, 0x3a, 0x27, 0xd8, 0xed, + 0x13, 0xdf, 0xc4, 0xa7, 0x21, 0x76, 0x03, 0x8a, 0x56, 0xbd, 0x52, 0x66, 0x7c, 0xc7, 0x04, 0xf2, + 0x1e, 0x72, 0x70, 0x9f, 0xf2, 0xbd, 0xc4, 0xb1, 0xef, 0x49, 0xe4, 0xad, 0xcf, 0x8a, 0xb0, 0x79, + 0x1b, 0x05, 0x56, 0x6f, 0x77, 0x18, 0x1e, 0x63, 0x37, 0xb4, 0x7a, 0x88, 0x12, 0x9e, 0xc8, 0x7a, + 0x8a, 0xad, 0xd2, 0x6c, 0x6c, 0x95, 0xe7, 0x61, 0xeb, 0x0f, 0x45, 0x68, 0xdd, 0x26, 0xfd, 0xd1, + 0x3e, 0xf2, 0x91, 0x83, 0x43, 0xec, 0xa7, 0x37, 0x2d, 0x66, 0x37, 0x9d, 0x45, 0xa2, 0x1b, 0xd0, + 0xf0, 0xf1, 0x93, 
0xa1, 0xe5, 0xe3, 0x3e, 0x13, 0x67, 0xc3, 0x18, 0x8f, 0xb5, 0x1b, 0x63, 0x95, + 0xaa, 0xe6, 0xa9, 0xd4, 0x58, 0xa1, 0x54, 0x0f, 0x58, 0x9b, 0xe7, 0x01, 0x7f, 0x5c, 0x84, 0xfa, + 0x1d, 0xe2, 0x86, 0xa8, 0x17, 0x8e, 0x19, 0x2f, 0xc6, 0x18, 0xef, 0x40, 0x79, 0xe8, 0x4b, 0xc5, + 0xa2, 0x7f, 0xb5, 0x55, 0xa8, 0x62, 0x07, 0x59, 0xb6, 0x78, 0x1a, 0x3e, 0x50, 0x32, 0x52, 0x99, + 0x87, 0x91, 0x47, 0x50, 0xbf, 0x8b, 0x07, 0x68, 0x68, 0x87, 0xda, 0x03, 0xb8, 0x80, 0xc6, 0xf6, + 0x66, 0x7a, 0x63, 0x83, 0xd3, 0x8b, 0x13, 0x08, 0xae, 0x22, 0x85, 0x89, 0x6e, 0x7d, 0x07, 0x16, + 0xee, 0xe2, 0x81, 0xe5, 0x32, 0x48, 0xa0, 0x3d, 0x9c, 0x4c, 0xf9, 0x62, 0x86, 0xb2, 0x10, 0xb7, + 0x9a, 0xf8, 0x1f, 0xab, 0xd0, 0xb8, 0x4b, 0x7a, 0x43, 0x07, 0xbb, 0xa1, 0xa6, 0x43, 0x3d, 0x78, + 0x8a, 0x8e, 0x8e, 0xb0, 0x2f, 0xe4, 0x27, 0x87, 0xda, 0xcb, 0x50, 0xb1, 0xdc, 0x01, 0x61, 0x32, + 0x5c, 0xd8, 0xe9, 0xc4, 0xf7, 0x78, 0xe0, 0x0e, 0x88, 0xc1, 0xa0, 0x54, 0xf8, 0xc7, 0x24, 0x08, + 0x85, 0x54, 0xd9, 0x7f, 0x6d, 0x13, 0x9a, 0x87, 0x28, 0xc0, 0xa6, 0x87, 0xc2, 0x63, 0x61, 0x75, + 0x0d, 0x3a, 0xb1, 0x8f, 0xc2, 0x63, 0xb6, 0x21, 0xe5, 0x0e, 0x07, 0xcc, 0xd2, 0xe8, 0x86, 0x7c, + 0x48, 0x95, 0xab, 0x47, 0xdc, 0x60, 0x48, 0x41, 0x35, 0x06, 0x1a, 0x8f, 0x29, 0xcc, 0xf3, 0x49, + 0x7f, 0xd8, 0xc3, 0x81, 0x5e, 0xe7, 0x30, 0x39, 0xd6, 0x5e, 0x83, 0x2a, 0xdd, 0x29, 0xd0, 0x1b, + 0x8c, 0xd3, 0xe5, 0x38, 0xa7, 0x74, 0xcb, 0xc0, 0xe0, 0x70, 0xed, 0x6d, 0x6a, 0x03, 0x63, 0xa9, + 0xea, 0x4d, 0x86, 0x9e, 0x10, 0x5e, 0x4c, 0xe8, 0x46, 0x1c, 0x57, 0xfb, 0x3a, 0x80, 0x27, 0x6d, + 0x29, 0xd0, 0x81, 0xad, 0xbc, 0x92, 0xdc, 0x48, 0x40, 0xe3, 0x24, 0x62, 0x6b, 0xb4, 0x77, 0xa0, + 0xe9, 0xe3, 0xc0, 0x23, 0x6e, 0x80, 0x03, 0x7d, 0x81, 0x11, 0x78, 0x31, 0x4e, 0xc0, 0x10, 0xc0, + 0xf8, 0xfa, 0x68, 0x85, 0xf6, 0x55, 0x68, 0x04, 0xc2, 0xa9, 0xe8, 0x8b, 0xec, 0xad, 0x27, 0x56, + 0x4b, 0x87, 0x63, 0x70, 0x6b, 0xa4, 0xaf, 0xd6, 0x18, 0x2f, 0xd0, 0x0c, 0x58, 0x95, 0xff, 0xcd, + 0xb8, 0x04, 0x5a, 0x59, 0x36, 0x24, 0xa1, 0x38, 0x1b, 0x2b, 0x41, 0x76, 0x52, 0xbb, 0x0a, 0x95, + 0x10, 0x1d, 0x05, 0x7a, 0x9b, 0x31, 0xb3, 0x14, 0xa7, 0xf1, 0x08, 0x1d, 0x19, 0x0c, 0xa8, 0xbd, + 0x03, 0x2d, 0x6a, 0x57, 0x3e, 0x55, 0xdb, 0x3e, 0xe9, 0x05, 0xfa, 0x12, 0xdb, 0x51, 0x8f, 0x63, + 0xdf, 0x13, 0x08, 0x77, 0x49, 0x2f, 0x30, 0x16, 0x71, 0x6c, 0xa4, 0xb4, 0xce, 0xce, 0x3c, 0xd6, + 0xf9, 0x18, 0x1a, 0xf7, 0x4e, 0x91, 0xe3, 0xd9, 0x38, 0x78, 0x9e, 0xe6, 0xf9, 0xa3, 0x22, 0x2c, + 0xc6, 0xd9, 0x9e, 0xc1, 0xbb, 0x66, 0x1d, 0xd2, 0x99, 0x9d, 0xfc, 0x3f, 0x4a, 0x00, 0xf7, 0x2d, + 0x1b, 0x73, 0x63, 0xd7, 0xd6, 0xa0, 0x36, 0x20, 0xbe, 0x83, 0x42, 0xb1, 0xbd, 0x18, 0x51, 0xc7, + 0x17, 0x5a, 0xa1, 0x2d, 0x1d, 0x3b, 0x1f, 0xa4, 0x39, 0x2e, 0x67, 0x39, 0xbe, 0x0e, 0xf5, 0x3e, + 0xf7, 0x6c, 0xcc, 0x86, 0x53, 0xef, 0x98, 0x72, 0x24, 0xe1, 0x89, 0xb0, 0xc0, 0x8d, 0x3a, 0x0a, + 0x0b, 0x32, 0x02, 0xd6, 0x62, 0x11, 0x70, 0x93, 0xda, 0x02, 0xea, 0x9b, 0xc4, 0xb5, 0x47, 0x7a, + 0x5d, 0xc6, 0x11, 0xd4, 0xdf, 0x73, 0xed, 0x51, 0x56, 0x67, 0x1a, 0x73, 0xe9, 0xcc, 0x75, 0xa8, + 0x63, 0xfe, 0xca, 0x85, 0x81, 0x67, 0xd9, 0x16, 0x70, 0xe5, 0x1b, 0x80, 0x79, 0xde, 0xc0, 0x17, + 0x35, 0xd8, 0xb8, 0x4f, 0x7c, 0xe7, 0x2e, 0x0a, 0xd1, 0xd8, 0x01, 0x1c, 0x0c, 0x0f, 0x0f, 0x64, + 0xda, 0x14, 0x89, 0xa5, 0x98, 0x8a, 0x96, 0x3c, 0xb2, 0x96, 0xf2, 0x72, 0x95, 0x72, 0x7e, 0x7c, + 0xae, 0xc4, 0xc2, 0xdc, 0x0d, 0x58, 0x46, 0xb6, 0x4d, 0x9e, 0x9a, 0xd8, 0xf1, 0xc2, 0x91, 0xc9, + 0x13, 0xaf, 0x2a, 0xdb, 0x6a, 0x89, 0x01, 0xee, 0xd1, 0xf9, 0x0f, 0x64, 0xb2, 0x95, 0x79, 0x11, + 0x91, 0xce, 0xd4, 0x13, 0x3a, 0xf3, 0xff, 
0x50, 0xb5, 0x42, 0xec, 0x48, 0xd9, 0x6f, 0x26, 0x3c, + 0x9d, 0x6f, 0x39, 0x56, 0x68, 0x9d, 0xf0, 0x4c, 0x32, 0x30, 0x38, 0xa6, 0xf6, 0x3a, 0x2c, 0xf7, + 0x88, 0x6d, 0xe3, 0x1e, 0x65, 0xd6, 0x14, 0x54, 0x9b, 0x8c, 0x6a, 0x27, 0x02, 0xdc, 0xe7, 0xf4, + 0x63, 0xba, 0x05, 0x53, 0x74, 0x4b, 0x87, 0xba, 0x83, 0x4e, 0x2d, 0x67, 0xe8, 0x30, 0xaf, 0x59, + 0x34, 0xe4, 0x90, 0xee, 0x88, 0x4f, 0x7b, 0xf6, 0x30, 0xb0, 0x4e, 0xb0, 0x29, 0x71, 0x16, 0xd9, + 0xc3, 0x77, 0xc6, 0x80, 0x6f, 0x0a, 0x64, 0x4a, 0xc6, 0x72, 0x19, 0x4a, 0x4b, 0x90, 0xe1, 0xc3, + 0x14, 0x19, 0x81, 0xd3, 0x4e, 0x93, 0x11, 0xc8, 0x2f, 0x00, 0x38, 0xe8, 0xd4, 0xb4, 0xb1, 0x7b, + 0x14, 0x1e, 0x33, 0x6f, 0x56, 0x36, 0x9a, 0x0e, 0x3a, 0x7d, 0xc8, 0x26, 0x18, 0xd8, 0x72, 0x25, + 0xb8, 0x23, 0xc0, 0x96, 0x2b, 0xc0, 0x3a, 0xd4, 0x3d, 0x14, 0x52, 0x65, 0xd5, 0x97, 0x79, 0xb0, + 0x15, 0x43, 0x6a, 0x11, 0x94, 0x2e, 0x17, 0xba, 0xc6, 0xd6, 0x35, 0x1c, 0x74, 0xca, 0x24, 0xcc, + 0x80, 0x96, 0x2b, 0x80, 0x2b, 0x02, 0x68, 0xb9, 0x1c, 0xf8, 0x12, 0x2c, 0x0e, 0x5d, 0xeb, 0xc9, + 0x10, 0x0b, 0xf8, 0x2a, 0xe3, 0x7c, 0x81, 0xcf, 0x71, 0x94, 0xab, 0x50, 0xc1, 0xee, 0xd0, 0xd1, + 0x2f, 0x64, 0x5d, 0x35, 0x15, 0x35, 0x03, 0x6a, 0x2f, 0xc2, 0x82, 0x33, 0xb4, 0x43, 0xcb, 0xb3, + 0xb1, 0x49, 0x06, 0xfa, 0x1a, 0x13, 0x12, 0xc8, 0xa9, 0xbd, 0x81, 0xd2, 0x5a, 0x2e, 0xce, 0x65, + 0x2d, 0x55, 0xa8, 0x75, 0x31, 0xea, 0x63, 0x5f, 0x99, 0x16, 0x47, 0xba, 0x58, 0x52, 0xeb, 0x62, + 0xf9, 0x6c, 0xba, 0x58, 0x99, 0xae, 0x8b, 0xd5, 0xd9, 0x75, 0xb1, 0x36, 0x83, 0x2e, 0xd6, 0xa7, + 0xeb, 0x62, 0x63, 0x06, 0x5d, 0x6c, 0xce, 0xa4, 0x8b, 0x30, 0x59, 0x17, 0x17, 0x26, 0xe8, 0xe2, + 0xe2, 0x04, 0x5d, 0x6c, 0x4d, 0xd2, 0xc5, 0xf6, 0x14, 0x5d, 0x5c, 0xca, 0xd7, 0xc5, 0xce, 0x1c, + 0xba, 0xb8, 0x9c, 0xd1, 0xc5, 0x94, 0xb7, 0xd4, 0x66, 0x3b, 0x42, 0xad, 0xcc, 0xa3, 0xad, 0x7f, + 0xab, 0x82, 0xce, 0xb5, 0xf5, 0xdf, 0xe2, 0xd9, 0xa5, 0x85, 0x54, 0x95, 0x16, 0x52, 0x53, 0x5b, + 0x48, 0xfd, 0x6c, 0x16, 0xd2, 0x98, 0x6e, 0x21, 0xcd, 0xd9, 0x2d, 0x04, 0x66, 0xb0, 0x90, 0x85, + 0xe9, 0x16, 0xb2, 0x38, 0x83, 0x85, 0xb4, 0x66, 0xb2, 0x90, 0xf6, 0x64, 0x0b, 0x59, 0x9a, 0x60, + 0x21, 0x9d, 0x09, 0x16, 0xb2, 0x3c, 0xc9, 0x42, 0xb4, 0x29, 0x16, 0xb2, 0x92, 0x6f, 0x21, 0xab, + 0x73, 0x58, 0xc8, 0x85, 0x99, 0xbc, 0xf5, 0xda, 0x3c, 0xfa, 0xff, 0x2d, 0xa8, 0x73, 0xf5, 0x7f, + 0x86, 0xe3, 0x27, 0x5f, 0x98, 0x93, 0x3c, 0x7f, 0x5e, 0x82, 0x0a, 0x3d, 0x40, 0x46, 0x89, 0x69, + 0x31, 0x9e, 0x98, 0xea, 0x50, 0x3f, 0xc1, 0x7e, 0x10, 0x55, 0x46, 0xe4, 0x70, 0x06, 0x43, 0xba, + 0x06, 0x9d, 0x10, 0xfb, 0x4e, 0x60, 0x92, 0x81, 0x19, 0x60, 0xff, 0xc4, 0xea, 0x49, 0xa3, 0x6a, + 0xb3, 0xf9, 0xbd, 0xc1, 0x01, 0x9f, 0xd5, 0x6e, 0x42, 0xbd, 0xc7, 0xcb, 0x07, 0xc2, 0xe9, 0xaf, + 0xc4, 0x1f, 0x42, 0x54, 0x16, 0x0c, 0x89, 0x43, 0xd1, 0x6d, 0xab, 0x87, 0xdd, 0x80, 0xa7, 0x4f, + 0x29, 0xf4, 0x87, 0x1c, 0x64, 0x48, 0x1c, 0xa5, 0xf0, 0xeb, 0xf3, 0x08, 0xff, 0x2d, 0x68, 0x32, + 0x65, 0x60, 0xb5, 0xba, 0x1b, 0xb1, 0x5a, 0x5d, 0x79, 0x72, 0x61, 0x65, 0xeb, 0x2e, 0xb4, 0xbe, + 0x11, 0x10, 0xd7, 0xc0, 0x03, 0xec, 0x63, 0xb7, 0x87, 0xb5, 0x65, 0xa8, 0x98, 0x3e, 0x1e, 0x08, + 0x19, 0x97, 0x0d, 0x3c, 0x98, 0x5e, 0x7f, 0xda, 0xf2, 0xa0, 0x2e, 0x9e, 0x69, 0xc6, 0xe2, 0xca, + 0x99, 0xcf, 0x32, 0xf7, 0xa0, 0x21, 0x81, 0xca, 0x2d, 0x5f, 0x91, 0x55, 0xc5, 0x92, 0xda, 0x01, + 0x71, 0xe8, 0xd6, 0xbb, 0xb0, 0x10, 0x53, 0x40, 0x25, 0xa5, 0x6b, 0x49, 0x4a, 0x09, 0x61, 0x0a, + 0xbd, 0x15, 0xc4, 0xde, 0x87, 0x36, 0x23, 0x16, 0x15, 0xd1, 0x54, 0xf4, 0x5e, 0x4f, 0xd2, 0xbb, + 0xa0, 0x2c, 0x0a, 0x48, 0x92, 0x7b, 0xd0, 0x12, 0x24, 0xc3, 0x63, 
0xf6, 0x6e, 0x55, 0x14, 0x6f, + 0x24, 0x29, 0xae, 0xa6, 0xeb, 0x19, 0x74, 0x61, 0x9a, 0xa0, 0xac, 0x1e, 0xcc, 0x4d, 0x50, 0x2e, + 0x94, 0x04, 0x3f, 0x02, 0x2d, 0x41, 0x70, 0x7c, 0x76, 0xc8, 0x50, 0xbd, 0x95, 0xa4, 0xba, 0xae, + 0xa2, 0xca, 0x56, 0xa7, 0x5f, 0x8e, 0x88, 0xa1, 0xf3, 0xbe, 0x1c, 0xa1, 0xe9, 0x82, 0x98, 0x03, + 0x97, 0x38, 0xb1, 0x6c, 0x69, 0x22, 0x57, 0xb0, 0x6f, 0x27, 0xa9, 0x5f, 0x9d, 0x52, 0xf7, 0x88, + 0xcb, 0xf9, 0x2d, 0xc9, 0x7b, 0xe8, 0x5b, 0xee, 0x91, 0x92, 0xfa, 0x6a, 0x9c, 0x7a, 0x53, 0x2e, + 0x7c, 0x0c, 0x9d, 0xd8, 0xc2, 0x5d, 0xdf, 0x47, 0x6a, 0x05, 0xbf, 0x99, 0xe4, 0x2d, 0xe1, 0x53, + 0x63, 0x6b, 0x25, 0xd9, 0xdf, 0x94, 0xa1, 0xf3, 0x1e, 0x71, 0x93, 0x35, 0x5e, 0x0c, 0x9b, 0xc7, + 0x4c, 0x83, 0xcd, 0x71, 0xdd, 0xc9, 0x0c, 0x86, 0x87, 0x66, 0xa2, 0xd2, 0xff, 0x72, 0x56, 0xe1, + 0xb3, 0x09, 0x4e, 0xb7, 0x60, 0xe8, 0xc7, 0x79, 0xc9, 0x8f, 0x0d, 0x97, 0x69, 0xc2, 0x60, 0xf6, + 0x51, 0x88, 0xd4, 0x3b, 0xf1, 0x67, 0x78, 0x35, 0xbe, 0x53, 0xfe, 0x31, 0xb9, 0x5b, 0x30, 0x36, + 0x06, 0xf9, 0x87, 0xe8, 0x43, 0xd8, 0x78, 0x32, 0xc4, 0xfe, 0x48, 0xbd, 0x53, 0x39, 0xfb, 0x26, + 0xdf, 0xa7, 0xd8, 0xca, 0x6d, 0x2e, 0x3e, 0x51, 0x83, 0x34, 0x13, 0xd6, 0x3d, 0x14, 0x1e, 0xab, + 0xb7, 0xe0, 0xc5, 0x8f, 0xad, 0xb4, 0x15, 0x2a, 0x77, 0x58, 0xf3, 0x94, 0x90, 0xa8, 0x49, 0xf2, + 0x79, 0x09, 0xf4, 0x3d, 0x34, 0x0c, 0x8f, 0x77, 0x76, 0x7b, 0x3d, 0x1c, 0x04, 0x77, 0x48, 0x1f, + 0x4f, 0xeb, 0x73, 0x0c, 0x6c, 0xf2, 0x54, 0x56, 0xe5, 0xe9, 0x7f, 0xed, 0x0d, 0x1a, 0x10, 0x88, + 0x87, 0xe5, 0x91, 0x28, 0x51, 0x1a, 0xe1, 0xd4, 0x0f, 0x18, 0xdc, 0x10, 0x78, 0x34, 0x6b, 0xa2, + 0xd3, 0xc4, 0xb7, 0xbe, 0xcf, 0xfa, 0x13, 0x26, 0xf5, 0xdf, 0xe2, 0x40, 0x94, 0x00, 0x3c, 0xf6, + 0x6d, 0x9a, 0xc0, 0x84, 0xe4, 0x53, 0xcc, 0x91, 0x78, 0xfe, 0xd9, 0x60, 0x13, 0x14, 0x98, 0x0a, + 0x1e, 0xb5, 0xd9, 0x32, 0xef, 0xb9, 0x82, 0xdf, 0x5f, 0x8a, 0xb0, 0x2e, 0x64, 0xe4, 0x79, 0xf6, + 0x2c, 0x1d, 0x95, 0xe7, 0x23, 0xa4, 0xc4, 0x73, 0x57, 0x26, 0x3f, 0x77, 0x75, 0xb6, 0xe7, 0x9e, + 0xab, 0xa7, 0xf1, 0xc3, 0x12, 0xac, 0x71, 0xc6, 0x1e, 0x38, 0xf4, 0xb9, 0xad, 0xf0, 0x3f, 0x4d, + 0x33, 0xfe, 0x05, 0x42, 0xf8, 0x73, 0x51, 0x0a, 0x61, 0x1f, 0x05, 0xc1, 0x53, 0xe2, 0xf7, 0xff, + 0x07, 0xde, 0xfc, 0xc7, 0xb0, 0x18, 0xe7, 0xeb, 0x19, 0xfa, 0x3d, 0x2c, 0x42, 0xe4, 0x24, 0xdc, + 0x3f, 0xaf, 0x40, 0x73, 0xcf, 0xc3, 0x3e, 0x92, 0x87, 0x4d, 0x56, 0xb7, 0x2f, 0xb2, 0x3a, 0x2d, + 0x2f, 0xd3, 0xeb, 0x50, 0x0f, 0x86, 0x8e, 0x83, 0xfc, 0x91, 0xcc, 0xb9, 0xc5, 0x70, 0x86, 0x9c, + 0x3b, 0x53, 0xae, 0xad, 0xcc, 0x55, 0xae, 0x7d, 0x09, 0x16, 0x89, 0xe4, 0xcd, 0xb4, 0xfa, 0x52, + 0xbc, 0xe3, 0xb9, 0x07, 0xfd, 0x44, 0xef, 0xa7, 0x96, 0xea, 0xfd, 0xc4, 0x7b, 0x46, 0xf5, 0x54, + 0xcf, 0xe8, 0x2b, 0x89, 0x9e, 0x4d, 0x83, 0x89, 0x6e, 0x43, 0x99, 0x9e, 0xf1, 0x50, 0x1f, 0xef, + 0xd6, 0xbc, 0x19, 0xef, 0xd6, 0x34, 0xb3, 0x99, 0x9d, 0x4c, 0x70, 0x12, 0x3d, 0x9a, 0x58, 0x6b, + 0x0b, 0x92, 0xad, 0xad, 0xcb, 0x00, 0x7d, 0xec, 0xf9, 0xb8, 0x87, 0x42, 0xdc, 0x17, 0xa7, 0xde, + 0xd8, 0xcc, 0xd9, 0xba, 0x3b, 0x2a, 0xf5, 0x6b, 0xcd, 0xa3, 0x7e, 0xbf, 0x2c, 0x42, 0x33, 0xca, + 0x22, 0x6e, 0x43, 0xfb, 0x90, 0xf4, 0x63, 0xf1, 0x56, 0x24, 0x0e, 0x89, 0x04, 0x2f, 0x91, 0x78, + 0x74, 0x0b, 0x46, 0xeb, 0x30, 0x91, 0x89, 0x3c, 0x04, 0xcd, 0x25, 0xae, 0x99, 0xa2, 0xc3, 0xd3, + 0x82, 0x4b, 0x09, 0xa6, 0x52, 0x39, 0x4c, 0xb7, 0x60, 0x74, 0xdc, 0xd4, 0x5c, 0x14, 0x3d, 0x8f, + 0x60, 0x55, 0xd5, 0x67, 0xd3, 0xf6, 0x26, 0xdb, 0xcb, 0x46, 0x46, 0x0c, 0x51, 0x62, 0xae, 0x36, + 0x99, 0xcf, 0x8a, 0xd0, 0x4e, 0x6a, 0x87, 0xf6, 0x25, 0x68, 0xa6, 0x25, 0xa2, 0xce, 0xf5, 
0xbb, + 0x05, 0x23, 0xc2, 0xa4, 0xd2, 0xfc, 0x24, 0x20, 0x2e, 0x3d, 0x83, 0xf1, 0x13, 0x99, 0x2a, 0x5d, + 0x4e, 0x1c, 0xd9, 0xa8, 0x34, 0x3f, 0x89, 0x4f, 0x44, 0xcf, 0xff, 0xfb, 0x32, 0x34, 0xc6, 0x47, + 0x07, 0xc5, 0xc9, 0xee, 0x35, 0x28, 0x1f, 0xe1, 0x50, 0x75, 0x12, 0x19, 0xdb, 0xbf, 0x41, 0x31, + 0x28, 0xa2, 0x37, 0x0c, 0x85, 0x7f, 0xcc, 0x43, 0xf4, 0x86, 0xa1, 0x76, 0x1d, 0x2a, 0x1e, 0x09, + 0x64, 0x07, 0x28, 0x07, 0x93, 0xa1, 0x68, 0x37, 0xa1, 0xd6, 0xc7, 0x36, 0x0e, 0xb1, 0x38, 0x51, + 0xe7, 0x20, 0x0b, 0x24, 0xed, 0x16, 0xd4, 0x89, 0xc7, 0xdb, 0x90, 0xb5, 0x49, 0xf8, 0x12, 0x8b, + 0xb2, 0x42, 0x53, 0x52, 0x51, 0xe4, 0xca, 0x63, 0x85, 0xa2, 0xd0, 0x33, 0x99, 0x87, 0xc2, 0xde, + 0xb1, 0x68, 0x5f, 0xe4, 0xe0, 0x72, 0x9c, 0x94, 0x9b, 0x68, 0xce, 0xe5, 0x26, 0xce, 0xdc, 0x41, + 0xfa, 0x6b, 0x15, 0xd6, 0xd4, 0xd9, 0xe4, 0x79, 0x8d, 0xf1, 0xbc, 0xc6, 0xf8, 0xdf, 0x5e, 0x63, + 0x7c, 0x0a, 0x55, 0x76, 0x41, 0x43, 0x49, 0xa9, 0x38, 0x07, 0x25, 0xed, 0x26, 0x54, 0xd8, 0x6d, + 0x93, 0x12, 0x5b, 0xb4, 0xae, 0x70, 0xf8, 0xa2, 0x6e, 0xc2, 0xd0, 0xb6, 0x7e, 0x56, 0x85, 0xa5, + 0x94, 0xd6, 0x9e, 0xf7, 0xa4, 0xce, 0x7b, 0x52, 0x67, 0xea, 0x49, 0xa9, 0x74, 0x58, 0x9b, 0xc7, + 0x1a, 0xbe, 0x0d, 0x10, 0xa5, 0x20, 0xcf, 0xf9, 0xce, 0xd7, 0xaf, 0x6a, 0x70, 0x31, 0xa7, 0x30, + 0x72, 0x7e, 0x4d, 0xe1, 0xfc, 0x9a, 0xc2, 0xf9, 0x35, 0x85, 0xc8, 0x0c, 0xff, 0x5e, 0x84, 0xc6, + 0xb8, 0x9c, 0x3e, 0xfd, 0x62, 0xd7, 0xf6, 0xb8, 0x3b, 0xc3, 0xd3, 0xee, 0xb5, 0x6c, 0xcd, 0x9a, + 0x05, 0x1e, 0x79, 0xf5, 0xf5, 0x26, 0xd4, 0x79, 0x65, 0x55, 0x06, 0x8f, 0x95, 0x6c, 0x41, 0x36, + 0x30, 0x24, 0x8e, 0xf6, 0x06, 0x34, 0xc4, 0x75, 0x25, 0x79, 0xb2, 0x5e, 0x4d, 0x9e, 0xac, 0x39, + 0xcc, 0x18, 0x63, 0x9d, 0xfd, 0x4e, 0x33, 0x86, 0x15, 0xc5, 0x65, 0x44, 0xed, 0xbd, 0xc9, 0x0e, + 0x29, 0x1b, 0x73, 0xc7, 0xad, 0x05, 0xb5, 0x4b, 0xfa, 0x49, 0x11, 0x5a, 0xc9, 0x2e, 0xc3, 0x0e, + 0x75, 0x44, 0x7c, 0x62, 0x7c, 0x7b, 0x5c, 0x71, 0xe6, 0xee, 0x16, 0x8c, 0x31, 0xde, 0xf3, 0x3d, + 0x5f, 0xfd, 0xb4, 0x08, 0xcd, 0xf1, 0xc9, 0x5e, 0xbb, 0x03, 0x2d, 0xb9, 0x8d, 0xd9, 0x23, 0x7d, + 0x2c, 0x1e, 0xf4, 0x72, 0xee, 0x83, 0xf2, 0x6e, 0xc7, 0xa2, 0x5c, 0x74, 0x87, 0xf4, 0xd5, 0xad, + 0xc0, 0xd2, 0x3c, 0x6f, 0xe3, 0xd7, 0x4d, 0xa8, 0x09, 0x47, 0xad, 0x38, 0xf1, 0xe5, 0x25, 0x28, + 0xe3, 0xde, 0x6a, 0x79, 0xc2, 0xa5, 0xbf, 0xca, 0xc4, 0x4b, 0x7f, 0xd3, 0x12, 0x8f, 0x94, 0x25, + 0xd6, 0x32, 0x96, 0x18, 0x73, 0x89, 0xf5, 0x19, 0x5c, 0x62, 0x63, 0xba, 0x4b, 0x6c, 0xce, 0xe0, + 0x12, 0x61, 0x26, 0x97, 0xb8, 0x30, 0xd9, 0x25, 0x2e, 0x4e, 0x70, 0x89, 0xad, 0x09, 0x2e, 0xb1, + 0x3d, 0xc9, 0x25, 0x2e, 0x4d, 0x71, 0x89, 0x9d, 0xac, 0x4b, 0x7c, 0x05, 0xda, 0x94, 0x78, 0xcc, + 0xd8, 0xf8, 0x49, 0xa0, 0xe5, 0xa0, 0xd3, 0x58, 0xae, 0x40, 0xd1, 0x2c, 0x37, 0x8e, 0xa6, 0x09, + 0x34, 0xcb, 0x8d, 0xa1, 0xc5, 0x03, 0xfd, 0x4a, 0xea, 0x9a, 0xe6, 0x4c, 0x27, 0x82, 0x8f, 0xf2, + 0x5c, 0xc0, 0x85, 0x6c, 0x6b, 0x29, 0xef, 0xd3, 0x13, 0xb5, 0x37, 0xd0, 0xae, 0x89, 0xb0, 0xbf, + 0x96, 0xb5, 0xfb, 0x47, 0x23, 0x0f, 0xf3, 0xdc, 0x9d, 0x25, 0x03, 0xaf, 0xcb, 0xa0, 0x7f, 0x31, + 0x7b, 0xb8, 0x1f, 0x37, 0xcd, 0x65, 0xb8, 0xbf, 0x0e, 0x35, 0x64, 0xdb, 0x54, 0x3f, 0xf5, 0xdc, + 0xde, 0x79, 0x15, 0xd9, 0xf6, 0xde, 0x40, 0xfb, 0x32, 0x40, 0xec, 0x89, 0xd6, 0xb3, 0xce, 0x3c, + 0xe2, 0xd6, 0x88, 0x61, 0x6a, 0x2f, 0x43, 0xab, 0x6f, 0x51, 0x0b, 0x72, 0x2c, 0x17, 0x85, 0xc4, + 0xd7, 0x37, 0x98, 0x82, 0x24, 0x27, 0x93, 0x57, 0x5e, 0x37, 0x53, 0x57, 0x5e, 0x5f, 0x82, 0xf2, + 0xa9, 0x63, 0xeb, 0x97, 0xb2, 0x16, 0xf7, 0xa1, 0x63, 0x1b, 0x14, 0x96, 0x2d, 0xb3, 0xbe, 0xf0, + 0xac, 0xb7, 0x62, 
0x2f, 0x3f, 0xc3, 0xad, 0xd8, 0x17, 0xe7, 0xf1, 0x58, 0x3f, 0x00, 0x88, 0xe2, + 0xde, 0x9c, 0x5f, 0x1a, 0xbd, 0x0d, 0x0b, 0x03, 0xcb, 0xc6, 0x66, 0x7e, 0x48, 0x8d, 0x6e, 0x3c, + 0x77, 0x0b, 0x06, 0x0c, 0xc6, 0xa3, 0xc8, 0x8b, 0x87, 0xb0, 0xa2, 0xe8, 0xe6, 0x6a, 0xdf, 0x9d, + 0x1c, 0xbf, 0xae, 0x65, 0x13, 0xea, 0x9c, 0x96, 0xb0, 0x3a, 0x9c, 0xfd, 0xa9, 0x02, 0x17, 0xf3, + 0x9a, 0xd1, 0x0e, 0xbc, 0x70, 0x88, 0x02, 0xab, 0x67, 0xa2, 0xc4, 0x57, 0x42, 0xe6, 0xb8, 0xe6, + 0xcb, 0x45, 0xf3, 0x5a, 0xa2, 0xc2, 0x9a, 0xff, 0x55, 0x51, 0xb7, 0x60, 0x6c, 0x1e, 0x4e, 0xf8, + 0xe8, 0xe8, 0x3e, 0x74, 0x90, 0x67, 0x99, 0x9f, 0xe2, 0x51, 0xb4, 0x03, 0x97, 0x64, 0xa2, 0xae, + 0x95, 0xfc, 0xca, 0xaa, 0x5b, 0x30, 0xda, 0x28, 0xf9, 0xdd, 0xd5, 0xf7, 0x40, 0x27, 0xac, 0x2d, + 0x61, 0x5a, 0xa2, 0x21, 0x15, 0xd1, 0x2b, 0x67, 0xbb, 0xa2, 0xea, 0xde, 0x55, 0xb7, 0x60, 0xac, + 0x11, 0x75, 0x57, 0x2b, 0xa2, 0xef, 0x89, 0x5e, 0x4f, 0x44, 0xbf, 0x92, 0x47, 0x3f, 0xdd, 0x16, + 0x8a, 0xe8, 0x67, 0x1a, 0x46, 0x47, 0xb0, 0x29, 0xe8, 0xa3, 0xa8, 0x91, 0x18, 0x6d, 0xc1, 0x03, + 0xdc, 0x2b, 0xd9, 0x2d, 0x14, 0x6d, 0xc7, 0x6e, 0xc1, 0x58, 0x27, 0xb9, 0x3d, 0x49, 0x1c, 0x6d, + 0xc4, 0xba, 0xba, 0x2c, 0x5d, 0x88, 0x36, 0xaa, 0x65, 0xbd, 0x63, 0x5e, 0x0f, 0xb8, 0x5b, 0x30, + 0x84, 0x4c, 0xb2, 0xb0, 0x48, 0xc3, 0x8f, 0x23, 0x0d, 0x8f, 0xb5, 0x04, 0xb4, 0xf7, 0x27, 0x6b, + 0xf8, 0xa5, 0x9c, 0xb6, 0x11, 0xbf, 0x58, 0xa0, 0xd6, 0xea, 0xab, 0xb0, 0x10, 0xbf, 0xb9, 0xb0, + 0x1a, 0x7d, 0xdc, 0x57, 0x8e, 0xee, 0x38, 0xfc, 0xb6, 0x08, 0xe5, 0x47, 0x48, 0x7d, 0x2b, 0x62, + 0xfa, 0xc7, 0x6e, 0x19, 0xcf, 0x56, 0x3e, 0xf3, 0x37, 0x22, 0x73, 0x7d, 0xc1, 0x75, 0x05, 0x1a, + 0x32, 0xc2, 0xe4, 0x3c, 0xdf, 0xc7, 0xb0, 0xf4, 0x41, 0xaa, 0xde, 0xf4, 0x1c, 0x3f, 0x26, 0xf9, + 0x5d, 0x11, 0xca, 0x1f, 0x3a, 0xb6, 0x52, 0x7a, 0x97, 0xa0, 0x49, 0x7f, 0x03, 0x0f, 0xf5, 0xe4, + 0xbd, 0x92, 0x68, 0x82, 0x26, 0x7f, 0x9e, 0x8f, 0x07, 0xd6, 0xa9, 0xc8, 0xf2, 0xc4, 0x88, 0xae, + 0x42, 0x61, 0xe8, 0x5b, 0x87, 0xc3, 0x10, 0x8b, 0xcf, 0xf4, 0xa2, 0x09, 0x9a, 0xca, 0x3c, 0xf5, + 0x91, 0xe7, 0xe1, 0xbe, 0x38, 0x82, 0xcb, 0xe1, 0x99, 0xfb, 0x98, 0xb7, 0x5f, 0x85, 0x36, 0xf1, + 0x8f, 0x24, 0xae, 0x79, 0xb2, 0x73, 0x7b, 0x51, 0x7c, 0xbb, 0xba, 0xef, 0x93, 0x90, 0xec, 0x17, + 0x7f, 0x51, 0x2a, 0xef, 0xed, 0x1e, 0x1c, 0xd6, 0xd8, 0xc7, 0xa0, 0x6f, 0xfe, 0x33, 0x00, 0x00, + 0xff, 0xff, 0xdc, 0xb2, 0x46, 0x98, 0xe4, 0x3a, 0x00, 0x00, +} diff --git a/extender/code/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.proto b/extender/code/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.proto new file mode 100644 index 000000000..557c88072 --- /dev/null +++ b/extender/code/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.proto @@ -0,0 +1,663 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. 
+ +syntax = "proto3"; + +package openapi.v2; + +import "google/protobuf/any.proto"; + +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; + +// The Java outer classname should be the filename in UpperCamelCase. This +// class is only used to hold proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "OpenAPIProto"; + +// The Java package name must be proto package name with proper prefix. +option java_package = "org.openapi_v2"; + +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +option objc_class_prefix = "OAS"; + +message AdditionalPropertiesItem { + oneof oneof { + Schema schema = 1; + bool boolean = 2; + } +} + +message Any { + google.protobuf.Any value = 1; + string yaml = 2; +} + +message ApiKeySecurity { + string type = 1; + string name = 2; + string in = 3; + string description = 4; + repeated NamedAny vendor_extension = 5; +} + +message BasicAuthenticationSecurity { + string type = 1; + string description = 2; + repeated NamedAny vendor_extension = 3; +} + +message BodyParameter { + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 1; + // The name of the parameter. + string name = 2; + // Determines the location of the parameter. + string in = 3; + // Determines whether or not this parameter is required or optional. + bool required = 4; + Schema schema = 5; + repeated NamedAny vendor_extension = 6; +} + +// Contact information for the owners of the API. +message Contact { + // The identifying name of the contact person/organization. + string name = 1; + // The URL pointing to the contact information. + string url = 2; + // The email address of the contact person/organization. + string email = 3; + repeated NamedAny vendor_extension = 4; +} + +message Default { + repeated NamedAny additional_properties = 1; +} + +// One or more JSON objects describing the schemas being consumed and produced by the API. +message Definitions { + repeated NamedSchema additional_properties = 1; +} + +message Document { + // The Swagger version of this document. + string swagger = 1; + Info info = 2; + // The host (name or ip) of the API. Example: 'swagger.io' + string host = 3; + // The base path to the API. Example: '/api'. + string base_path = 4; + // The transfer protocol of the API. + repeated string schemes = 5; + // A list of MIME types accepted by the API. + repeated string consumes = 6; + // A list of MIME types the API can produce. 
+ repeated string produces = 7; + Paths paths = 8; + Definitions definitions = 9; + ParameterDefinitions parameters = 10; + ResponseDefinitions responses = 11; + repeated SecurityRequirement security = 12; + SecurityDefinitions security_definitions = 13; + repeated Tag tags = 14; + ExternalDocs external_docs = 15; + repeated NamedAny vendor_extension = 16; +} + +message Examples { + repeated NamedAny additional_properties = 1; +} + +// information about external documentation +message ExternalDocs { + string description = 1; + string url = 2; + repeated NamedAny vendor_extension = 3; +} + +// A deterministic version of a JSON Schema object. +message FileSchema { + string format = 1; + string title = 2; + string description = 3; + Any default = 4; + repeated string required = 5; + string type = 6; + bool read_only = 7; + ExternalDocs external_docs = 8; + Any example = 9; + repeated NamedAny vendor_extension = 10; +} + +message FormDataParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + // allows sending a parameter by name only or with an empty value. + bool allow_empty_value = 5; + string type = 6; + string format = 7; + PrimitivesItems items = 8; + string collection_format = 9; + Any default = 10; + double maximum = 11; + bool exclusive_maximum = 12; + double minimum = 13; + bool exclusive_minimum = 14; + int64 max_length = 15; + int64 min_length = 16; + string pattern = 17; + int64 max_items = 18; + int64 min_items = 19; + bool unique_items = 20; + repeated Any enum = 21; + double multiple_of = 22; + repeated NamedAny vendor_extension = 23; +} + +message Header { + string type = 1; + string format = 2; + PrimitivesItems items = 3; + string collection_format = 4; + Any default = 5; + double maximum = 6; + bool exclusive_maximum = 7; + double minimum = 8; + bool exclusive_minimum = 9; + int64 max_length = 10; + int64 min_length = 11; + string pattern = 12; + int64 max_items = 13; + int64 min_items = 14; + bool unique_items = 15; + repeated Any enum = 16; + double multiple_of = 17; + string description = 18; + repeated NamedAny vendor_extension = 19; +} + +message HeaderParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + string type = 5; + string format = 6; + PrimitivesItems items = 7; + string collection_format = 8; + Any default = 9; + double maximum = 10; + bool exclusive_maximum = 11; + double minimum = 12; + bool exclusive_minimum = 13; + int64 max_length = 14; + int64 min_length = 15; + string pattern = 16; + int64 max_items = 17; + int64 min_items = 18; + bool unique_items = 19; + repeated Any enum = 20; + double multiple_of = 21; + repeated NamedAny vendor_extension = 22; +} + +message Headers { + repeated NamedHeader additional_properties = 1; +} + +// General information about the API. +message Info { + // A unique and precise title of the API. + string title = 1; + // A semantic version number of the API. 
+ string version = 2; + // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. + string description = 3; + // The terms of service for the API. + string terms_of_service = 4; + Contact contact = 5; + License license = 6; + repeated NamedAny vendor_extension = 7; +} + +message ItemsItem { + repeated Schema schema = 1; +} + +message JsonReference { + string _ref = 1; + string description = 2; +} + +message License { + // The name of the license type. It's encouraged to use an OSI compatible license. + string name = 1; + // The URL pointing to the license. + string url = 2; + repeated NamedAny vendor_extension = 3; +} + +// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. +message NamedAny { + // Map key + string name = 1; + // Mapped value + Any value = 2; +} + +// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. +message NamedHeader { + // Map key + string name = 1; + // Mapped value + Header value = 2; +} + +// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. +message NamedParameter { + // Map key + string name = 1; + // Mapped value + Parameter value = 2; +} + +// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. +message NamedPathItem { + // Map key + string name = 1; + // Mapped value + PathItem value = 2; +} + +// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. +message NamedResponse { + // Map key + string name = 1; + // Mapped value + Response value = 2; +} + +// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. +message NamedResponseValue { + // Map key + string name = 1; + // Mapped value + ResponseValue value = 2; +} + +// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. +message NamedSchema { + // Map key + string name = 1; + // Mapped value + Schema value = 2; +} + +// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. +message NamedSecurityDefinitionsItem { + // Map key + string name = 1; + // Mapped value + SecurityDefinitionsItem value = 2; +} + +// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. +message NamedString { + // Map key + string name = 1; + // Mapped value + string value = 2; +} + +// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. 
+message NamedStringArray { + // Map key + string name = 1; + // Mapped value + StringArray value = 2; +} + +message NonBodyParameter { + oneof oneof { + HeaderParameterSubSchema header_parameter_sub_schema = 1; + FormDataParameterSubSchema form_data_parameter_sub_schema = 2; + QueryParameterSubSchema query_parameter_sub_schema = 3; + PathParameterSubSchema path_parameter_sub_schema = 4; + } +} + +message Oauth2AccessCodeSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string authorization_url = 4; + string token_url = 5; + string description = 6; + repeated NamedAny vendor_extension = 7; +} + +message Oauth2ApplicationSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string token_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2ImplicitSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string authorization_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2PasswordSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string token_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2Scopes { + repeated NamedString additional_properties = 1; +} + +message Operation { + repeated string tags = 1; + // A brief summary of the operation. + string summary = 2; + // A longer description of the operation, GitHub Flavored Markdown is allowed. + string description = 3; + ExternalDocs external_docs = 4; + // A unique identifier of the operation. + string operation_id = 5; + // A list of MIME types the API can produce. + repeated string produces = 6; + // A list of MIME types the API can consume. + repeated string consumes = 7; + // The parameters needed to send a valid API call. + repeated ParametersItem parameters = 8; + Responses responses = 9; + // The transfer protocol of the API. + repeated string schemes = 10; + bool deprecated = 11; + repeated SecurityRequirement security = 12; + repeated NamedAny vendor_extension = 13; +} + +message Parameter { + oneof oneof { + BodyParameter body_parameter = 1; + NonBodyParameter non_body_parameter = 2; + } +} + +// One or more JSON representations for parameters +message ParameterDefinitions { + repeated NamedParameter additional_properties = 1; +} + +message ParametersItem { + oneof oneof { + Parameter parameter = 1; + JsonReference json_reference = 2; + } +} + +message PathItem { + string _ref = 1; + Operation get = 2; + Operation put = 3; + Operation post = 4; + Operation delete = 5; + Operation options = 6; + Operation head = 7; + Operation patch = 8; + // The parameters needed to send a valid API call. + repeated ParametersItem parameters = 9; + repeated NamedAny vendor_extension = 10; +} + +message PathParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. 
+ string name = 4; + string type = 5; + string format = 6; + PrimitivesItems items = 7; + string collection_format = 8; + Any default = 9; + double maximum = 10; + bool exclusive_maximum = 11; + double minimum = 12; + bool exclusive_minimum = 13; + int64 max_length = 14; + int64 min_length = 15; + string pattern = 16; + int64 max_items = 17; + int64 min_items = 18; + bool unique_items = 19; + repeated Any enum = 20; + double multiple_of = 21; + repeated NamedAny vendor_extension = 22; +} + +// Relative paths to the individual endpoints. They must be relative to the 'basePath'. +message Paths { + repeated NamedAny vendor_extension = 1; + repeated NamedPathItem path = 2; +} + +message PrimitivesItems { + string type = 1; + string format = 2; + PrimitivesItems items = 3; + string collection_format = 4; + Any default = 5; + double maximum = 6; + bool exclusive_maximum = 7; + double minimum = 8; + bool exclusive_minimum = 9; + int64 max_length = 10; + int64 min_length = 11; + string pattern = 12; + int64 max_items = 13; + int64 min_items = 14; + bool unique_items = 15; + repeated Any enum = 16; + double multiple_of = 17; + repeated NamedAny vendor_extension = 18; +} + +message Properties { + repeated NamedSchema additional_properties = 1; +} + +message QueryParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + // allows sending a parameter by name only or with an empty value. + bool allow_empty_value = 5; + string type = 6; + string format = 7; + PrimitivesItems items = 8; + string collection_format = 9; + Any default = 10; + double maximum = 11; + bool exclusive_maximum = 12; + double minimum = 13; + bool exclusive_minimum = 14; + int64 max_length = 15; + int64 min_length = 16; + string pattern = 17; + int64 max_items = 18; + int64 min_items = 19; + bool unique_items = 20; + repeated Any enum = 21; + double multiple_of = 22; + repeated NamedAny vendor_extension = 23; +} + +message Response { + string description = 1; + SchemaItem schema = 2; + Headers headers = 3; + Examples examples = 4; + repeated NamedAny vendor_extension = 5; +} + +// One or more JSON representations for parameters +message ResponseDefinitions { + repeated NamedResponse additional_properties = 1; +} + +message ResponseValue { + oneof oneof { + Response response = 1; + JsonReference json_reference = 2; + } +} + +// Response objects names can either be any valid HTTP status code or 'default'. +message Responses { + repeated NamedResponseValue response_code = 1; + repeated NamedAny vendor_extension = 2; +} + +// A deterministic version of a JSON Schema object. 
+message Schema { + string _ref = 1; + string format = 2; + string title = 3; + string description = 4; + Any default = 5; + double multiple_of = 6; + double maximum = 7; + bool exclusive_maximum = 8; + double minimum = 9; + bool exclusive_minimum = 10; + int64 max_length = 11; + int64 min_length = 12; + string pattern = 13; + int64 max_items = 14; + int64 min_items = 15; + bool unique_items = 16; + int64 max_properties = 17; + int64 min_properties = 18; + repeated string required = 19; + repeated Any enum = 20; + AdditionalPropertiesItem additional_properties = 21; + TypeItem type = 22; + ItemsItem items = 23; + repeated Schema all_of = 24; + Properties properties = 25; + string discriminator = 26; + bool read_only = 27; + Xml xml = 28; + ExternalDocs external_docs = 29; + Any example = 30; + repeated NamedAny vendor_extension = 31; +} + +message SchemaItem { + oneof oneof { + Schema schema = 1; + FileSchema file_schema = 2; + } +} + +message SecurityDefinitions { + repeated NamedSecurityDefinitionsItem additional_properties = 1; +} + +message SecurityDefinitionsItem { + oneof oneof { + BasicAuthenticationSecurity basic_authentication_security = 1; + ApiKeySecurity api_key_security = 2; + Oauth2ImplicitSecurity oauth2_implicit_security = 3; + Oauth2PasswordSecurity oauth2_password_security = 4; + Oauth2ApplicationSecurity oauth2_application_security = 5; + Oauth2AccessCodeSecurity oauth2_access_code_security = 6; + } +} + +message SecurityRequirement { + repeated NamedStringArray additional_properties = 1; +} + +message StringArray { + repeated string value = 1; +} + +message Tag { + string name = 1; + string description = 2; + ExternalDocs external_docs = 3; + repeated NamedAny vendor_extension = 4; +} + +message TypeItem { + repeated string value = 1; +} + +// Any property starting with x- is valid. +message VendorExtension { + repeated NamedAny additional_properties = 1; +} + +message Xml { + string name = 1; + string namespace = 2; + string prefix = 3; + bool attribute = 4; + bool wrapped = 5; + repeated NamedAny vendor_extension = 6; +} + diff --git a/extender/code/vendor/github.com/googleapis/gnostic/openapiv2/README.md b/extender/code/vendor/github.com/googleapis/gnostic/openapiv2/README.md new file mode 100644 index 000000000..836fb32a7 --- /dev/null +++ b/extender/code/vendor/github.com/googleapis/gnostic/openapiv2/README.md @@ -0,0 +1,16 @@ +# OpenAPI v2 Protocol Buffer Models + +This directory contains a Protocol Buffer-language model +and related code for supporting OpenAPI v2. + +Gnostic applications and plugins can use OpenAPIv2.proto +to generate Protocol Buffer support code for their preferred languages. + +OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI +descriptions into the Protocol Buffer-based datastructures +generated from OpenAPIv2.proto. + +OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic +compiler generator, and OpenAPIv2.pb.go is generated by +protoc, the Protocol Buffer compiler, and protoc-gen-go, the +Protocol Buffer Go code generation plugin. 
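To orient around the vendored package the README above describes: the generated Go types mirror the proto messages one-to-one, so a consumer builds a `Document` and serializes it with the standard protobuf runtime. The following is a minimal illustrative sketch, not part of the vendored sources; it assumes the vendored import path `github.com/googleapis/gnostic/openapiv2` (package `openapi_v2`) and the field names protoc-gen-go derives from the messages in OpenAPIv2.proto.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	openapi_v2 "github.com/googleapis/gnostic/openapiv2" // assumed vendored import path
)

func main() {
	// Build a minimal OpenAPI v2 description using the generated types;
	// the field names (Swagger, Info, Paths) follow the proto messages above.
	doc := &openapi_v2.Document{
		Swagger: "2.0",
		Info: &openapi_v2.Info{
			Title:   "example", // hypothetical API title
			Version: "1.0.0",
		},
		Paths: &openapi_v2.Paths{},
	}

	// Serialize to the protobuf wire format, the form in which gnostic
	// applications and plugins exchange documents.
	b, err := proto.Marshal(doc)
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded %d bytes\n", len(b))
}
```

Round-tripping such bytes through `proto.Unmarshal` recovers the same `Document`, which is presumably how downstream tooling consumes descriptions parsed by OpenAPIv2.go.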
diff --git a/extender/code/vendor/github.com/googleapis/gnostic/openapiv2/openapi-2.0.json b/extender/code/vendor/github.com/googleapis/gnostic/openapiv2/openapi-2.0.json new file mode 100644 index 000000000..2815a26ea --- /dev/null +++ b/extender/code/vendor/github.com/googleapis/gnostic/openapiv2/openapi-2.0.json @@ -0,0 +1,1610 @@ +{ + "title": "A JSON Schema for Swagger 2.0 API.", + "id": "http://swagger.io/v2/schema.json#", + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "required": [ + "swagger", + "info", + "paths" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "swagger": { + "type": "string", + "enum": [ + "2.0" + ], + "description": "The Swagger version of this document." + }, + "info": { + "$ref": "#/definitions/info" + }, + "host": { + "type": "string", + "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$", + "description": "The host (name or ip) of the API. Example: 'swagger.io'" + }, + "basePath": { + "type": "string", + "pattern": "^/", + "description": "The base path to the API. Example: '/api'." + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "consumes": { + "description": "A list of MIME types accepted by the API.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "paths": { + "$ref": "#/definitions/paths" + }, + "definitions": { + "$ref": "#/definitions/definitions" + }, + "parameters": { + "$ref": "#/definitions/parameterDefinitions" + }, + "responses": { + "$ref": "#/definitions/responseDefinitions" + }, + "security": { + "$ref": "#/definitions/security" + }, + "securityDefinitions": { + "$ref": "#/definitions/securityDefinitions" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." + }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed." + }, + "termsOfService": { + "type": "string", + "description": "The terms of service for the API." + }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." 
+ }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." + }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "paths": { + "type": "object", + "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + }, + "^/": { + "$ref": "#/definitions/pathItem" + } + }, + "additionalProperties": false + }, + "definitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "One or more JSON objects describing the schemas being consumed and produced by the API." + }, + "parameterDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + }, + "description": "One or more JSON representations for parameters" + }, + "responseDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/response" + }, + "description": "One or more JSON representations for parameters" + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "examples": { + "type": "object", + "additionalProperties": true + }, + "mimeType": { + "type": "string", + "description": "The MIME type of the HTTP message." + }, + "operation": { + "type": "object", + "required": [ + "responses" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the operation." + }, + "description": { + "type": "string", + "description": "A longer description of the operation, GitHub Flavored Markdown is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string", + "description": "A unique identifier of the operation." 
+ }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "consumes": { + "description": "A list of MIME types the API can consume.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "parameters": { + "$ref": "#/definitions/parametersList" + }, + "responses": { + "$ref": "#/definitions/responses" + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "security": { + "$ref": "#/definitions/security" + } + } + }, + "pathItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "get": { + "$ref": "#/definitions/operation" + }, + "put": { + "$ref": "#/definitions/operation" + }, + "post": { + "$ref": "#/definitions/operation" + }, + "delete": { + "$ref": "#/definitions/operation" + }, + "options": { + "$ref": "#/definitions/operation" + }, + "head": { + "$ref": "#/definitions/operation" + }, + "patch": { + "$ref": "#/definitions/operation" + }, + "parameters": { + "$ref": "#/definitions/parametersList" + } + } + }, + "responses": { + "type": "object", + "description": "Response objects names can either be any valid HTTP status code or 'default'.", + "minProperties": 1, + "additionalProperties": false, + "patternProperties": { + "^([0-9]{3})$|^(default)$": { + "$ref": "#/definitions/responseValue" + }, + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "not": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + } + }, + "responseValue": { + "oneOf": [ + { + "$ref": "#/definitions/response" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "response": { + "type": "object", + "required": [ + "description" + ], + "properties": { + "description": { + "type": "string" + }, + "schema": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "$ref": "#/definitions/fileSchema" + } + ] + }, + "headers": { + "$ref": "#/definitions/headers" + }, + "examples": { + "$ref": "#/definitions/examples" + } + }, + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "headers": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/header" + } + }, + "header": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": 
"#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "vendorExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "bodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "schema" + ], + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "body" + ] + }, + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "schema": { + "$ref": "#/definitions/schema" + } + }, + "additionalProperties": false + }, + "headerParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "header" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "queryParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "query" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. 
This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "formDataParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "formData" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array", + "file" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "pathParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "required" + ], + "properties": { + "required": { + "type": "boolean", + "enum": [ + true + ], + "description": "Determines whether or not this parameter is required or optional." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "path" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "nonBodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "type" + ], + "oneOf": [ + { + "$ref": "#/definitions/headerParameterSubSchema" + }, + { + "$ref": "#/definitions/formDataParameterSubSchema" + }, + { + "$ref": "#/definitions/queryParameterSubSchema" + }, + { + "$ref": "#/definitions/pathParameterSubSchema" + } + ] + }, + "parameter": { + "oneOf": [ + { + "$ref": "#/definitions/bodyParameter" + }, + { + "$ref": "#/definitions/nonBodyParameter" + } + ] + }, + "schema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "maxProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "enum": { + "$ref": 
"http://json-schema.org/draft-04/schema#/properties/enum" + }, + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "type": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/type" + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "discriminator": { + "type": "string" + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "xml": { + "$ref": "#/definitions/xml" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "fileSchema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "type" + ], + "properties": { + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "type": { + "type": "string", + "enum": [ + "file" + ] + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "primitivesItems": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/securityRequirement" + }, + "uniqueItems": true + }, + "securityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + }, + "xml": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + 
"prefix": { + "type": "string" + }, + "attribute": { + "type": "boolean", + "default": false + }, + "wrapped": { + "type": "boolean", + "default": false + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "securityDefinitions": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/basicAuthenticationSecurity" + }, + { + "$ref": "#/definitions/apiKeySecurity" + }, + { + "$ref": "#/definitions/oauth2ImplicitSecurity" + }, + { + "$ref": "#/definitions/oauth2PasswordSecurity" + }, + { + "$ref": "#/definitions/oauth2ApplicationSecurity" + }, + { + "$ref": "#/definitions/oauth2AccessCodeSecurity" + } + ] + } + }, + "basicAuthenticationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "basic" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "apiKeySecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ImplicitSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "implicit" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2PasswordSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "password" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ApplicationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "application" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2AccessCodeSecurity": { + "type": "object", + 
"additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "accessCode" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2Scopes": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "mediaTypeList": { + "type": "array", + "items": { + "$ref": "#/definitions/mimeType" + }, + "uniqueItems": true + }, + "parametersList": { + "type": "array", + "description": "The parameters needed to send a valid API call.", + "additionalItems": false, + "items": { + "oneOf": [ + { + "$ref": "#/definitions/parameter" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "uniqueItems": true + }, + "schemesList": { + "type": "array", + "description": "The transfer protocol of the API.", + "items": { + "type": "string", + "enum": [ + "http", + "https", + "ws", + "wss" + ] + }, + "uniqueItems": true + }, + "collectionFormat": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes" + ], + "default": "csv" + }, + "collectionFormatWithMulti": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes", + "multi" + ], + "default": "csv" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + }, + "jsonReference": { + "type": "object", + "required": [ + "$ref" + ], + "additionalProperties": false, + "properties": { + "$ref": { + "type": "string" + }, + "description": { + "type": "string" + } + } + } + } +} diff --git a/extender/code/vendor/github.com/grpc-ecosystem/grpc-opentracing/LICENSE b/extender/code/vendor/github.com/grpc-ecosystem/grpc-opentracing/LICENSE new file mode 100644 index 000000000..abe5fe170 --- /dev/null +++ b/extender/code/vendor/github.com/grpc-ecosystem/grpc-opentracing/LICENSE @@ -0,0 
+1,27 @@ +Copyright (c) 2016, gRPC Ecosystem +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of grpc-opentracing nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/extender/code/vendor/github.com/grpc-ecosystem/grpc-opentracing/PATENTS b/extender/code/vendor/github.com/grpc-ecosystem/grpc-opentracing/PATENTS new file mode 100644 index 000000000..5cfe0175e --- /dev/null +++ b/extender/code/vendor/github.com/grpc-ecosystem/grpc-opentracing/PATENTS @@ -0,0 +1,23 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the GRPC project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of GRPC, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of GRPC. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of GRPC or any code incorporated within this +implementation of GRPC constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of GRPC +shall terminate as of the date such litigation is filed. 
diff --git a/extender/code/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md b/extender/code/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md new file mode 100644 index 000000000..78c49dbbe --- /dev/null +++ b/extender/code/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md @@ -0,0 +1,57 @@ +# OpenTracing support for gRPC in Go + +The `otgrpc` package makes it easy to add OpenTracing support to gRPC-based +systems in Go. + +## Installation + +``` +go get github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc +``` + +## Documentation + +See the basic usage examples below and the [package documentation on +godoc.org](https://godoc.org/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc). + +## Client-side usage example + +Wherever you call `grpc.Dial`: + +```go +// You must have some sort of OpenTracing Tracer instance on hand. +var tracer opentracing.Tracer = ... +... + +// Set up a connection to the server peer. +conn, err := grpc.Dial( + address, + ... // other options + grpc.WithUnaryInterceptor( + otgrpc.OpenTracingClientInterceptor(tracer)), + grpc.WithStreamInterceptor( + otgrpc.OpenTracingStreamClientInterceptor(tracer))) + +// All future RPC activity involving `conn` will be automatically traced. +``` + +## Server-side usage example + +Wherever you call `grpc.NewServer`: + +```go +// You must have some sort of OpenTracing Tracer instance on hand. +var tracer opentracing.Tracer = ... +... + +// Initialize the gRPC server. +s := grpc.NewServer( + ... // other options + grpc.UnaryInterceptor( + otgrpc.OpenTracingServerInterceptor(tracer)), + grpc.StreamInterceptor( + otgrpc.OpenTracingStreamServerInterceptor(tracer))) + +// All future RPC activity involving `s` will be automatically traced. +``` + diff --git a/extender/code/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go b/extender/code/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go new file mode 100644 index 000000000..3414e55cb --- /dev/null +++ b/extender/code/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go @@ -0,0 +1,239 @@ +package otgrpc + +import ( + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/opentracing/opentracing-go/log" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + "io" + "runtime" + "sync/atomic" +) + +// OpenTracingClientInterceptor returns a grpc.UnaryClientInterceptor suitable +// for use in a grpc.Dial call. +// +// For example: +// +// conn, err := grpc.Dial( +// address, +// ..., // (existing DialOptions) +// grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(tracer))) +// +// All gRPC client spans will inject the OpenTracing SpanContext into the gRPC +// metadata; they will also look in the context.Context for an active +// in-process parent Span and establish a ChildOf reference if such a parent +// Span could be found. +func OpenTracingClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.UnaryClientInterceptor { + otgrpcOpts := newOptions() + otgrpcOpts.apply(optFuncs...)
+ return func( + ctx context.Context, + method string, + req, resp interface{}, + cc *grpc.ClientConn, + invoker grpc.UnaryInvoker, + opts ...grpc.CallOption, + ) error { + var err error + var parentCtx opentracing.SpanContext + if parent := opentracing.SpanFromContext(ctx); parent != nil { + parentCtx = parent.Context() + } + if otgrpcOpts.inclusionFunc != nil && + !otgrpcOpts.inclusionFunc(parentCtx, method, req, resp) { + return invoker(ctx, method, req, resp, cc, opts...) + } + clientSpan := tracer.StartSpan( + method, + opentracing.ChildOf(parentCtx), + ext.SpanKindRPCClient, + gRPCComponentTag, + ) + defer clientSpan.Finish() + ctx = injectSpanContext(ctx, tracer, clientSpan) + if otgrpcOpts.logPayloads { + clientSpan.LogFields(log.Object("gRPC request", req)) + } + err = invoker(ctx, method, req, resp, cc, opts...) + if err == nil { + if otgrpcOpts.logPayloads { + clientSpan.LogFields(log.Object("gRPC response", resp)) + } + } else { + SetSpanTags(clientSpan, err, true) + clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) + } + if otgrpcOpts.decorator != nil { + otgrpcOpts.decorator(clientSpan, method, req, resp, err) + } + return err + } +} + +// OpenTracingStreamClientInterceptor returns a grpc.StreamClientInterceptor suitable +// for use in a grpc.Dial call. The interceptor instruments streaming RPCs by creating +// a single span to correspond to the lifetime of the RPC's stream. +// +// For example: +// +// conn, err := grpc.Dial( +// address, +// ..., // (existing DialOptions) +// grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(tracer))) +// +// All gRPC client spans will inject the OpenTracing SpanContext into the gRPC +// metadata; they will also look in the context.Context for an active +// in-process parent Span and establish a ChildOf reference if such a parent +// Span could be found. +func OpenTracingStreamClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.StreamClientInterceptor { + otgrpcOpts := newOptions() + otgrpcOpts.apply(optFuncs...) + return func( + ctx context.Context, + desc *grpc.StreamDesc, + cc *grpc.ClientConn, + method string, + streamer grpc.Streamer, + opts ...grpc.CallOption, + ) (grpc.ClientStream, error) { + var err error + var parentCtx opentracing.SpanContext + if parent := opentracing.SpanFromContext(ctx); parent != nil { + parentCtx = parent.Context() + } + if otgrpcOpts.inclusionFunc != nil && + !otgrpcOpts.inclusionFunc(parentCtx, method, nil, nil) { + return streamer(ctx, desc, cc, method, opts...) + } + + clientSpan := tracer.StartSpan( + method, + opentracing.ChildOf(parentCtx), + ext.SpanKindRPCClient, + gRPCComponentTag, + ) + ctx = injectSpanContext(ctx, tracer, clientSpan) + cs, err := streamer(ctx, desc, cc, method, opts...) + if err != nil { + clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) + SetSpanTags(clientSpan, err, true) + clientSpan.Finish() + return cs, err + } + return newOpenTracingClientStream(cs, method, desc, clientSpan, otgrpcOpts), nil + } +} + +func newOpenTracingClientStream(cs grpc.ClientStream, method string, desc *grpc.StreamDesc, clientSpan opentracing.Span, otgrpcOpts *options) grpc.ClientStream { + finishChan := make(chan struct{}) + + isFinished := new(int32) + *isFinished = 0 + finishFunc := func(err error) { + // The current OpenTracing specification forbids finishing a span more than + // once. 
Since we have multiple code paths that could concurrently call + // `finishFunc`, we need to add some sort of synchronization to guard against + // multiple finishing. + if !atomic.CompareAndSwapInt32(isFinished, 0, 1) { + return + } + close(finishChan) + defer clientSpan.Finish() + if err != nil { + clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) + SetSpanTags(clientSpan, err, true) + } + if otgrpcOpts.decorator != nil { + otgrpcOpts.decorator(clientSpan, method, nil, nil, err) + } + } + go func() { + select { + case <-finishChan: + // The client span is being finished by another code path; hence, no + // action is necessary. + case <-cs.Context().Done(): + finishFunc(cs.Context().Err()) + } + }() + otcs := &openTracingClientStream{ + ClientStream: cs, + desc: desc, + finishFunc: finishFunc, + } + + // The `ClientStream` interface allows one to omit calling `Recv` if it's + // known that the result will be `io.EOF`. See + // http://stackoverflow.com/q/42915337 + // In such cases, there's nothing that triggers the span to finish. We, + // therefore, set a finalizer so that the span and the context goroutine will + // at least be cleaned up when the garbage collector is run. + runtime.SetFinalizer(otcs, func(otcs *openTracingClientStream) { + otcs.finishFunc(nil) + }) + return otcs +} + +type openTracingClientStream struct { + grpc.ClientStream + desc *grpc.StreamDesc + finishFunc func(error) +} + +func (cs *openTracingClientStream) Header() (metadata.MD, error) { + md, err := cs.ClientStream.Header() + if err != nil { + cs.finishFunc(err) + } + return md, err +} + +func (cs *openTracingClientStream) SendMsg(m interface{}) error { + err := cs.ClientStream.SendMsg(m) + if err != nil { + cs.finishFunc(err) + } + return err +} + +func (cs *openTracingClientStream) RecvMsg(m interface{}) error { + err := cs.ClientStream.RecvMsg(m) + if err == io.EOF { + cs.finishFunc(nil) + return err + } else if err != nil { + cs.finishFunc(err) + return err + } + if !cs.desc.ServerStreams { + cs.finishFunc(nil) + } + return err +} + +func (cs *openTracingClientStream) CloseSend() error { + err := cs.ClientStream.CloseSend() + if err != nil { + cs.finishFunc(err) + } + return err +} + +func injectSpanContext(ctx context.Context, tracer opentracing.Tracer, clientSpan opentracing.Span) context.Context { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { + md = metadata.New(nil) + } else { + md = md.Copy() + } + mdWriter := metadataReaderWriter{md} + err := tracer.Inject(clientSpan.Context(), opentracing.HTTPHeaders, mdWriter) + // We have no better place to record an error than the Span itself :-/ + if err != nil { + clientSpan.LogFields(log.String("event", "Tracer.Inject() failed"), log.Error(err)) + } + return metadata.NewOutgoingContext(ctx, md) +} diff --git a/extender/code/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/errors.go b/extender/code/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/errors.go new file mode 100644 index 000000000..41a6346f2 --- /dev/null +++ b/extender/code/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/errors.go @@ -0,0 +1,69 @@ +package otgrpc + +import ( + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// A Class is a set of types of outcomes (including errors) that will often +// be handled in the same way. 
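+//
+// For example (an illustrative sketch, not upstream documentation; it uses
+// the ErrorClass helper defined below):
+//
+//	if otgrpc.ErrorClass(err) == otgrpc.ServerError {
+//		// 5xx-class outcomes are treated as the server's fault
+//	}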
+type Class string + +const ( + Unknown Class = "0xx" + // Success represents outcomes that achieved the desired results. + Success Class = "2xx" + // ClientError represents errors that were the client's fault. + ClientError Class = "4xx" + // ServerError represents errors that were the server's fault. + ServerError Class = "5xx" +) + +// ErrorClass returns the class of the given error +func ErrorClass(err error) Class { + if s, ok := status.FromError(err); ok { + switch s.Code() { + // Success or "success" + case codes.OK, codes.Canceled: + return Success + + // Client errors + case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, + codes.PermissionDenied, codes.Unauthenticated, codes.FailedPrecondition, + codes.OutOfRange: + return ClientError + + // Server errors + case codes.DeadlineExceeded, codes.ResourceExhausted, codes.Aborted, + codes.Unimplemented, codes.Internal, codes.Unavailable, codes.DataLoss: + return ServerError + + // Not sure + case codes.Unknown: + fallthrough + default: + return Unknown + } + } + return Unknown +} + +// SetSpanTags sets one or more tags on the given span according to the +// error. +func SetSpanTags(span opentracing.Span, err error, client bool) { + c := ErrorClass(err) + code := codes.Unknown + if s, ok := status.FromError(err); ok { + code = s.Code() + } + span.SetTag("response_code", code) + span.SetTag("response_class", c) + if err == nil { + return + } + if client || c == ServerError { + ext.Error.Set(span, true) + } +} diff --git a/extender/code/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/options.go b/extender/code/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/options.go new file mode 100644 index 000000000..903e8382e --- /dev/null +++ b/extender/code/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/options.go @@ -0,0 +1,76 @@ +package otgrpc + +import "github.com/opentracing/opentracing-go" + +// Option instances may be used in OpenTracing(Server|Client)Interceptor +// initialization. +// +// See this post about the "functional options" pattern: +// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis +type Option func(o *options) + +// LogPayloads returns an Option that tells the OpenTracing instrumentation to +// try to log application payloads in both directions. +func LogPayloads() Option { + return func(o *options) { + o.logPayloads = true + } +} + +// SpanInclusionFunc provides an optional mechanism to decide whether or not +// to trace a given gRPC call. Return true to create a Span and initiate +// tracing, false to not create a Span and not trace. +// +// parentSpanCtx may be nil if no parent could be extracted from either the Go +// context.Context (on the client) or the RPC (on the server). +type SpanInclusionFunc func( + parentSpanCtx opentracing.SpanContext, + method string, + req, resp interface{}) bool + +// IncludingSpans binds a SpanInclusionFunc to the options +func IncludingSpans(inclusionFunc SpanInclusionFunc) Option { + return func(o *options) { + o.inclusionFunc = inclusionFunc + } +} + +// SpanDecoratorFunc provides an (optional) mechanism for otgrpc users to add +// arbitrary tags/logs/etc to the opentracing.Span associated with client +// and/or server RPCs. +type SpanDecoratorFunc func( + span opentracing.Span, + method string, + req, resp interface{}, + grpcError error) + +// SpanDecorator binds a function that decorates gRPC Spans.
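+//
+// An illustrative use (assumed, not from upstream docs): tag every span with
+// a static environment name.
+//
+//	opt := otgrpc.SpanDecorator(func(span opentracing.Span, method string,
+//		req, resp interface{}, grpcError error) {
+//		span.SetTag("environment", "staging") // hypothetical tag
+//	})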
+func SpanDecorator(decorator SpanDecoratorFunc) Option { + return func(o *options) { + o.decorator = decorator + } +} + +// The internal-only options struct. Obviously overkill at the moment; but will +// scale well as production use dictates other configuration and tuning +// parameters. +type options struct { + logPayloads bool + decorator SpanDecoratorFunc + // May be nil. + inclusionFunc SpanInclusionFunc +} + +// newOptions returns the default options. +func newOptions() *options { + return &options{ + logPayloads: false, + inclusionFunc: nil, + } +} + +func (o *options) apply(opts ...Option) { + for _, opt := range opts { + opt(o) + } +} diff --git a/extender/code/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/package.go b/extender/code/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/package.go new file mode 100644 index 000000000..4ff3d1997 --- /dev/null +++ b/extender/code/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/package.go @@ -0,0 +1,5 @@ +// Package otgrpc provides OpenTracing support for any gRPC client or server. +// +// See the README for simple usage examples: +// https://github.com/grpc-ecosystem/grpc-opentracing/blob/master/go/otgrpc/README.md +package otgrpc diff --git a/extender/code/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go b/extender/code/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go new file mode 100644 index 000000000..62cf54d22 --- /dev/null +++ b/extender/code/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go @@ -0,0 +1,141 @@ +package otgrpc + +import ( + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/opentracing/opentracing-go/log" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +// OpenTracingServerInterceptor returns a grpc.UnaryServerInterceptor suitable +// for use in a grpc.NewServer call. +// +// For example: +// +// s := grpc.NewServer( +// ..., // (existing ServerOptions) +// grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(tracer))) +// +// All gRPC server spans will look for an OpenTracing SpanContext in the gRPC +// metadata; if found, the server span will act as the ChildOf that RPC +// SpanContext. +// +// Root or not, the server Span will be embedded in the context.Context for the +// application-specific gRPC handler(s) to access. +func OpenTracingServerInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.UnaryServerInterceptor { + otgrpcOpts := newOptions() + otgrpcOpts.apply(optFuncs...) + return func( + ctx context.Context, + req interface{}, + info *grpc.UnaryServerInfo, + handler grpc.UnaryHandler, + ) (resp interface{}, err error) { + spanContext, err := extractSpanContext(ctx, tracer) + if err != nil && err != opentracing.ErrSpanContextNotFound { + // TODO: establish some sort of error reporting mechanism here. We + // don't know where to put such an error and must rely on Tracer + // implementations to do something appropriate for the time being. 
+ } + if otgrpcOpts.inclusionFunc != nil && + !otgrpcOpts.inclusionFunc(spanContext, info.FullMethod, req, nil) { + return handler(ctx, req) + } + serverSpan := tracer.StartSpan( + info.FullMethod, + ext.RPCServerOption(spanContext), + gRPCComponentTag, + ) + defer serverSpan.Finish() + + ctx = opentracing.ContextWithSpan(ctx, serverSpan) + if otgrpcOpts.logPayloads { + serverSpan.LogFields(log.Object("gRPC request", req)) + } + resp, err = handler(ctx, req) + if err == nil { + if otgrpcOpts.logPayloads { + serverSpan.LogFields(log.Object("gRPC response", resp)) + } + } else { + SetSpanTags(serverSpan, err, false) + serverSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) + } + if otgrpcOpts.decorator != nil { + otgrpcOpts.decorator(serverSpan, info.FullMethod, req, resp, err) + } + return resp, err + } +} + +// OpenTracingStreamServerInterceptor returns a grpc.StreamServerInterceptor suitable +// for use in a grpc.NewServer call. The interceptor instruments streaming RPCs by +// creating a single span to correspond to the lifetime of the RPC's stream. +// +// For example: +// +// s := grpc.NewServer( +// ..., // (existing ServerOptions) +// grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(tracer))) +// +// All gRPC server spans will look for an OpenTracing SpanContext in the gRPC +// metadata; if found, the server span will act as the ChildOf that RPC +// SpanContext. +// +// Root or not, the server Span will be embedded in the context.Context for the +// application-specific gRPC handler(s) to access. +func OpenTracingStreamServerInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.StreamServerInterceptor { + otgrpcOpts := newOptions() + otgrpcOpts.apply(optFuncs...) + return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + spanContext, err := extractSpanContext(ss.Context(), tracer) + if err != nil && err != opentracing.ErrSpanContextNotFound { + // TODO: establish some sort of error reporting mechanism here. We + // don't know where to put such an error and must rely on Tracer + // implementations to do something appropriate for the time being. 
+ } + if otgrpcOpts.inclusionFunc != nil && + !otgrpcOpts.inclusionFunc(spanContext, info.FullMethod, nil, nil) { + return handler(srv, ss) + } + + serverSpan := tracer.StartSpan( + info.FullMethod, + ext.RPCServerOption(spanContext), + gRPCComponentTag, + ) + defer serverSpan.Finish() + ss = &openTracingServerStream{ + ServerStream: ss, + ctx: opentracing.ContextWithSpan(ss.Context(), serverSpan), + } + err = handler(srv, ss) + if err != nil { + SetSpanTags(serverSpan, err, false) + serverSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) + } + if otgrpcOpts.decorator != nil { + otgrpcOpts.decorator(serverSpan, info.FullMethod, nil, nil, err) + } + return err + } +} + +type openTracingServerStream struct { + grpc.ServerStream + ctx context.Context +} + +func (ss *openTracingServerStream) Context() context.Context { + return ss.ctx +} + +func extractSpanContext(ctx context.Context, tracer opentracing.Tracer) (opentracing.SpanContext, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + md = metadata.New(nil) + } + return tracer.Extract(opentracing.HTTPHeaders, metadataReaderWriter{md}) +} diff --git a/extender/code/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/shared.go b/extender/code/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/shared.go new file mode 100644 index 000000000..9abd5eaa6 --- /dev/null +++ b/extender/code/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/shared.go @@ -0,0 +1,42 @@ +package otgrpc + +import ( + "strings" + + opentracing "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "google.golang.org/grpc/metadata" +) + +var ( + // Morally a const: + gRPCComponentTag = opentracing.Tag{string(ext.Component), "gRPC"} +) + +// metadataReaderWriter satisfies both the opentracing.TextMapReader and +// opentracing.TextMapWriter interfaces. +type metadataReaderWriter struct { + metadata.MD +} + +func (w metadataReaderWriter) Set(key, val string) { + // The GRPC HPACK implementation rejects any uppercase keys here. + // + // As such, since the HTTP_HEADERS format is case-insensitive anyway, we + // blindly lowercase the key (which is guaranteed to work in the + // Inject/Extract sense per the OpenTracing spec). 
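+	// e.g. an injected key of "Trace-ID" is stored as "trace-id" (illustrative).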
+ key = strings.ToLower(key) + w.MD[key] = append(w.MD[key], val) +} + +func (w metadataReaderWriter) ForeachKey(handler func(key, val string) error) error { + for k, vals := range w.MD { + for _, v := range vals { + if err := handler(k, v); err != nil { + return err + } + } + } + + return nil +} diff --git a/extender/code/vendor/github.com/hashicorp/go-immutable-radix/.gitignore b/extender/code/vendor/github.com/hashicorp/go-immutable-radix/.gitignore new file mode 100644 index 000000000..daf913b1b --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/go-immutable-radix/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/extender/code/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml b/extender/code/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml new file mode 100644 index 000000000..1a0bbea6c --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml @@ -0,0 +1,3 @@ +language: go +go: + - tip diff --git a/extender/code/vendor/github.com/hashicorp/go-immutable-radix/LICENSE b/extender/code/vendor/github.com/hashicorp/go-immutable-radix/LICENSE new file mode 100644 index 000000000..e87a115e4 --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/go-immutable-radix/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
"Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/extender/code/vendor/github.com/hashicorp/go-immutable-radix/README.md b/extender/code/vendor/github.com/hashicorp/go-immutable-radix/README.md new file mode 100644 index 000000000..8910fcc03 --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/go-immutable-radix/README.md @@ -0,0 +1,41 @@ +go-immutable-radix [![Build Status](https://travis-ci.org/hashicorp/go-immutable-radix.png)](https://travis-ci.org/hashicorp/go-immutable-radix) +========= + +Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree). +The package only provides a single `Tree` implementation, optimized for sparse nodes. + +As a radix tree, it provides the following: + * O(k) operations. In many cases, this can be faster than a hash table since + the hash function is an O(k) operation, and hash tables have very poor cache locality. + * Minimum / Maximum value lookups + * Ordered iteration + +A tree supports using a transaction to batch multiple updates (insert, delete) +in a more efficient manner than performing each operation one at a time. + +For a mutable variant, see [go-radix](https://github.com/armon/go-radix). + +Documentation +============= + +The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-immutable-radix). + +Example +======= + +Below is a simple example of usage + +```go +// Create a tree +r := iradix.New() +r, _, _ = r.Insert([]byte("foo"), 1) +r, _, _ = r.Insert([]byte("bar"), 2) +r, _, _ = r.Insert([]byte("foobar"), 2) + +// Find the longest prefix match +m, _, _ := r.Root().LongestPrefix([]byte("foozip")) +if string(m) != "foo" { + panic("should be foo") +} +``` + diff --git a/extender/code/vendor/github.com/hashicorp/go-immutable-radix/edges.go b/extender/code/vendor/github.com/hashicorp/go-immutable-radix/edges.go new file mode 100644 index 000000000..a63674775 --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/go-immutable-radix/edges.go @@ -0,0 +1,21 @@ +package iradix + +import "sort" + +type edges []edge + +func (e edges) Len() int { + return len(e) +} + +func (e edges) Less(i, j int) bool { + return e[i].label < e[j].label +} + +func (e edges) Swap(i, j int) { + e[i], e[j] = e[j], e[i] +} + +func (e edges) Sort() { + sort.Sort(e) +} diff --git a/extender/code/vendor/github.com/hashicorp/go-immutable-radix/go.mod b/extender/code/vendor/github.com/hashicorp/go-immutable-radix/go.mod new file mode 100644 index 000000000..27e7b7c95 --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/go-immutable-radix/go.mod @@ -0,0 +1,6 @@ +module github.com/hashicorp/go-immutable-radix + +require ( + github.com/hashicorp/go-uuid v1.0.0 + github.com/hashicorp/golang-lru v0.5.0 +) diff --git a/extender/code/vendor/github.com/hashicorp/go-immutable-radix/go.sum b/extender/code/vendor/github.com/hashicorp/go-immutable-radix/go.sum new file mode 100644 index 000000000..7de5dfc50 --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/go-immutable-radix/go.sum @@ -0,0 +1,4 @@ +github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0 
h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= diff --git a/extender/code/vendor/github.com/hashicorp/go-immutable-radix/iradix.go b/extender/code/vendor/github.com/hashicorp/go-immutable-radix/iradix.go new file mode 100644 index 000000000..e5e6e57f2 --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/go-immutable-radix/iradix.go @@ -0,0 +1,662 @@ +package iradix + +import ( + "bytes" + "strings" + + "github.com/hashicorp/golang-lru/simplelru" +) + +const ( + // defaultModifiedCache is the default size of the modified node + // cache used per transaction. This is used to cache the updates + // to the nodes near the root, while the leaves do not need to be + // cached. This is important for very large transactions to prevent + // the modified cache from growing to be enormous. This is also used + // to set the max size of the mutation notify maps since those should + // also be bounded in a similar way. + defaultModifiedCache = 8192 +) + +// Tree implements an immutable radix tree. This can be treated as a +// Dictionary abstract data type. The main advantage over a standard +// hash map is prefix-based lookups and ordered iteration. The immutability +// means that it is safe to concurrently read from a Tree without any +// coordination. +type Tree struct { + root *Node + size int +} + +// New returns an empty Tree +func New() *Tree { + t := &Tree{ + root: &Node{ + mutateCh: make(chan struct{}), + }, + } + return t +} + +// Len is used to return the number of elements in the tree +func (t *Tree) Len() int { + return t.size +} + +// Txn is a transaction on the tree. This transaction is applied +// atomically and returns a new tree when committed. A transaction +// is not thread safe, and should only be used by a single goroutine. +type Txn struct { + // root is the modified root for the transaction. + root *Node + + // snap is a snapshot of the root node for use if we have to run the + // slow notify algorithm. + snap *Node + + // size tracks the size of the tree as it is modified during the + // transaction. + size int + + // writable is a cache of writable nodes that have been created during + // the course of the transaction. This allows us to re-use the same + // nodes for further writes and avoid unnecessary copies of nodes that + // have never been exposed outside the transaction. This will only hold + // up to defaultModifiedCache number of entries. + writable *simplelru.LRU + + // trackChannels is used to hold channels that need to be notified to + // signal mutation of the tree. This will only hold up to + // defaultModifiedCache number of entries, after which we will set the + // trackOverflow flag, which will cause us to use a more expensive + // algorithm to perform the notifications. Mutation tracking is only + // performed if trackMutate is true. + trackChannels map[chan struct{}]struct{} + trackOverflow bool + trackMutate bool +} + +// Txn starts a new transaction that can be used to mutate the tree +func (t *Tree) Txn() *Txn { + txn := &Txn{ + root: t.root, + snap: t.root, + size: t.size, + } + return txn +} + +// TrackMutate can be used to toggle if mutations are tracked. If this is enabled +// then notifications will be issued for affected internal nodes and leaves when +// the transaction is committed. 
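+//
+// A minimal illustrative sequence (using only the API defined in this file):
+//
+//	txn := tree.Txn()
+//	txn.TrackMutate(true)
+//	txn.Insert([]byte("key"), "value")
+//	tree = txn.Commit() // Commit also notifies any tracked channels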
+func (t *Txn) TrackMutate(track bool) { + t.trackMutate = track +} + +// trackChannel safely attempts to track the given mutation channel, setting the +// overflow flag if we can no longer track any more. This limits the amount of +// state that will accumulate during a transaction and we have a slower algorithm +// to switch to if we overflow. +func (t *Txn) trackChannel(ch chan struct{}) { + // In overflow, make sure we don't store any more objects. + if t.trackOverflow { + return + } + + // If this would overflow the state we reject it and set the flag (since + // we aren't tracking everything that's required any longer). + if len(t.trackChannels) >= defaultModifiedCache { + // Mark that we are in the overflow state + t.trackOverflow = true + + // Clear the map so that the channels can be garbage collected. It is + // safe to do this since we have already overflowed and will be using + // the slow notify algorithm. + t.trackChannels = nil + return + } + + // Create the map on the fly when we need it. + if t.trackChannels == nil { + t.trackChannels = make(map[chan struct{}]struct{}) + } + + // Otherwise we are good to track it. + t.trackChannels[ch] = struct{}{} +} + +// writeNode returns a node to be modified, if the current node has already been +// modified during the course of the transaction, it is used in-place. Set +// forLeafUpdate to true if you are getting a write node to update the leaf, +// which will set leaf mutation tracking appropriately as well. +func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node { + // Ensure the writable set exists. + if t.writable == nil { + lru, err := simplelru.NewLRU(defaultModifiedCache, nil) + if err != nil { + panic(err) + } + t.writable = lru + } + + // If this node has already been modified, we can continue to use it + // during this transaction. We know that we don't need to track it for + // a node update since the node is writable, but if this is for a leaf + // update we track it, in case the initial write to this node didn't + // update the leaf. + if _, ok := t.writable.Get(n); ok { + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + return n + } + + // Mark this node as being mutated. + if t.trackMutate { + t.trackChannel(n.mutateCh) + } + + // Mark its leaf as being mutated, if appropriate. + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + + // Copy the existing node. If you have set forLeafUpdate it will be + // safe to replace this leaf with another after you get your node for + // writing. You MUST replace it, because the channel associated with + // this leaf will be closed when this transaction is committed. + nc := &Node{ + mutateCh: make(chan struct{}), + leaf: n.leaf, + } + if n.prefix != nil { + nc.prefix = make([]byte, len(n.prefix)) + copy(nc.prefix, n.prefix) + } + if len(n.edges) != 0 { + nc.edges = make([]edge, len(n.edges)) + copy(nc.edges, n.edges) + } + + // Mark this node as writable. + t.writable.Add(nc, nil) + return nc +} + +// Visit all the nodes in the tree under n, and add their mutateChannels to the transaction +// Returns the size of the subtree visited +func (t *Txn) trackChannelsAndCount(n *Node) int { + // Count only leaf nodes + leaves := 0 + if n.leaf != nil { + leaves = 1 + } + // Mark this node as being mutated. + if t.trackMutate { + t.trackChannel(n.mutateCh) + } + + // Mark its leaf as being mutated, if appropriate. 
+ if t.trackMutate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + + // Recurse on the children + for _, e := range n.edges { + leaves += t.trackChannelsAndCount(e.node) + } + return leaves +} + +// mergeChild is called to collapse the given node with its child. This is only +// called when the given node is not a leaf and has a single edge. +func (t *Txn) mergeChild(n *Node) { + // Mark the child node as being mutated since we are about to abandon + // it. We don't need to mark the leaf since we are retaining it if it + // is there. + e := n.edges[0] + child := e.node + if t.trackMutate { + t.trackChannel(child.mutateCh) + } + + // Merge the nodes. + n.prefix = concat(n.prefix, child.prefix) + n.leaf = child.leaf + if len(child.edges) != 0 { + n.edges = make([]edge, len(child.edges)) + copy(n.edges, child.edges) + } else { + n.edges = nil + } +} + +// insert does a recursive insertion +func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) { + // Handle key exhaustion + if len(search) == 0 { + var oldVal interface{} + didUpdate := false + if n.isLeaf() { + oldVal = n.leaf.val + didUpdate = true + } + + nc := t.writeNode(n, true) + nc.leaf = &leafNode{ + mutateCh: make(chan struct{}), + key: k, + val: v, + } + return nc, oldVal, didUpdate + } + + // Look for the edge + idx, child := n.getEdge(search[0]) + + // No edge, create one + if child == nil { + e := edge{ + label: search[0], + node: &Node{ + mutateCh: make(chan struct{}), + leaf: &leafNode{ + mutateCh: make(chan struct{}), + key: k, + val: v, + }, + prefix: search, + }, + } + nc := t.writeNode(n, false) + nc.addEdge(e) + return nc, nil, false + } + + // Determine longest prefix of the search key on match + commonPrefix := longestPrefix(search, child.prefix) + if commonPrefix == len(child.prefix) { + search = search[commonPrefix:] + newChild, oldVal, didUpdate := t.insert(child, k, search, v) + if newChild != nil { + nc := t.writeNode(n, false) + nc.edges[idx].node = newChild + return nc, oldVal, didUpdate + } + return nil, oldVal, didUpdate + } + + // Split the node + nc := t.writeNode(n, false) + splitNode := &Node{ + mutateCh: make(chan struct{}), + prefix: search[:commonPrefix], + } + nc.replaceEdge(edge{ + label: search[0], + node: splitNode, + }) + + // Restore the existing child node + modChild := t.writeNode(child, false) + splitNode.addEdge(edge{ + label: modChild.prefix[commonPrefix], + node: modChild, + }) + modChild.prefix = modChild.prefix[commonPrefix:] + + // Create a new leaf node + leaf := &leafNode{ + mutateCh: make(chan struct{}), + key: k, + val: v, + } + + // If the new key is a subset, add it to this node + search = search[commonPrefix:] + if len(search) == 0 { + splitNode.leaf = leaf + return nc, nil, false + } + + // Create a new edge for the node + splitNode.addEdge(edge{ + label: search[0], + node: &Node{ + mutateCh: make(chan struct{}), + leaf: leaf, + prefix: search, + }, + }) + return nc, nil, false +} + +// delete does a recursive deletion +func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) { + // Check for key exhaustion + if len(search) == 0 { + if !n.isLeaf() { + return nil, nil + } + // Copy the pointer in case we are in a transaction that already + // modified this node since the node will be reused. Any changes + // made to the node will not affect returning the original leaf + // value.
+ oldLeaf := n.leaf + + // Remove the leaf node + nc := t.writeNode(n, true) + nc.leaf = nil + + // Check if this node should be merged + if n != t.root && len(nc.edges) == 1 { + t.mergeChild(nc) + } + return nc, oldLeaf + } + + // Look for an edge + label := search[0] + idx, child := n.getEdge(label) + if child == nil || !bytes.HasPrefix(search, child.prefix) { + return nil, nil + } + + // Consume the search prefix + search = search[len(child.prefix):] + newChild, leaf := t.delete(n, child, search) + if newChild == nil { + return nil, nil + } + + // Copy this node. WATCH OUT - it's safe to pass "false" here because we + // will only ADD a leaf via nc.mergeChild() if there isn't one due to + // the !nc.isLeaf() check in the logic just below. This is pretty subtle, + // so be careful if you change any of the logic here. + nc := t.writeNode(n, false) + + // Delete the edge if the node has no edges + if newChild.leaf == nil && len(newChild.edges) == 0 { + nc.delEdge(label) + if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() { + t.mergeChild(nc) + } + } else { + nc.edges[idx].node = newChild + } + return nc, leaf +} + +// deletePrefix does a recursive deletion +func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) { + // Check for key exhaustion + if len(search) == 0 { + nc := t.writeNode(n, true) + if n.isLeaf() { + nc.leaf = nil + } + nc.edges = nil + return nc, t.trackChannelsAndCount(n) + } + + // Look for an edge + label := search[0] + idx, child := n.getEdge(label) + // We make sure that either the child node's prefix starts with the search term, or the search term starts with the child node's prefix + // Need to do both so that we can delete prefixes that don't correspond to any node in the tree + if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) { + return nil, 0 + } + + // Consume the search prefix + if len(child.prefix) > len(search) { + search = []byte("") + } else { + search = search[len(child.prefix):] + } + newChild, numDeletions := t.deletePrefix(n, child, search) + if newChild == nil { + return nil, 0 + } + // Copy this node. WATCH OUT - it's safe to pass "false" here because we + // will only ADD a leaf via nc.mergeChild() if there isn't one due to + // the !nc.isLeaf() check in the logic just below. This is pretty subtle, + // so be careful if you change any of the logic here. + + nc := t.writeNode(n, false) + + // Delete the edge if the node has no edges + if newChild.leaf == nil && len(newChild.edges) == 0 { + nc.delEdge(label) + if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() { + t.mergeChild(nc) + } + } else { + nc.edges[idx].node = newChild + } + return nc, numDeletions +} + +// Insert is used to add or update a given key. The return provides +// the previous value and a bool indicating if any was set. +func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) { + newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v) + if newRoot != nil { + t.root = newRoot + } + if !didUpdate { + t.size++ + } + return oldVal, didUpdate +} + +// Delete is used to delete a given key. Returns the old value if any, +// and a bool indicating if the key was set.
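+//
+// For example (illustrative):
+//
+//	if old, existed := txn.Delete([]byte("key")); existed {
+//		_ = old // the value previously stored under "key"
+//	}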
+func (t *Txn) Delete(k []byte) (interface{}, bool) { + newRoot, leaf := t.delete(nil, t.root, k) + if newRoot != nil { + t.root = newRoot + } + if leaf != nil { + t.size-- + return leaf.val, true + } + return nil, false +} + +// DeletePrefix is used to delete an entire subtree that matches the prefix +// This will delete all nodes under that prefix +func (t *Txn) DeletePrefix(prefix []byte) bool { + newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix) + if newRoot != nil { + t.root = newRoot + t.size = t.size - numDeletions + return true + } + return false + +} + +// Root returns the current root of the radix tree within this +// transaction. The root is not safe across insert and delete operations, +// but can be used to read the current state during a transaction. +func (t *Txn) Root() *Node { + return t.root +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Txn) Get(k []byte) (interface{}, bool) { + return t.root.Get(k) +} + +// GetWatch is used to lookup a specific key, returning +// the watch channel, value and if it was found +func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { + return t.root.GetWatch(k) +} + +// Commit is used to finalize the transaction and return a new tree. If mutation +// tracking is turned on then notifications will also be issued. +func (t *Txn) Commit() *Tree { + nt := t.CommitOnly() + if t.trackMutate { + t.Notify() + } + return nt +} + +// CommitOnly is used to finalize the transaction and return a new tree, but +// does not issue any notifications until Notify is called. +func (t *Txn) CommitOnly() *Tree { + nt := &Tree{t.root, t.size} + t.writable = nil + return nt +} + +// slowNotify does a complete comparison of the before and after trees in order +// to trigger notifications. This doesn't require any additional state but it +// is very expensive to compute. +func (t *Txn) slowNotify() { + snapIter := t.snap.rawIterator() + rootIter := t.root.rawIterator() + for snapIter.Front() != nil || rootIter.Front() != nil { + // If we've exhausted the nodes in the old snapshot, we know + // there's nothing remaining to notify. + if snapIter.Front() == nil { + return + } + snapElem := snapIter.Front() + + // If we've exhausted the nodes in the new root, we know we need + // to invalidate everything that remains in the old snapshot. We + // know from the loop condition there's something in the old + // snapshot. + if rootIter.Front() == nil { + close(snapElem.mutateCh) + if snapElem.isLeaf() { + close(snapElem.leaf.mutateCh) + } + snapIter.Next() + continue + } + + // Do one string compare so we can check the various conditions + // below without repeating the compare. + cmp := strings.Compare(snapIter.Path(), rootIter.Path()) + + // If the snapshot is behind the root, then we must have deleted + // this node during the transaction. + if cmp < 0 { + close(snapElem.mutateCh) + if snapElem.isLeaf() { + close(snapElem.leaf.mutateCh) + } + snapIter.Next() + continue + } + + // If the snapshot is ahead of the root, then we must have added + // this node during the transaction. + if cmp > 0 { + rootIter.Next() + continue + } + + // If we have the same path, then we need to see if we mutated a + // node and possibly the leaf. 
+ rootElem := rootIter.Front() + if snapElem != rootElem { + close(snapElem.mutateCh) + if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) { + close(snapElem.leaf.mutateCh) + } + } + snapIter.Next() + rootIter.Next() + } +} + +// Notify is used along with TrackMutate to trigger notifications. This must +// only be done once a transaction is committed via CommitOnly, and it is called +// automatically by Commit. +func (t *Txn) Notify() { + if !t.trackMutate { + return + } + + // If we've overflowed the tracking state we can't use it in any way and + // need to do a full tree compare. + if t.trackOverflow { + t.slowNotify() + } else { + for ch := range t.trackChannels { + close(ch) + } + } + + // Clean up the tracking state so that a re-notify is safe (will trigger + // the else clause above which will be a no-op). + t.trackChannels = nil + t.trackOverflow = false +} + +// Insert is used to add or update a given key. The return provides +// the new tree, previous value and a bool indicating if any was set. +func (t *Tree) Insert(k []byte, v interface{}) (*Tree, interface{}, bool) { + txn := t.Txn() + old, ok := txn.Insert(k, v) + return txn.Commit(), old, ok +} + +// Delete is used to delete a given key. Returns the new tree, +// old value if any, and a bool indicating if the key was set. +func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) { + txn := t.Txn() + old, ok := txn.Delete(k) + return txn.Commit(), old, ok +} + +// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree, +// and a bool indicating if the prefix matched any nodes +func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) { + txn := t.Txn() + ok := txn.DeletePrefix(k) + return txn.Commit(), ok +} + +// Root returns the root node of the tree which can be used for richer +// query operations. 
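+//
+// For example (illustrative), a prefix walk goes through the root:
+//
+//	tree.Root().WalkPrefix([]byte("foo"), func(k []byte, v interface{}) bool {
+//		return false // return true to stop the walk early
+//	})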
+func (t *Tree) Root() *Node { + return t.root +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Tree) Get(k []byte) (interface{}, bool) { + return t.root.Get(k) +} + +// longestPrefix finds the length of the shared prefix +// of two strings +func longestPrefix(k1, k2 []byte) int { + max := len(k1) + if l := len(k2); l < max { + max = l + } + var i int + for i = 0; i < max; i++ { + if k1[i] != k2[i] { + break + } + } + return i +} + +// concat two byte slices, returning a third new copy +func concat(a, b []byte) []byte { + c := make([]byte, len(a)+len(b)) + copy(c, a) + copy(c[len(a):], b) + return c +} diff --git a/extender/code/vendor/github.com/hashicorp/go-immutable-radix/iter.go b/extender/code/vendor/github.com/hashicorp/go-immutable-radix/iter.go new file mode 100644 index 000000000..9815e0253 --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/go-immutable-radix/iter.go @@ -0,0 +1,91 @@ +package iradix + +import "bytes" + +// Iterator is used to iterate over a set of nodes +// in pre-order +type Iterator struct { + node *Node + stack []edges +} + +// SeekPrefixWatch is used to seek the iterator to a given prefix +// and returns the watch channel of the finest granularity +func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) { + // Wipe the stack + i.stack = nil + n := i.node + watch = n.mutateCh + search := prefix + for { + // Check for key exhaustion + if len(search) == 0 { + i.node = n + return + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + i.node = nil + return + } + + // Update to the finest granularity as the search makes progress + watch = n.mutateCh + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + + } else if bytes.HasPrefix(n.prefix, search) { + i.node = n + return + } else { + i.node = nil + return + } + } +} + +// SeekPrefix is used to seek the iterator to a given prefix +func (i *Iterator) SeekPrefix(prefix []byte) { + i.SeekPrefixWatch(prefix) +} + +// Next returns the next node in order +func (i *Iterator) Next() ([]byte, interface{}, bool) { + // Initialize our stack if needed + if i.stack == nil && i.node != nil { + i.stack = []edges{ + edges{ + edge{node: i.node}, + }, + } + } + + for len(i.stack) > 0 { + // Inspect the last element of the stack + n := len(i.stack) + last := i.stack[n-1] + elem := last[0].node + + // Update the stack + if len(last) > 1 { + i.stack[n-1] = last[1:] + } else { + i.stack = i.stack[:n-1] + } + + // Push the edges onto the frontier + if len(elem.edges) > 0 { + i.stack = append(i.stack, elem.edges) + } + + // Return the leaf values if any + if elem.leaf != nil { + return elem.leaf.key, elem.leaf.val, true + } + } + return nil, nil, false +} diff --git a/extender/code/vendor/github.com/hashicorp/go-immutable-radix/node.go b/extender/code/vendor/github.com/hashicorp/go-immutable-radix/node.go new file mode 100644 index 000000000..7a065e7a0 --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/go-immutable-radix/node.go @@ -0,0 +1,292 @@ +package iradix + +import ( + "bytes" + "sort" +) + +// WalkFn is used when walking the tree. Takes a +// key and value, returning if iteration should +// be terminated.
+type WalkFn func(k []byte, v interface{}) bool + +// leafNode is used to represent a value +type leafNode struct { + mutateCh chan struct{} + key []byte + val interface{} +} + +// edge is used to represent an edge node +type edge struct { + label byte + node *Node +} + +// Node is an immutable node in the radix tree +type Node struct { + // mutateCh is closed if this node is modified + mutateCh chan struct{} + + // leaf is used to store possible leaf + leaf *leafNode + + // prefix is the common prefix we ignore + prefix []byte + + // Edges should be stored in-order for iteration. + // We avoid a fully materialized slice to save memory, + // since in most cases we expect to be sparse + edges edges +} + +func (n *Node) isLeaf() bool { + return n.leaf != nil +} + +func (n *Node) addEdge(e edge) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + n.edges = append(n.edges, e) + if idx != num { + copy(n.edges[idx+1:], n.edges[idx:num]) + n.edges[idx] = e + } +} + +func (n *Node) replaceEdge(e edge) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + if idx < num && n.edges[idx].label == e.label { + n.edges[idx].node = e.node + return + } + panic("replacing missing edge") +} + +func (n *Node) getEdge(label byte) (int, *Node) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + return idx, n.edges[idx].node + } + return -1, nil +} + +func (n *Node) delEdge(label byte) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + copy(n.edges[idx:], n.edges[idx+1:]) + n.edges[len(n.edges)-1] = edge{} + n.edges = n.edges[:len(n.edges)-1] + } +} + +func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { + search := k + watch := n.mutateCh + for { + // Check for key exhaustion + if len(search) == 0 { + if n.isLeaf() { + return n.leaf.mutateCh, n.leaf.val, true + } + break + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + break + } + + // Update to the finest granularity as the search makes progress + watch = n.mutateCh + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + return watch, nil, false +} + +func (n *Node) Get(k []byte) (interface{}, bool) { + _, val, ok := n.GetWatch(k) + return val, ok +} + +// LongestPrefix is like Get, but instead of an +// exact match, it will return the longest prefix match. 
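+//
+// For example (editor's note): with "foo" and "foobar" stored, looking up
+// "foobaz" returns the entry for "foo", since "foobar" is not a prefix of
+// the search key:
+//
+//	key, val, ok := n.LongestPrefix([]byte("foobaz")) // key == "foo"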
+func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) { + var last *leafNode + search := k + for { + // Look for a leaf node + if n.isLeaf() { + last = n.leaf + } + + // Check for key exhaustion + if len(search) == 0 { + break + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + if last != nil { + return last.key, last.val, true + } + return nil, nil, false +} + +// Minimum is used to return the minimum value in the tree +func (n *Node) Minimum() ([]byte, interface{}, bool) { + for { + if n.isLeaf() { + return n.leaf.key, n.leaf.val, true + } + if len(n.edges) > 0 { + n = n.edges[0].node + } else { + break + } + } + return nil, nil, false +} + +// Maximum is used to return the maximum value in the tree +func (n *Node) Maximum() ([]byte, interface{}, bool) { + for { + if num := len(n.edges); num > 0 { + n = n.edges[num-1].node + continue + } + if n.isLeaf() { + return n.leaf.key, n.leaf.val, true + } else { + break + } + } + return nil, nil, false +} + +// Iterator is used to return an iterator at +// the given node to walk the tree +func (n *Node) Iterator() *Iterator { + return &Iterator{node: n} +} + +// rawIterator is used to return a raw iterator at the given node to walk the +// tree. +func (n *Node) rawIterator() *rawIterator { + iter := &rawIterator{node: n} + iter.Next() + return iter +} + +// Walk is used to walk the tree +func (n *Node) Walk(fn WalkFn) { + recursiveWalk(n, fn) +} + +// WalkPrefix is used to walk the tree under a prefix +func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) { + search := prefix + for { + // Check for key exhaustion + if len(search) == 0 { + recursiveWalk(n, fn) + return + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + + } else if bytes.HasPrefix(n.prefix, search) { + // Child may be under our search prefix + recursiveWalk(n, fn) + return + } else { + break + } + } +} + +// WalkPath is used to walk the tree, but only visiting nodes +// from the root down to a given leaf. Where WalkPrefix walks +// all the entries *under* the given prefix, this walks the +// entries *above* the given prefix. +func (n *Node) WalkPath(path []byte, fn WalkFn) { + search := path + for { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return + } + + // Check for key exhaustion + if len(search) == 0 { + return + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + return + } + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } +} + +// recursiveWalk is used to do a pre-order walk of a node +// recursively.
Returns true if the walk should be aborted +func recursiveWalk(n *Node, fn WalkFn) bool { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children + for _, e := range n.edges { + if recursiveWalk(e.node, fn) { + return true + } + } + return false +} diff --git a/extender/code/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go b/extender/code/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go new file mode 100644 index 000000000..04814c132 --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go @@ -0,0 +1,78 @@ +package iradix + +// rawIterator visits each of the nodes in the tree, even the ones that are not +// leaves. It keeps track of the effective path (what a leaf at a given node +// would be called), which is useful for comparing trees. +type rawIterator struct { + // node is the starting node in the tree for the iterator. + node *Node + + // stack keeps track of edges in the frontier. + stack []rawStackEntry + + // pos is the current position of the iterator. + pos *Node + + // path is the effective path of the current iterator position, + // regardless of whether the current node is a leaf. + path string +} + +// rawStackEntry is used to keep track of the cumulative common path as well as +// its associated edges in the frontier. +type rawStackEntry struct { + path string + edges edges +} + +// Front returns the current node that has been iterated to. +func (i *rawIterator) Front() *Node { + return i.pos +} + +// Path returns the effective path of the current node, even if it's not actually +// a leaf. +func (i *rawIterator) Path() string { + return i.path +} + +// Next advances the iterator to the next node. +func (i *rawIterator) Next() { + // Initialize our stack if needed. + if i.stack == nil && i.node != nil { + i.stack = []rawStackEntry{ + rawStackEntry{ + edges: edges{ + edge{node: i.node}, + }, + }, + } + } + + for len(i.stack) > 0 { + // Inspect the last element of the stack. + n := len(i.stack) + last := i.stack[n-1] + elem := last.edges[0].node + + // Update the stack. + if len(last.edges) > 1 { + i.stack[n-1].edges = last.edges[1:] + } else { + i.stack = i.stack[:n-1] + } + + // Push the edges onto the frontier. 
+ if len(elem.edges) > 0 { + path := last.path + string(elem.prefix) + i.stack = append(i.stack, rawStackEntry{path, elem.edges}) + } + + i.pos = elem + i.path = last.path + string(elem.prefix) + return + } + + i.pos = nil + i.path = "" +} diff --git a/extender/code/vendor/github.com/hashicorp/go-memdb/.gitignore b/extender/code/vendor/github.com/hashicorp/go-memdb/.gitignore new file mode 100644 index 000000000..daf913b1b --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/go-memdb/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/extender/code/vendor/github.com/hashicorp/go-memdb/.travis.yml b/extender/code/vendor/github.com/hashicorp/go-memdb/.travis.yml new file mode 100644 index 000000000..9e770fa22 --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/go-memdb/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - "1.10" + +script: + - go test diff --git a/extender/code/vendor/github.com/hashicorp/go-memdb/LICENSE b/extender/code/vendor/github.com/hashicorp/go-memdb/LICENSE new file mode 100644 index 000000000..e87a115e4 --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/go-memdb/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
"Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/extender/code/vendor/github.com/hashicorp/go-memdb/README.md b/extender/code/vendor/github.com/hashicorp/go-memdb/README.md new file mode 100644 index 000000000..65e1eaefe --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/go-memdb/README.md @@ -0,0 +1,98 @@ +# go-memdb + +Provides the `memdb` package that implements a simple in-memory database +built on immutable radix trees. The database provides Atomicity, Consistency +and Isolation from ACID. Because it is in-memory, it does not provide durability. +The database is instantiated with a schema that specifies the tables and indices +that exist and allows transactions to be executed. + +The database provides the following: + +* Multi-Version Concurrency Control (MVCC) - By leveraging immutable radix trees + the database is able to support any number of concurrent readers without locking, + and allows a writer to make progress. + +* Transaction Support - The database allows for rich transactions, in which multiple + objects are inserted, updated or deleted. The transactions can span multiple tables, + and are applied atomically. The database provides atomicity and isolation in ACID + terminology, such that until commit the updates are not visible. + +* Rich Indexing - Tables can support any number of indexes, which can be simple like + a single field index, or more advanced compound field indexes. Certain types like + UUID can be efficiently compressed from strings into byte indexes for reduced + storage requirements. + +* Watches - Callers can populate a watch set as part of a query, which can be used to + detect when a modification has been made to the database which affects the query + results. This lets callers easily watch for changes in the database in a very general + way. + +For the underlying immutable radix trees, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix). + +Documentation +============= + +The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-memdb). + +Example +======= + +Below is a simple example of usage: + +```go +// Create a sample struct +type Person struct { + Email string + Name string + Age int +} + +// Create the DB schema +schema := &memdb.DBSchema{ + Tables: map[string]*memdb.TableSchema{ + "person": &memdb.TableSchema{ + Name: "person", + Indexes: map[string]*memdb.IndexSchema{ + "id": &memdb.IndexSchema{ + Name: "id", + Unique: true, + Indexer: &memdb.StringFieldIndex{Field: "Email"}, + }, + }, + }, + }, +} + +// Create a new database +db, err := memdb.NewMemDB(schema) +if err != nil { + panic(err) +} + +// Create a write transaction +txn := db.Txn(true) + +// Insert a new person +p := &Person{"joe@aol.com", "Joe", 30} +if err := txn.Insert("person", p); err != nil { + panic(err) +} + +// Commit the transaction +txn.Commit() + +// Create read-only transaction +txn = db.Txn(false) +defer txn.Abort() + +// Lookup by email +raw, err := txn.First("person", "id", "joe@aol.com") +if err != nil { + panic(err) +} + +// Say hi!
+fmt.Printf("Hello %s!", raw.(*Person).Name) + +``` + diff --git a/extender/code/vendor/github.com/hashicorp/go-memdb/filter.go b/extender/code/vendor/github.com/hashicorp/go-memdb/filter.go new file mode 100644 index 000000000..2e3a9b3f7 --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/go-memdb/filter.go @@ -0,0 +1,33 @@ +package memdb + +// FilterFunc is a function that takes the results of an iterator and returns +// whether the result should be filtered out. +type FilterFunc func(interface{}) bool + +// FilterIterator is used to wrap a ResultIterator and apply a filter over it. +type FilterIterator struct { + // filter is the filter function applied over the base iterator. + filter FilterFunc + + // iter is the iterator that is being wrapped. + iter ResultIterator +} + +func NewFilterIterator(wrap ResultIterator, filter FilterFunc) *FilterIterator { + return &FilterIterator{ + filter: filter, + iter: wrap, + } +} + +// WatchCh returns the watch channel of the wrapped iterator. +func (f *FilterIterator) WatchCh() <-chan struct{} { return f.iter.WatchCh() } + +// Next returns the next non-filtered result from the wrapped iterator +func (f *FilterIterator) Next() interface{} { + for { + if value := f.iter.Next(); value == nil || !f.filter(value) { + return value + } + } +} diff --git a/extender/code/vendor/github.com/hashicorp/go-memdb/index.go b/extender/code/vendor/github.com/hashicorp/go-memdb/index.go new file mode 100644 index 000000000..cca853c5a --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/go-memdb/index.go @@ -0,0 +1,581 @@ +package memdb + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + "reflect" + "strings" +) + +// Indexer is an interface used for defining indexes. Indexes are used +// for efficient lookup of objects in a MemDB table. An Indexer must also +// implement one of SingleIndexer or MultiIndexer. +// +// Indexers are primarily responsible for returning the lookup key as +// a byte slice. The byte slice is the key data in the underlying data storage. +type Indexer interface { + // FromArgs is called to build the exact index key from a list of arguments. + FromArgs(args ...interface{}) ([]byte, error) +} + +// SingleIndexer is an interface used for defining indexes that generate a +// single value per object +type SingleIndexer interface { + // FromObject extracts the index value from an object. The return values + // are whether the index value was found, the index value, and any error + // while extracting the index value, respectively. + FromObject(raw interface{}) (bool, []byte, error) +} + +// MultiIndexer is an interface used for defining indexes that generate +// multiple values per object. Each value is stored as a separate index +// pointing to the same object. +// +// For example, an index that extracts the first and last name of a person +// and allows lookup based on either would be a MultiIndexer. The FromObject +// of this example would split the first and last name and return both as +// values. +type MultiIndexer interface { + // FromObject extracts index values from an object. The return values + // are the same as a SingleIndexer except there can be multiple index + // values. + FromObject(raw interface{}) (bool, [][]byte, error) +} + +// PrefixIndexer is an optional interface on top of an Indexer that allows +// indexes to support prefix-based iteration.
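+//
+// For example (editor's sketch; table and field names are hypothetical): a
+// StringFieldIndex on an "Email" field satisfies PrefixIndexer, so callers
+// can scan by prefix using the "_prefix" index-name convention:
+//
+//	it, _ := txn.Get("person", "id_prefix", "joe@")
+//	for obj := it.Next(); obj != nil; obj = it.Next() {
+//		// every object whose Email starts with "joe@"
+//	}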
+type PrefixIndexer interface { + // PrefixFromArgs is the same as FromArgs for an Indexer except that + // the index value returned should return all prefix-matched values. + PrefixFromArgs(args ...interface{}) ([]byte, error) +} + +// StringFieldIndex is used to extract a field from an object +// using reflection and builds an index on that field. +type StringFieldIndex struct { + Field string + Lowercase bool +} + +func (s *StringFieldIndex) FromObject(obj interface{}) (bool, []byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(s.Field) + if !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid", s.Field, obj) + } + + val := fv.String() + if val == "" { + return false, nil, nil + } + + if s.Lowercase { + val = strings.ToLower(val) + } + + // Add the null character as a terminator + val += "\x00" + return true, []byte(val), nil +} + +func (s *StringFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + arg, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + if s.Lowercase { + arg = strings.ToLower(arg) + } + // Add the null character as a terminator + arg += "\x00" + return []byte(arg), nil +} + +func (s *StringFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) { + val, err := s.FromArgs(args...) + if err != nil { + return nil, err + } + + // Strip the null terminator, the rest is a prefix + n := len(val) + if n > 0 { + return val[:n-1], nil + } + return val, nil +} + +// StringSliceFieldIndex builds an index from a field on an object that is a +// string slice ([]string). Each value within the string slice can be used for +// lookup. +type StringSliceFieldIndex struct { + Field string + Lowercase bool +} + +func (s *StringSliceFieldIndex) FromObject(obj interface{}) (bool, [][]byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(s.Field) + if !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid", s.Field, obj) + } + + if fv.Kind() != reflect.Slice || fv.Type().Elem().Kind() != reflect.String { + return false, nil, fmt.Errorf("field '%s' is not a string slice", s.Field) + } + + length := fv.Len() + vals := make([][]byte, 0, length) + for i := 0; i < fv.Len(); i++ { + val := fv.Index(i).String() + if val == "" { + continue + } + + if s.Lowercase { + val = strings.ToLower(val) + } + + // Add the null character as a terminator + val += "\x00" + vals = append(vals, []byte(val)) + } + if len(vals) == 0 { + return false, nil, nil + } + return true, vals, nil +} + +func (s *StringSliceFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + arg, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + if s.Lowercase { + arg = strings.ToLower(arg) + } + // Add the null character as a terminator + arg += "\x00" + return []byte(arg), nil +} + +func (s *StringSliceFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) { + val, err := s.FromArgs(args...) 
+ if err != nil { + return nil, err + } + + // Strip the null terminator, the rest is a prefix + n := len(val) + if n > 0 { + return val[:n-1], nil + } + return val, nil +} + +// StringMapFieldIndex is used to extract a field of type map[string]string +// from an object using reflection and builds an index on that field. +type StringMapFieldIndex struct { + Field string + Lowercase bool +} + +var MapType = reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf("")).Kind() + +func (s *StringMapFieldIndex) FromObject(obj interface{}) (bool, [][]byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(s.Field) + if !fv.IsValid() { + return false, nil, fmt.Errorf("field '%s' for %#v is invalid", s.Field, obj) + } + + if fv.Kind() != MapType { + return false, nil, fmt.Errorf("field '%s' is not a map[string]string", s.Field) + } + + length := fv.Len() + vals := make([][]byte, 0, length) + for _, key := range fv.MapKeys() { + k := key.String() + if k == "" { + continue + } + val := fv.MapIndex(key).String() + + if s.Lowercase { + k = strings.ToLower(k) + val = strings.ToLower(val) + } + + // Add the null character as a terminator + k += "\x00" + val + "\x00" + + vals = append(vals, []byte(k)) + } + if len(vals) == 0 { + return false, nil, nil + } + return true, vals, nil +} + +func (s *StringMapFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) > 2 || len(args) == 0 { + return nil, fmt.Errorf("must provide one or two arguments") + } + key, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + if s.Lowercase { + key = strings.ToLower(key) + } + // Add the null character as a terminator + key += "\x00" + + if len(args) == 2 { + val, ok := args[1].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[1]) + } + if s.Lowercase { + val = strings.ToLower(val) + } + // Add the null character as a terminator + key += val + "\x00" + } + + return []byte(key), nil +} + +// UintFieldIndex is used to extract a uint field from an object using +// reflection and builds an index on that field. +type UintFieldIndex struct { + Field string +} + +func (u *UintFieldIndex) FromObject(obj interface{}) (bool, []byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(u.Field) + if !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid", u.Field, obj) + } + + // Check the type + k := fv.Kind() + size, ok := IsUintType(k) + if !ok { + return false, nil, fmt.Errorf("field %q is of type %v; want a uint", u.Field, k) + } + + // Get the value and encode it + val := fv.Uint() + buf := make([]byte, size) + binary.PutUvarint(buf, val) + + return true, buf, nil +} + +func (u *UintFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + + v := reflect.ValueOf(args[0]) + if !v.IsValid() { + return nil, fmt.Errorf("%#v is invalid", args[0]) + } + + k := v.Kind() + size, ok := IsUintType(k) + if !ok { + return nil, fmt.Errorf("arg is of type %v; want a uint", k) + } + + val := v.Uint() + buf := make([]byte, size) + binary.PutUvarint(buf, val) + + return buf, nil +} + +// IsUintType returns whether the passed type is a type of uint and the number +// of bytes needed to encode the type. 
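+//
+// For example (editor's note): IsUintType(reflect.Uint32) returns
+// (binary.MaxVarintLen32, true), while IsUintType(reflect.Int) returns
+// (0, false), since signed integers are not handled here.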
+func IsUintType(k reflect.Kind) (size int, okay bool) { + switch k { + case reflect.Uint: + return binary.MaxVarintLen64, true + case reflect.Uint8: + return 2, true + case reflect.Uint16: + return binary.MaxVarintLen16, true + case reflect.Uint32: + return binary.MaxVarintLen32, true + case reflect.Uint64: + return binary.MaxVarintLen64, true + default: + return 0, false + } +} + +// UUIDFieldIndex is used to extract a field from an object +// using reflection and builds an index on that field by treating +// it as a UUID. This is an optimization to using a StringFieldIndex +// as the UUID can be more compactly represented in byte form. +type UUIDFieldIndex struct { + Field string +} + +func (u *UUIDFieldIndex) FromObject(obj interface{}) (bool, []byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(u.Field) + if !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid", u.Field, obj) + } + + val := fv.String() + if val == "" { + return false, nil, nil + } + + buf, err := u.parseString(val, true) + return true, buf, err +} + +func (u *UUIDFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + switch arg := args[0].(type) { + case string: + return u.parseString(arg, true) + case []byte: + if len(arg) != 16 { + return nil, fmt.Errorf("byte slice must be 16 characters") + } + return arg, nil + default: + return nil, + fmt.Errorf("argument must be a string or byte slice: %#v", args[0]) + } +} + +func (u *UUIDFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + switch arg := args[0].(type) { + case string: + return u.parseString(arg, false) + case []byte: + return arg, nil + default: + return nil, + fmt.Errorf("argument must be a string or byte slice: %#v", args[0]) + } +} + +// parseString parses a UUID from the string. If enforceLength is false, it will +// parse a partial UUID. An error is returned if the input, stripped of hyphens, +// is not even length. +func (u *UUIDFieldIndex) parseString(s string, enforceLength bool) ([]byte, error) { + // Verify the length + l := len(s) + if enforceLength && l != 36 { + return nil, fmt.Errorf("UUID must be 36 characters") + } else if l > 36 { + return nil, fmt.Errorf("Invalid UUID length. UUID have 36 characters; got %d", l) + } + + hyphens := strings.Count(s, "-") + if hyphens > 4 { + return nil, fmt.Errorf(`UUID should have maximum of 4 "-"; got %d`, hyphens) + } + + // The sanitized length is the length of the original string without the "-". + sanitized := strings.Replace(s, "-", "", -1) + sanitizedLength := len(sanitized) + if sanitizedLength%2 != 0 { + return nil, fmt.Errorf("Input (without hyphens) must be even length") + } + + dec, err := hex.DecodeString(sanitized) + if err != nil { + return nil, fmt.Errorf("Invalid UUID: %v", err) + } + + return dec, nil +} + +// FieldSetIndex is used to extract a field from an object using reflection and +// builds an index on whether the field is set by comparing it against its +// type's nil value. 
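+//
+// For example (editor's sketch; the field name is hypothetical):
+// FieldSetIndex{Field: "Manager"} yields []byte{1} for objects whose
+// Manager field is non-zero and []byte{0} otherwise, so queries can
+// select on whether the field is set.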
+type FieldSetIndex struct { + Field string +} + +func (f *FieldSetIndex) FromObject(obj interface{}) (bool, []byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(f.Field) + if !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid", f.Field, obj) + } + + if fv.Interface() == reflect.Zero(fv.Type()).Interface() { + return true, []byte{0}, nil + } + + return true, []byte{1}, nil +} + +func (f *FieldSetIndex) FromArgs(args ...interface{}) ([]byte, error) { + return fromBoolArgs(args) +} + +// ConditionalIndex builds an index based on a condition specified by a passed +// user function. This function may examine the passed object and return a +// boolean to encapsulate an arbitrarily complex conditional. +type ConditionalIndex struct { + Conditional ConditionalIndexFunc +} + +// ConditionalIndexFunc is the required function interface for a +// ConditionalIndex. +type ConditionalIndexFunc func(obj interface{}) (bool, error) + +func (c *ConditionalIndex) FromObject(obj interface{}) (bool, []byte, error) { + // Call the user's function + res, err := c.Conditional(obj) + if err != nil { + return false, nil, fmt.Errorf("ConditionalIndexFunc(%#v) failed: %v", obj, err) + } + + if res { + return true, []byte{1}, nil + } + + return true, []byte{0}, nil +} + +func (c *ConditionalIndex) FromArgs(args ...interface{}) ([]byte, error) { + return fromBoolArgs(args) +} + +// fromBoolArgs is a helper that expects only a single boolean argument and +// returns a single length byte array containing either a one or zero depending +// on whether the passed input is true or false respectively. +func fromBoolArgs(args []interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + + if val, ok := args[0].(bool); !ok { + return nil, fmt.Errorf("argument must be a boolean type: %#v", args[0]) + } else if val { + return []byte{1}, nil + } + + return []byte{0}, nil +} + +// CompoundIndex is used to build an index using multiple sub-indexes +// Prefix based iteration is supported as long as the appropriate prefix +// of indexers support it. All sub-indexers are only assumed to expect +// a single argument. +type CompoundIndex struct { + Indexes []Indexer + + // AllowMissing results in an index based on only the indexers + // that return data. If true, you may end up with 2/3 columns + // indexed which might be useful for an index scan. Otherwise, + // the CompoundIndex requires all indexers to be satisfied. + AllowMissing bool +} + +func (c *CompoundIndex) FromObject(raw interface{}) (bool, []byte, error) { + var out []byte + for i, idxRaw := range c.Indexes { + idx, ok := idxRaw.(SingleIndexer) + if !ok { + return false, nil, fmt.Errorf("sub-index %d error: %s", i, "sub-index must be a SingleIndexer") + } + ok, val, err := idx.FromObject(raw) + if err != nil { + return false, nil, fmt.Errorf("sub-index %d error: %v", i, err) + } + if !ok { + if c.AllowMissing { + break + } else { + return false, nil, nil + } + } + out = append(out, val...) + } + return true, out, nil +} + +func (c *CompoundIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != len(c.Indexes) { + return nil, fmt.Errorf("less arguments than index fields") + } + var out []byte + for i, arg := range args { + val, err := c.Indexes[i].FromArgs(arg) + if err != nil { + return nil, fmt.Errorf("sub-index %d error: %v", i, err) + } + out = append(out, val...) 
+ } + return out, nil +} + +func (c *CompoundIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) { + if len(args) > len(c.Indexes) { + return nil, fmt.Errorf("more arguments than index fields") + } + var out []byte + for i, arg := range args { + if i+1 < len(args) { + val, err := c.Indexes[i].FromArgs(arg) + if err != nil { + return nil, fmt.Errorf("sub-index %d error: %v", i, err) + } + out = append(out, val...) + } else { + prefixIndexer, ok := c.Indexes[i].(PrefixIndexer) + if !ok { + return nil, fmt.Errorf("sub-index %d does not support prefix scanning", i) + } + val, err := prefixIndexer.PrefixFromArgs(arg) + if err != nil { + return nil, fmt.Errorf("sub-index %d error: %v", i, err) + } + out = append(out, val...) + } + } + return out, nil +} diff --git a/extender/code/vendor/github.com/hashicorp/go-memdb/memdb.go b/extender/code/vendor/github.com/hashicorp/go-memdb/memdb.go new file mode 100644 index 000000000..65c920731 --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/go-memdb/memdb.go @@ -0,0 +1,97 @@ +// Package memdb provides an in-memory database that supports transactions +// and MVCC. +package memdb + +import ( + "sync" + "sync/atomic" + "unsafe" + + "github.com/hashicorp/go-immutable-radix" +) + +// MemDB is an in-memory database. +// +// MemDB provides a table abstraction to store objects (rows) with multiple +// indexes based on inserted values. The database makes use of immutable radix +// trees to provide transactions and MVCC. +type MemDB struct { + schema *DBSchema + root unsafe.Pointer // *iradix.Tree underneath + primary bool + + // There can only be a single writer at once + writer sync.Mutex +} + +// NewMemDB creates a new MemDB with the given schema +func NewMemDB(schema *DBSchema) (*MemDB, error) { + // Validate the schema + if err := schema.Validate(); err != nil { + return nil, err + } + + // Create the MemDB + db := &MemDB{ + schema: schema, + root: unsafe.Pointer(iradix.New()), + primary: true, + } + if err := db.initialize(); err != nil { + return nil, err + } + + return db, nil +} + +// getRoot is used to do an atomic load of the root pointer +func (db *MemDB) getRoot() *iradix.Tree { + root := (*iradix.Tree)(atomic.LoadPointer(&db.root)) + return root +} + +// Txn is used to start a new transaction, in either read or write mode. +// There can only be a single concurrent writer, but any number of readers. +func (db *MemDB) Txn(write bool) *Txn { + if write { + db.writer.Lock() + } + txn := &Txn{ + db: db, + write: write, + rootTxn: db.getRoot().Txn(), + } + return txn +} + +// Snapshot is used to capture a point-in-time snapshot +// of the database that will not be affected by any write +// operations to the existing DB. +func (db *MemDB) Snapshot() *MemDB { + clone := &MemDB{ + schema: db.schema, + root: unsafe.Pointer(db.getRoot()), + primary: false, + } + return clone +} + +// initialize is used to setup the DB for use after creation. This should +// be called only once after allocating a MemDB. +func (db *MemDB) initialize() error { + root := db.getRoot() + for tName, tableSchema := range db.schema.Tables { + for iName := range tableSchema.Indexes { + index := iradix.New() + path := indexPath(tName, iName) + root, _, _ = root.Insert(path, index) + } + } + db.root = unsafe.Pointer(root) + return nil +} + +// indexPath returns the path from the root to the given table index +func indexPath(table, index string) []byte { + return []byte(table + "." 
+ index) +} diff --git a/extender/code/vendor/github.com/hashicorp/go-memdb/schema.go b/extender/code/vendor/github.com/hashicorp/go-memdb/schema.go new file mode 100644 index 000000000..e6a9b526b --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/go-memdb/schema.go @@ -0,0 +1,114 @@ +package memdb + +import "fmt" + +// DBSchema is the schema to use for the full database with a MemDB instance. +// +// MemDB will require a valid schema. Schema validation can be tested using +// the Validate function. Calling this function is recommended in unit tests. +type DBSchema struct { + // Tables is the set of tables within this database. The key is the + // table name and must match the Name in TableSchema. + Tables map[string]*TableSchema +} + +// Validate validates the schema. +func (s *DBSchema) Validate() error { + if s == nil { + return fmt.Errorf("schema is nil") + } + + if len(s.Tables) == 0 { + return fmt.Errorf("schema has no tables defined") + } + + for name, table := range s.Tables { + if name != table.Name { + return fmt.Errorf("table name mis-match for '%s'", name) + } + + if err := table.Validate(); err != nil { + return fmt.Errorf("table %q: %s", name, err) + } + } + + return nil +} + +// TableSchema is the schema for a single table. +type TableSchema struct { + // Name of the table. This must match the key in the Tables map in DBSchema. + Name string + + // Indexes is the set of indexes for querying this table. The key + // is a unique name for the index and must match the Name in the + // IndexSchema. + Indexes map[string]*IndexSchema +} + +// Validate is used to validate the table schema +func (s *TableSchema) Validate() error { + if s.Name == "" { + return fmt.Errorf("missing table name") + } + + if len(s.Indexes) == 0 { + return fmt.Errorf("missing table indexes for '%s'", s.Name) + } + + if _, ok := s.Indexes["id"]; !ok { + return fmt.Errorf("must have id index") + } + + if !s.Indexes["id"].Unique { + return fmt.Errorf("id index must be unique") + } + + if _, ok := s.Indexes["id"].Indexer.(SingleIndexer); !ok { + return fmt.Errorf("id index must be a SingleIndexer") + } + + for name, index := range s.Indexes { + if name != index.Name { + return fmt.Errorf("index name mis-match for '%s'", name) + } + + if err := index.Validate(); err != nil { + return fmt.Errorf("index %q: %s", name, err) + } + } + + return nil +} + +// IndexSchema is the schema for an index. An index defines how a table is +// queried. +type IndexSchema struct { + // Name of the index. This must be unique among a tables set of indexes. + // This must match the key in the map of Indexes for a TableSchema. + Name string + + // AllowMissing if true ignores this index if it doesn't produce a + // value. For example, an index that extracts a field that doesn't + // exist from a structure. 
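+ //
+ // (Editor's note: when AllowMissing is true, Txn.Insert simply skips
+ // this index for objects that produce no value, rather than failing
+ // the whole insert.)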
+ AllowMissing bool + + Unique bool + Indexer Indexer +} + +func (s *IndexSchema) Validate() error { + if s.Name == "" { + return fmt.Errorf("missing index name") + } + if s.Indexer == nil { + return fmt.Errorf("missing index function for '%s'", s.Name) + } + switch s.Indexer.(type) { + case SingleIndexer: + case MultiIndexer: + default: + return fmt.Errorf("indexer for '%s' must be a SingleIndexer or MultiIndexer", s.Name) + } + return nil +} diff --git a/extender/code/vendor/github.com/hashicorp/go-memdb/txn.go b/extender/code/vendor/github.com/hashicorp/go-memdb/txn.go new file mode 100644 index 000000000..2b85087ea --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/go-memdb/txn.go @@ -0,0 +1,644 @@ +package memdb + +import ( + "bytes" + "fmt" + "strings" + "sync/atomic" + "unsafe" + + "github.com/hashicorp/go-immutable-radix" +) + +const ( + id = "id" +) + +var ( + // ErrNotFound is returned when the requested item is not found + ErrNotFound = fmt.Errorf("not found") +) + +// tableIndex is a tuple of (Table, Index) used for lookups +type tableIndex struct { + Table string + Index string +} + +// Txn is a transaction against a MemDB. +// This can be a read or write transaction. +type Txn struct { + db *MemDB + write bool + rootTxn *iradix.Txn + after []func() + + modified map[tableIndex]*iradix.Txn +} + +// readableIndex returns a transaction usable for reading the given +// index in a table. If a write transaction is in progress, we may need +// to use an existing modified txn. +func (txn *Txn) readableIndex(table, index string) *iradix.Txn { + // Look for existing transaction + if txn.write && txn.modified != nil { + key := tableIndex{table, index} + exist, ok := txn.modified[key] + if ok { + return exist + } + } + + // Create a read transaction + path := indexPath(table, index) + raw, _ := txn.rootTxn.Get(path) + indexTxn := raw.(*iradix.Tree).Txn() + return indexTxn +} + +// writableIndex returns a transaction usable for modifying the +// given index in a table. +func (txn *Txn) writableIndex(table, index string) *iradix.Txn { + if txn.modified == nil { + txn.modified = make(map[tableIndex]*iradix.Txn) + } + + // Look for existing transaction + key := tableIndex{table, index} + exist, ok := txn.modified[key] + if ok { + return exist + } + + // Start a new transaction + path := indexPath(table, index) + raw, _ := txn.rootTxn.Get(path) + indexTxn := raw.(*iradix.Tree).Txn() + + // If we are the primary DB, enable mutation tracking. Snapshots should + // not notify, otherwise we will trigger watches on the primary DB when + // the writes will not be visible. + indexTxn.TrackMutate(txn.db.primary) + + // Keep this open for the duration of the txn + txn.modified[key] = indexTxn + return indexTxn +} + +// Abort is used to cancel this transaction. +// This is a noop for read transactions. +func (txn *Txn) Abort() { + // Noop for a read transaction + if !txn.write { + return + } + + // Check if already aborted or committed + if txn.rootTxn == nil { + return + } + + // Clear the txn + txn.rootTxn = nil + txn.modified = nil + + // Release the writer lock since this is invalid + txn.db.writer.Unlock() +} + +// Commit is used to finalize this transaction. +// This is a noop for read transactions. 
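+//
+// A typical write path (editor's sketch, mirroring the README example):
+//
+//	txn := db.Txn(true)
+//	if err := txn.Insert("person", p); err != nil {
+//		txn.Abort()
+//		return err
+//	}
+//	txn.Commit()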
+func (txn *Txn) Commit() { + // Noop for a read transaction + if !txn.write { + return + } + + // Check if already aborted or committed + if txn.rootTxn == nil { + return + } + + // Commit each sub-transaction scoped to (table, index) + for key, subTxn := range txn.modified { + path := indexPath(key.Table, key.Index) + final := subTxn.CommitOnly() + txn.rootTxn.Insert(path, final) + } + + // Update the root of the DB + newRoot := txn.rootTxn.CommitOnly() + atomic.StorePointer(&txn.db.root, unsafe.Pointer(newRoot)) + + // Now issue all of the mutation updates (this is safe to call + // even if mutation tracking isn't enabled); we do this after + // the root pointer is swapped so that waking responders will + // see the new state. + for _, subTxn := range txn.modified { + subTxn.Notify() + } + txn.rootTxn.Notify() + + // Clear the txn + txn.rootTxn = nil + txn.modified = nil + + // Release the writer lock since this is invalid + txn.db.writer.Unlock() + + // Run the deferred functions, if any + for i := len(txn.after); i > 0; i-- { + fn := txn.after[i-1] + fn() + } +} + +// Insert is used to add or update an object into the given table +func (txn *Txn) Insert(table string, obj interface{}) error { + if !txn.write { + return fmt.Errorf("cannot insert in read-only transaction") + } + + // Get the table schema + tableSchema, ok := txn.db.schema.Tables[table] + if !ok { + return fmt.Errorf("invalid table '%s'", table) + } + + // Get the primary ID of the object + idSchema := tableSchema.Indexes[id] + idIndexer := idSchema.Indexer.(SingleIndexer) + ok, idVal, err := idIndexer.FromObject(obj) + if err != nil { + return fmt.Errorf("failed to build primary index: %v", err) + } + if !ok { + return fmt.Errorf("object missing primary index") + } + + // Lookup the object by ID first, to see if this is an update + idTxn := txn.writableIndex(table, id) + existing, update := idTxn.Get(idVal) + + // On an update, there is an existing object with the given + // primary ID. We do the update by deleting the current object + // and inserting the new object. + for name, indexSchema := range tableSchema.Indexes { + indexTxn := txn.writableIndex(table, name) + + // Determine the new index value + var ( + ok bool + vals [][]byte + err error + ) + switch indexer := indexSchema.Indexer.(type) { + case SingleIndexer: + var val []byte + ok, val, err = indexer.FromObject(obj) + vals = [][]byte{val} + case MultiIndexer: + ok, vals, err = indexer.FromObject(obj) + } + if err != nil { + return fmt.Errorf("failed to build index '%s': %v", name, err) + } + + // Handle non-unique index by computing a unique index. + // This is done by appending the primary key which must + // be unique anyways. + if ok && !indexSchema.Unique { + for i := range vals { + vals[i] = append(vals[i], idVal...) + } + } + + // Handle the update by deleting from the index first + if update { + var ( + okExist bool + valsExist [][]byte + err error + ) + switch indexer := indexSchema.Indexer.(type) { + case SingleIndexer: + var valExist []byte + okExist, valExist, err = indexer.FromObject(existing) + valsExist = [][]byte{valExist} + case MultiIndexer: + okExist, valsExist, err = indexer.FromObject(existing) + } + if err != nil { + return fmt.Errorf("failed to build index '%s': %v", name, err) + } + if okExist { + for i, valExist := range valsExist { + // Handle non-unique index by computing a unique index. + // This is done by appending the primary key which must + // be unique anyways. + if !indexSchema.Unique { + valExist = append(valExist, idVal...) 
+				}
+
+				// If we are writing to the same index with the same value,
+				// we can avoid the delete as the insert will overwrite the
+				// value anyways.
+				if i >= len(vals) || !bytes.Equal(valExist, vals[i]) {
+					indexTxn.Delete(valExist)
+				}
+			}
+		}
+	}
+
+		// If there is no index value, either this is an error or an expected
+		// case and we can skip updating
+		if !ok {
+			if indexSchema.AllowMissing {
+				continue
+			} else {
+				return fmt.Errorf("missing value for index '%s'", name)
+			}
+		}
+
+		// Update the value of the index
+		for _, val := range vals {
+			indexTxn.Insert(val, obj)
+		}
+	}
+	return nil
+}
+
+// Delete is used to delete a single object from the given table.
+// This object must already exist in the table.
+func (txn *Txn) Delete(table string, obj interface{}) error {
+	if !txn.write {
+		return fmt.Errorf("cannot delete in read-only transaction")
+	}
+
+	// Get the table schema
+	tableSchema, ok := txn.db.schema.Tables[table]
+	if !ok {
+		return fmt.Errorf("invalid table '%s'", table)
+	}
+
+	// Get the primary ID of the object
+	idSchema := tableSchema.Indexes[id]
+	idIndexer := idSchema.Indexer.(SingleIndexer)
+	ok, idVal, err := idIndexer.FromObject(obj)
+	if err != nil {
+		return fmt.Errorf("failed to build primary index: %v", err)
+	}
+	if !ok {
+		return fmt.Errorf("object missing primary index")
+	}
+
+	// Lookup the object by ID first, check if we should continue
+	idTxn := txn.writableIndex(table, id)
+	existing, ok := idTxn.Get(idVal)
+	if !ok {
+		return ErrNotFound
+	}
+
+	// Remove the object from all the indexes
+	for name, indexSchema := range tableSchema.Indexes {
+		indexTxn := txn.writableIndex(table, name)
+
+		// Compute the existing index values so we can delete them
+		var (
+			ok   bool
+			vals [][]byte
+			err  error
+		)
+		switch indexer := indexSchema.Indexer.(type) {
+		case SingleIndexer:
+			var val []byte
+			ok, val, err = indexer.FromObject(existing)
+			vals = [][]byte{val}
+		case MultiIndexer:
+			ok, vals, err = indexer.FromObject(existing)
+		}
+		if err != nil {
+			return fmt.Errorf("failed to build index '%s': %v", name, err)
+		}
+		if ok {
+			// Handle non-unique index by computing a unique index.
+			// This is done by appending the primary key which must
+			// be unique anyways.
+			for _, val := range vals {
+				if !indexSchema.Unique {
+					val = append(val, idVal...)
+				}
+				indexTxn.Delete(val)
+			}
+		}
+	}
+	return nil
+}
+
+// DeletePrefix is used to delete an entire subtree based on a prefix.
+// The given index must be a prefix index, and will be used to perform
+// a scan and enumerate the set of objects to delete. These will be
+// removed from all other indexes, and then a special prefix operation
+// will delete the objects from the given index in an efficient subtree
+// delete operation. This is useful when you have a very large number
+// of objects indexed by the given index, along with a much smaller
+// number of entries in the other indexes for those objects.
+func (txn *Txn) DeletePrefix(table string, prefix_index string, prefix string) (bool, error) {
+	if !txn.write {
+		return false, fmt.Errorf("cannot delete in read-only transaction")
+	}
+
+	if !strings.HasSuffix(prefix_index, "_prefix") {
+		return false, fmt.Errorf("index name for DeletePrefix must be a prefix index, got %v", prefix_index)
+	}
+
+	deletePrefixIndex := strings.TrimSuffix(prefix_index, "_prefix")
+
+	// Get an iterator over all of the keys with the given prefix.
+ entries, err := txn.Get(table, prefix_index, prefix) + if err != nil { + return false, fmt.Errorf("failed kvs lookup: %s", err) + } + // Get the table schema + tableSchema, ok := txn.db.schema.Tables[table] + if !ok { + return false, fmt.Errorf("invalid table '%s'", table) + } + + foundAny := false + for entry := entries.Next(); entry != nil; entry = entries.Next() { + if !foundAny { + foundAny = true + } + // Get the primary ID of the object + idSchema := tableSchema.Indexes[id] + idIndexer := idSchema.Indexer.(SingleIndexer) + ok, idVal, err := idIndexer.FromObject(entry) + if err != nil { + return false, fmt.Errorf("failed to build primary index: %v", err) + } + if !ok { + return false, fmt.Errorf("object missing primary index") + } + // Remove the object from all the indexes except the given prefix index + for name, indexSchema := range tableSchema.Indexes { + if name == deletePrefixIndex { + continue + } + indexTxn := txn.writableIndex(table, name) + + // Handle the update by deleting from the index first + var ( + ok bool + vals [][]byte + err error + ) + switch indexer := indexSchema.Indexer.(type) { + case SingleIndexer: + var val []byte + ok, val, err = indexer.FromObject(entry) + vals = [][]byte{val} + case MultiIndexer: + ok, vals, err = indexer.FromObject(entry) + } + if err != nil { + return false, fmt.Errorf("failed to build index '%s': %v", name, err) + } + + if ok { + // Handle non-unique index by computing a unique index. + // This is done by appending the primary key which must + // be unique anyways. + for _, val := range vals { + if !indexSchema.Unique { + val = append(val, idVal...) + } + indexTxn.Delete(val) + } + } + } + } + if foundAny { + indexTxn := txn.writableIndex(table, deletePrefixIndex) + ok = indexTxn.DeletePrefix([]byte(prefix)) + if !ok { + panic(fmt.Errorf("prefix %v matched some entries but DeletePrefix did not delete any ", prefix)) + } + return true, nil + } + return false, nil +} + +// DeleteAll is used to delete all the objects in a given table +// matching the constraints on the index +func (txn *Txn) DeleteAll(table, index string, args ...interface{}) (int, error) { + if !txn.write { + return 0, fmt.Errorf("cannot delete in read-only transaction") + } + + // Get all the objects + iter, err := txn.Get(table, index, args...) + if err != nil { + return 0, err + } + + // Put them into a slice so there are no safety concerns while actually + // performing the deletes + var objs []interface{} + for { + obj := iter.Next() + if obj == nil { + break + } + + objs = append(objs, obj) + } + + // Do the deletes + num := 0 + for _, obj := range objs { + if err := txn.Delete(table, obj); err != nil { + return num, err + } + num++ + } + return num, nil +} + +// FirstWatch is used to return the first matching object for +// the given constraints on the index along with the watch channel +func (txn *Txn) FirstWatch(table, index string, args ...interface{}) (<-chan struct{}, interface{}, error) { + // Get the index value + indexSchema, val, err := txn.getIndexValue(table, index, args...) 
+ if err != nil { + return nil, nil, err + } + + // Get the index itself + indexTxn := txn.readableIndex(table, indexSchema.Name) + + // Do an exact lookup + if indexSchema.Unique && val != nil && indexSchema.Name == index { + watch, obj, ok := indexTxn.GetWatch(val) + if !ok { + return watch, nil, nil + } + return watch, obj, nil + } + + // Handle non-unique index by using an iterator and getting the first value + iter := indexTxn.Root().Iterator() + watch := iter.SeekPrefixWatch(val) + _, value, _ := iter.Next() + return watch, value, nil +} + +// First is used to return the first matching object for +// the given constraints on the index +func (txn *Txn) First(table, index string, args ...interface{}) (interface{}, error) { + _, val, err := txn.FirstWatch(table, index, args...) + return val, err +} + +// LongestPrefix is used to fetch the longest prefix match for the given +// constraints on the index. Note that this will not work with the memdb +// StringFieldIndex because it adds null terminators which prevent the +// algorithm from correctly finding a match (it will get to right before the +// null and fail to find a leaf node). This should only be used where the prefix +// given is capable of matching indexed entries directly, which typically only +// applies to a custom indexer. See the unit test for an example. +func (txn *Txn) LongestPrefix(table, index string, args ...interface{}) (interface{}, error) { + // Enforce that this only works on prefix indexes. + if !strings.HasSuffix(index, "_prefix") { + return nil, fmt.Errorf("must use '%s_prefix' on index", index) + } + + // Get the index value. + indexSchema, val, err := txn.getIndexValue(table, index, args...) + if err != nil { + return nil, err + } + + // This algorithm only makes sense against a unique index, otherwise the + // index keys will have the IDs appended to them. + if !indexSchema.Unique { + return nil, fmt.Errorf("index '%s' is not unique", index) + } + + // Find the longest prefix match with the given index. + indexTxn := txn.readableIndex(table, indexSchema.Name) + if _, value, ok := indexTxn.Root().LongestPrefix(val); ok { + return value, nil + } + return nil, nil +} + +// getIndexValue is used to get the IndexSchema and the value +// used to scan the index given the parameters. This handles prefix based +// scans when the index has the "_prefix" suffix. The index must support +// prefix iteration. +func (txn *Txn) getIndexValue(table, index string, args ...interface{}) (*IndexSchema, []byte, error) { + // Get the table schema + tableSchema, ok := txn.db.schema.Tables[table] + if !ok { + return nil, nil, fmt.Errorf("invalid table '%s'", table) + } + + // Check for a prefix scan + prefixScan := false + if strings.HasSuffix(index, "_prefix") { + index = strings.TrimSuffix(index, "_prefix") + prefixScan = true + } + + // Get the index schema + indexSchema, ok := tableSchema.Indexes[index] + if !ok { + return nil, nil, fmt.Errorf("invalid index '%s'", index) + } + + // Hot-path for when there are no arguments + if len(args) == 0 { + return indexSchema, nil, nil + } + + // Special case the prefix scanning + if prefixScan { + prefixIndexer, ok := indexSchema.Indexer.(PrefixIndexer) + if !ok { + return indexSchema, nil, + fmt.Errorf("index '%s' does not support prefix scanning", index) + } + + val, err := prefixIndexer.PrefixFromArgs(args...) 
+		if err != nil {
+			return indexSchema, nil, fmt.Errorf("index error: %v", err)
+		}
+		return indexSchema, val, err
+	}
+
+	// Get the exact match index
+	val, err := indexSchema.Indexer.FromArgs(args...)
+	if err != nil {
+		return indexSchema, nil, fmt.Errorf("index error: %v", err)
+	}
+	return indexSchema, val, err
+}
+
+// ResultIterator is used to iterate over a list of results
+// from a Get query on a table.
+type ResultIterator interface {
+	WatchCh() <-chan struct{}
+	Next() interface{}
+}
+
+// Get is used to construct a ResultIterator over all the
+// rows that match the given constraints of an index.
+func (txn *Txn) Get(table, index string, args ...interface{}) (ResultIterator, error) {
+	// Get the index value to scan
+	indexSchema, val, err := txn.getIndexValue(table, index, args...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Get the index itself
+	indexTxn := txn.readableIndex(table, indexSchema.Name)
+	indexRoot := indexTxn.Root()
+
+	// Get an iterator over the index
+	indexIter := indexRoot.Iterator()
+
+	// Seek the iterator to the appropriate sub-set
+	watchCh := indexIter.SeekPrefixWatch(val)
+
+	// Create an iterator
+	iter := &radixIterator{
+		iter:    indexIter,
+		watchCh: watchCh,
+	}
+	return iter, nil
+}
+
+// Defer is used to push a new arbitrary function onto a stack which
+// gets called when a transaction is committed and finished. Deferred
+// functions are called in LIFO order, and only invoked at the end of
+// write transactions.
+func (txn *Txn) Defer(fn func()) {
+	txn.after = append(txn.after, fn)
+}
+
+// radixIterator is used to wrap an underlying iradix iterator.
+// This is much more efficient than a sliceIterator as we are not
+// materializing the entire view.
+type radixIterator struct {
+	iter    *iradix.Iterator
+	watchCh <-chan struct{}
+}
+
+func (r *radixIterator) WatchCh() <-chan struct{} {
+	return r.watchCh
+}
+
+func (r *radixIterator) Next() interface{} {
+	_, value, ok := r.iter.Next()
+	if !ok {
+		return nil
+	}
+	return value
+}
diff --git a/extender/code/vendor/github.com/hashicorp/go-memdb/watch.go b/extender/code/vendor/github.com/hashicorp/go-memdb/watch.go
new file mode 100644
index 000000000..a6f01213b
--- /dev/null
+++ b/extender/code/vendor/github.com/hashicorp/go-memdb/watch.go
@@ -0,0 +1,129 @@
+package memdb
+
+import (
+	"context"
+	"time"
+)
+
+// WatchSet is a collection of watch channels.
+type WatchSet map[<-chan struct{}]struct{}
+
+// NewWatchSet constructs a new watch set.
+func NewWatchSet() WatchSet {
+	return make(map[<-chan struct{}]struct{})
+}
+
+// Add appends a watchCh to the WatchSet if non-nil.
+func (w WatchSet) Add(watchCh <-chan struct{}) {
+	if w == nil {
+		return
+	}
+
+	if _, ok := w[watchCh]; !ok {
+		w[watchCh] = struct{}{}
+	}
+}
+
+// AddWithLimit appends a watchCh to the WatchSet if non-nil, and if the given
+// softLimit hasn't been exceeded. Otherwise, it will watch the given alternate
+// channel. It's expected that the altCh will be the same on many calls to this
+// function, so you will exceed the soft limit a little bit if you hit this, but
+// not by much.
+//
+// This is useful if you want to track individual items up to some limit, after
+// which you watch a higher-level channel (usually a channel from the start of
+// an iterator higher up in the radix tree) that will watch a superset of items.
+func (w WatchSet) AddWithLimit(softLimit int, watchCh <-chan struct{}, altCh <-chan struct{}) {
+	// This is safe for a nil WatchSet so we don't need to check that here.
+ if len(w) < softLimit { + w.Add(watchCh) + } else { + w.Add(altCh) + } +} + +// Watch is used to wait for either the watch set to trigger or a timeout. +// Returns true on timeout. +func (w WatchSet) Watch(timeoutCh <-chan time.Time) bool { + if w == nil { + return false + } + + // Create a context that gets cancelled when the timeout is triggered + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { + select { + case <-timeoutCh: + cancel() + case <-ctx.Done(): + } + }() + + return w.WatchCtx(ctx) == context.Canceled +} + +// WatchCtx is used to wait for either the watch set to trigger or for the +// context to be cancelled. Watch with a timeout channel can be mimicked by +// creating a context with a deadline. WatchCtx should be preferred over Watch. +func (w WatchSet) WatchCtx(ctx context.Context) error { + if w == nil { + return nil + } + + if n := len(w); n <= aFew { + idx := 0 + chunk := make([]<-chan struct{}, aFew) + for watchCh := range w { + chunk[idx] = watchCh + idx++ + } + return watchFew(ctx, chunk) + } + + return w.watchMany(ctx) +} + +// watchMany is used if there are many watchers. +func (w WatchSet) watchMany(ctx context.Context) error { + // Set up a goroutine for each watcher. + triggerCh := make(chan struct{}, 1) + watcher := func(chunk []<-chan struct{}) { + if err := watchFew(ctx, chunk); err == nil { + select { + case triggerCh <- struct{}{}: + default: + } + } + } + + // Apportion the watch channels into chunks we can feed into the + // watchFew helper. + idx := 0 + chunk := make([]<-chan struct{}, aFew) + for watchCh := range w { + subIdx := idx % aFew + chunk[subIdx] = watchCh + idx++ + + // Fire off this chunk and start a fresh one. + if idx%aFew == 0 { + go watcher(chunk) + chunk = make([]<-chan struct{}, aFew) + } + } + + // Make sure to watch any residual channels in the last chunk. + if idx%aFew != 0 { + go watcher(chunk) + } + + // Wait for a channel to trigger or timeout. + select { + case <-triggerCh: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} diff --git a/extender/code/vendor/github.com/hashicorp/go-memdb/watch_few.go b/extender/code/vendor/github.com/hashicorp/go-memdb/watch_few.go new file mode 100644 index 000000000..880f098b7 --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/go-memdb/watch_few.go @@ -0,0 +1,117 @@ +package memdb + +//go:generate sh -c "go run watch-gen/main.go >watch_few.go" + +import( + "context" +) + +// aFew gives how many watchers this function is wired to support. You must +// always pass a full slice of this length, but unused channels can be nil. +const aFew = 32 + +// watchFew is used if there are only a few watchers as a performance +// optimization. 
+func watchFew(ctx context.Context, ch []<-chan struct{}) error { + select { + + case <-ch[0]: + return nil + + case <-ch[1]: + return nil + + case <-ch[2]: + return nil + + case <-ch[3]: + return nil + + case <-ch[4]: + return nil + + case <-ch[5]: + return nil + + case <-ch[6]: + return nil + + case <-ch[7]: + return nil + + case <-ch[8]: + return nil + + case <-ch[9]: + return nil + + case <-ch[10]: + return nil + + case <-ch[11]: + return nil + + case <-ch[12]: + return nil + + case <-ch[13]: + return nil + + case <-ch[14]: + return nil + + case <-ch[15]: + return nil + + case <-ch[16]: + return nil + + case <-ch[17]: + return nil + + case <-ch[18]: + return nil + + case <-ch[19]: + return nil + + case <-ch[20]: + return nil + + case <-ch[21]: + return nil + + case <-ch[22]: + return nil + + case <-ch[23]: + return nil + + case <-ch[24]: + return nil + + case <-ch[25]: + return nil + + case <-ch[26]: + return nil + + case <-ch[27]: + return nil + + case <-ch[28]: + return nil + + case <-ch[29]: + return nil + + case <-ch[30]: + return nil + + case <-ch[31]: + return nil + + case <-ctx.Done(): + return ctx.Err() + } +} diff --git a/extender/code/vendor/github.com/hashicorp/golang-lru/.gitignore b/extender/code/vendor/github.com/hashicorp/golang-lru/.gitignore new file mode 100644 index 000000000..836562412 --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/golang-lru/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/extender/code/vendor/github.com/hashicorp/golang-lru/2q.go b/extender/code/vendor/github.com/hashicorp/golang-lru/2q.go new file mode 100644 index 000000000..e474cd075 --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/golang-lru/2q.go @@ -0,0 +1,223 @@ +package lru + +import ( + "fmt" + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +const ( + // Default2QRecentRatio is the ratio of the 2Q cache dedicated + // to recently added entries that have only been accessed once. + Default2QRecentRatio = 0.25 + + // Default2QGhostEntries is the default ratio of ghost + // entries kept to track entries recently evicted + Default2QGhostEntries = 0.50 +) + +// TwoQueueCache is a thread-safe fixed size 2Q cache. +// 2Q is an enhancement over the standard LRU cache +// in that it tracks both frequently and recently used +// entries separately. This avoids a burst in access to new +// entries from evicting frequently used entries. It adds some +// additional tracking overhead to the standard LRU cache, and is +// computationally about 2x the cost, and adds some metadata over +// head. The ARCCache is similar, but does not require setting any +// parameters. +type TwoQueueCache struct { + size int + recentSize int + + recent simplelru.LRUCache + frequent simplelru.LRUCache + recentEvict simplelru.LRUCache + lock sync.RWMutex +} + +// New2Q creates a new TwoQueueCache using the default +// values for the parameters. +func New2Q(size int) (*TwoQueueCache, error) { + return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries) +} + +// New2QParams creates a new TwoQueueCache using the provided +// parameter values. 
+func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) { + if size <= 0 { + return nil, fmt.Errorf("invalid size") + } + if recentRatio < 0.0 || recentRatio > 1.0 { + return nil, fmt.Errorf("invalid recent ratio") + } + if ghostRatio < 0.0 || ghostRatio > 1.0 { + return nil, fmt.Errorf("invalid ghost ratio") + } + + // Determine the sub-sizes + recentSize := int(float64(size) * recentRatio) + evictSize := int(float64(size) * ghostRatio) + + // Allocate the LRUs + recent, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + frequent, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + recentEvict, err := simplelru.NewLRU(evictSize, nil) + if err != nil { + return nil, err + } + + // Initialize the cache + c := &TwoQueueCache{ + size: size, + recentSize: recentSize, + recent: recent, + frequent: frequent, + recentEvict: recentEvict, + } + return c, nil +} + +// Get looks up a key's value from the cache. +func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if this is a frequent value + if val, ok := c.frequent.Get(key); ok { + return val, ok + } + + // If the value is contained in recent, then we + // promote it to frequent + if val, ok := c.recent.Peek(key); ok { + c.recent.Remove(key) + c.frequent.Add(key, val) + return val, ok + } + + // No hit + return nil, false +} + +// Add adds a value to the cache. +func (c *TwoQueueCache) Add(key, value interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if the value is frequently used already, + // and just update the value + if c.frequent.Contains(key) { + c.frequent.Add(key, value) + return + } + + // Check if the value is recently used, and promote + // the value into the frequent list + if c.recent.Contains(key) { + c.recent.Remove(key) + c.frequent.Add(key, value) + return + } + + // If the value was recently evicted, add it to the + // frequently used list + if c.recentEvict.Contains(key) { + c.ensureSpace(true) + c.recentEvict.Remove(key) + c.frequent.Add(key, value) + return + } + + // Add to the recently seen list + c.ensureSpace(false) + c.recent.Add(key, value) + return +} + +// ensureSpace is used to ensure we have space in the cache +func (c *TwoQueueCache) ensureSpace(recentEvict bool) { + // If we have space, nothing to do + recentLen := c.recent.Len() + freqLen := c.frequent.Len() + if recentLen+freqLen < c.size { + return + } + + // If the recent buffer is larger than + // the target, evict from there + if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) { + k, _, _ := c.recent.RemoveOldest() + c.recentEvict.Add(k, nil) + return + } + + // Remove from the frequent list otherwise + c.frequent.RemoveOldest() +} + +// Len returns the number of items in the cache. +func (c *TwoQueueCache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.recent.Len() + c.frequent.Len() +} + +// Keys returns a slice of the keys in the cache. +// The frequently used keys are first in the returned slice. +func (c *TwoQueueCache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + k1 := c.frequent.Keys() + k2 := c.recent.Keys() + return append(k1, k2...) +} + +// Remove removes the provided key from the cache. 
+func (c *TwoQueueCache) Remove(key interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + if c.frequent.Remove(key) { + return + } + if c.recent.Remove(key) { + return + } + if c.recentEvict.Remove(key) { + return + } +} + +// Purge is used to completely clear the cache. +func (c *TwoQueueCache) Purge() { + c.lock.Lock() + defer c.lock.Unlock() + c.recent.Purge() + c.frequent.Purge() + c.recentEvict.Purge() +} + +// Contains is used to check if the cache contains a key +// without updating recency or frequency. +func (c *TwoQueueCache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.frequent.Contains(key) || c.recent.Contains(key) +} + +// Peek is used to inspect the cache value of a key +// without updating recency or frequency. +func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) { + c.lock.RLock() + defer c.lock.RUnlock() + if val, ok := c.frequent.Peek(key); ok { + return val, ok + } + return c.recent.Peek(key) +} diff --git a/extender/code/vendor/github.com/hashicorp/golang-lru/LICENSE b/extender/code/vendor/github.com/hashicorp/golang-lru/LICENSE new file mode 100644 index 000000000..be2cc4dfb --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/golang-lru/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. 
"Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. 
Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. 
Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. diff --git a/extender/code/vendor/github.com/hashicorp/golang-lru/README.md b/extender/code/vendor/github.com/hashicorp/golang-lru/README.md new file mode 100644 index 000000000..33e58cfaf --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/golang-lru/README.md @@ -0,0 +1,25 @@ +golang-lru +========== + +This provides the `lru` package which implements a fixed-size +thread safe LRU cache. It is based on the cache in Groupcache. 
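+
+In addition to the basic `Cache`, this package also vendors the
+`TwoQueueCache` (2Q) and `ARCCache` variants (see `2q.go` and `arc.go`
+in this package). A minimal sketch of the 2Q API as vendored here; the
+key/value pair is illustrative:
+
+```go
+l2q, _ := New2Q(128)
+l2q.Add("answer", 42)
+if v, ok := l2q.Get("answer"); ok {
+	fmt.Println(v) // 42; the Get also promotes the entry to the frequent list
+}
+```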
+ +Documentation +============= + +Full docs are available on [Godoc](http://godoc.org/github.com/hashicorp/golang-lru) + +Example +======= + +Using the LRU is very simple: + +```go +l, _ := New(128) +for i := 0; i < 256; i++ { + l.Add(i, nil) +} +if l.Len() != 128 { + panic(fmt.Sprintf("bad len: %v", l.Len())) +} +``` diff --git a/extender/code/vendor/github.com/hashicorp/golang-lru/arc.go b/extender/code/vendor/github.com/hashicorp/golang-lru/arc.go new file mode 100644 index 000000000..555225a21 --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/golang-lru/arc.go @@ -0,0 +1,257 @@ +package lru + +import ( + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC). +// ARC is an enhancement over the standard LRU cache in that tracks both +// frequency and recency of use. This avoids a burst in access to new +// entries from evicting the frequently used older entries. It adds some +// additional tracking overhead to a standard LRU cache, computationally +// it is roughly 2x the cost, and the extra memory overhead is linear +// with the size of the cache. ARC has been patented by IBM, but is +// similar to the TwoQueueCache (2Q) which requires setting parameters. +type ARCCache struct { + size int // Size is the total capacity of the cache + p int // P is the dynamic preference towards T1 or T2 + + t1 simplelru.LRUCache // T1 is the LRU for recently accessed items + b1 simplelru.LRUCache // B1 is the LRU for evictions from t1 + + t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items + b2 simplelru.LRUCache // B2 is the LRU for evictions from t2 + + lock sync.RWMutex +} + +// NewARC creates an ARC of the given size +func NewARC(size int) (*ARCCache, error) { + // Create the sub LRUs + b1, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + b2, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + t1, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + t2, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + + // Initialize the ARC + c := &ARCCache{ + size: size, + p: 0, + t1: t1, + b1: b1, + t2: t2, + b2: b2, + } + return c, nil +} + +// Get looks up a key's value from the cache. +func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) { + c.lock.Lock() + defer c.lock.Unlock() + + // If the value is contained in T1 (recent), then + // promote it to T2 (frequent) + if val, ok := c.t1.Peek(key); ok { + c.t1.Remove(key) + c.t2.Add(key, val) + return val, ok + } + + // Check if the value is contained in T2 (frequent) + if val, ok := c.t2.Get(key); ok { + return val, ok + } + + // No hit + return nil, false +} + +// Add adds a value to the cache. 
+func (c *ARCCache) Add(key, value interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if the value is contained in T1 (recent), and potentially + // promote it to frequent T2 + if c.t1.Contains(key) { + c.t1.Remove(key) + c.t2.Add(key, value) + return + } + + // Check if the value is already in T2 (frequent) and update it + if c.t2.Contains(key) { + c.t2.Add(key, value) + return + } + + // Check if this value was recently evicted as part of the + // recently used list + if c.b1.Contains(key) { + // T1 set is too small, increase P appropriately + delta := 1 + b1Len := c.b1.Len() + b2Len := c.b2.Len() + if b2Len > b1Len { + delta = b2Len / b1Len + } + if c.p+delta >= c.size { + c.p = c.size + } else { + c.p += delta + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(false) + } + + // Remove from B1 + c.b1.Remove(key) + + // Add the key to the frequently used list + c.t2.Add(key, value) + return + } + + // Check if this value was recently evicted as part of the + // frequently used list + if c.b2.Contains(key) { + // T2 set is too small, decrease P appropriately + delta := 1 + b1Len := c.b1.Len() + b2Len := c.b2.Len() + if b1Len > b2Len { + delta = b1Len / b2Len + } + if delta >= c.p { + c.p = 0 + } else { + c.p -= delta + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(true) + } + + // Remove from B2 + c.b2.Remove(key) + + // Add the key to the frequently used list + c.t2.Add(key, value) + return + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(false) + } + + // Keep the size of the ghost buffers trim + if c.b1.Len() > c.size-c.p { + c.b1.RemoveOldest() + } + if c.b2.Len() > c.p { + c.b2.RemoveOldest() + } + + // Add to the recently seen list + c.t1.Add(key, value) + return +} + +// replace is used to adaptively evict from either T1 or T2 +// based on the current learned value of P +func (c *ARCCache) replace(b2ContainsKey bool) { + t1Len := c.t1.Len() + if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) { + k, _, ok := c.t1.RemoveOldest() + if ok { + c.b1.Add(k, nil) + } + } else { + k, _, ok := c.t2.RemoveOldest() + if ok { + c.b2.Add(k, nil) + } + } +} + +// Len returns the number of cached entries +func (c *ARCCache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.t1.Len() + c.t2.Len() +} + +// Keys returns all the cached keys +func (c *ARCCache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + k1 := c.t1.Keys() + k2 := c.t2.Keys() + return append(k1, k2...) +} + +// Remove is used to purge a key from the cache +func (c *ARCCache) Remove(key interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + if c.t1.Remove(key) { + return + } + if c.t2.Remove(key) { + return + } + if c.b1.Remove(key) { + return + } + if c.b2.Remove(key) { + return + } +} + +// Purge is used to clear the cache +func (c *ARCCache) Purge() { + c.lock.Lock() + defer c.lock.Unlock() + c.t1.Purge() + c.t2.Purge() + c.b1.Purge() + c.b2.Purge() +} + +// Contains is used to check if the cache contains a key +// without updating recency or frequency. +func (c *ARCCache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.t1.Contains(key) || c.t2.Contains(key) +} + +// Peek is used to inspect the cache value of a key +// without updating recency or frequency. 
+func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) { + c.lock.RLock() + defer c.lock.RUnlock() + if val, ok := c.t1.Peek(key); ok { + return val, ok + } + return c.t2.Peek(key) +} diff --git a/extender/code/vendor/github.com/hashicorp/golang-lru/doc.go b/extender/code/vendor/github.com/hashicorp/golang-lru/doc.go new file mode 100644 index 000000000..2547df979 --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/golang-lru/doc.go @@ -0,0 +1,21 @@ +// Package lru provides three different LRU caches of varying sophistication. +// +// Cache is a simple LRU cache. It is based on the +// LRU implementation in groupcache: +// https://github.com/golang/groupcache/tree/master/lru +// +// TwoQueueCache tracks frequently used and recently used entries separately. +// This avoids a burst of accesses from taking out frequently used entries, +// at the cost of about 2x computational overhead and some extra bookkeeping. +// +// ARCCache is an adaptive replacement cache. It tracks recent evictions as +// well as recent usage in both the frequent and recent caches. Its +// computational overhead is comparable to TwoQueueCache, but the memory +// overhead is linear with the size of the cache. +// +// ARC has been patented by IBM, so do not use it if that is problematic for +// your program. +// +// All caches in this package take locks while operating, and are therefore +// thread-safe for consumers. +package lru diff --git a/extender/code/vendor/github.com/hashicorp/golang-lru/go.mod b/extender/code/vendor/github.com/hashicorp/golang-lru/go.mod new file mode 100644 index 000000000..824cb97e8 --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/golang-lru/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/golang-lru diff --git a/extender/code/vendor/github.com/hashicorp/golang-lru/lru.go b/extender/code/vendor/github.com/hashicorp/golang-lru/lru.go new file mode 100644 index 000000000..1cbe04b7d --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/golang-lru/lru.go @@ -0,0 +1,116 @@ +package lru + +import ( + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +// Cache is a thread-safe fixed size LRU cache. +type Cache struct { + lru simplelru.LRUCache + lock sync.RWMutex +} + +// New creates an LRU of the given size. +func New(size int) (*Cache, error) { + return NewWithEvict(size, nil) +} + +// NewWithEvict constructs a fixed size cache with the given eviction +// callback. +func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) { + lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted)) + if err != nil { + return nil, err + } + c := &Cache{ + lru: lru, + } + return c, nil +} + +// Purge is used to completely clear the cache. +func (c *Cache) Purge() { + c.lock.Lock() + c.lru.Purge() + c.lock.Unlock() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. +func (c *Cache) Add(key, value interface{}) (evicted bool) { + c.lock.Lock() + evicted = c.lru.Add(key, value) + c.lock.Unlock() + return evicted +} + +// Get looks up a key's value from the cache. +func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { + c.lock.Lock() + value, ok = c.lru.Get(key) + c.lock.Unlock() + return value, ok +} + +// Contains checks if a key is in the cache, without updating the +// recent-ness or deleting it for being stale. 
+func (c *Cache) Contains(key interface{}) bool { + c.lock.RLock() + containKey := c.lru.Contains(key) + c.lock.RUnlock() + return containKey +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { + c.lock.RLock() + value, ok = c.lru.Peek(key) + c.lock.RUnlock() + return value, ok +} + +// ContainsOrAdd checks if a key is in the cache without updating the +// recent-ness or deleting it for being stale, and if not, adds the value. +// Returns whether found and whether an eviction occurred. +func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { + c.lock.Lock() + defer c.lock.Unlock() + + if c.lru.Contains(key) { + return true, false + } + evicted = c.lru.Add(key, value) + return false, evicted +} + +// Remove removes the provided key from the cache. +func (c *Cache) Remove(key interface{}) { + c.lock.Lock() + c.lru.Remove(key) + c.lock.Unlock() +} + +// RemoveOldest removes the oldest item from the cache. +func (c *Cache) RemoveOldest() { + c.lock.Lock() + c.lru.RemoveOldest() + c.lock.Unlock() +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *Cache) Keys() []interface{} { + c.lock.RLock() + keys := c.lru.Keys() + c.lock.RUnlock() + return keys +} + +// Len returns the number of items in the cache. +func (c *Cache) Len() int { + c.lock.RLock() + length := c.lru.Len() + c.lock.RUnlock() + return length +} diff --git a/extender/code/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/extender/code/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go new file mode 100644 index 000000000..5673773b2 --- /dev/null +++ b/extender/code/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go @@ -0,0 +1,161 @@ +package simplelru + +import ( + "container/list" + "errors" +) + +// EvictCallback is used to get a callback when a cache entry is evicted +type EvictCallback func(key interface{}, value interface{}) + +// LRU implements a non-thread safe fixed size LRU cache +type LRU struct { + size int + evictList *list.List + items map[interface{}]*list.Element + onEvict EvictCallback +} + +// entry is used to hold a value in the evictList +type entry struct { + key interface{} + value interface{} +} + +// NewLRU constructs an LRU of the given size +func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { + if size <= 0 { + return nil, errors.New("Must provide a positive size") + } + c := &LRU{ + size: size, + evictList: list.New(), + items: make(map[interface{}]*list.Element), + onEvict: onEvict, + } + return c, nil +} + +// Purge is used to completely clear the cache. +func (c *LRU) Purge() { + for k, v := range c.items { + if c.onEvict != nil { + c.onEvict(k, v.Value.(*entry).value) + } + delete(c.items, k) + } + c.evictList.Init() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. +func (c *LRU) Add(key, value interface{}) (evicted bool) { + // Check for existing item + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + ent.Value.(*entry).value = value + return false + } + + // Add new item + ent := &entry{key, value} + entry := c.evictList.PushFront(ent) + c.items[key] = entry + + evict := c.evictList.Len() > c.size + // Verify size not exceeded + if evict { + c.removeOldest() + } + return evict +} + +// Get looks up a key's value from the cache. 
+func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
+	if ent, ok := c.items[key]; ok {
+		c.evictList.MoveToFront(ent)
+		return ent.Value.(*entry).value, true
+	}
+	return
+}
+
+// Contains checks if a key is in the cache, without updating the recent-ness
+// or deleting it for being stale.
+func (c *LRU) Contains(key interface{}) (ok bool) {
+	_, ok = c.items[key]
+	return ok
+}
+
+// Peek returns the key value (or undefined if not found) without updating
+// the "recently used"-ness of the key.
+func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
+	var ent *list.Element
+	if ent, ok = c.items[key]; ok {
+		return ent.Value.(*entry).value, true
+	}
+	return nil, ok
+}
+
+// Remove removes the provided key from the cache, returning if the
+// key was contained.
+func (c *LRU) Remove(key interface{}) (present bool) {
+	if ent, ok := c.items[key]; ok {
+		c.removeElement(ent)
+		return true
+	}
+	return false
+}
+
+// RemoveOldest removes the oldest item from the cache.
+func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
+	ent := c.evictList.Back()
+	if ent != nil {
+		c.removeElement(ent)
+		kv := ent.Value.(*entry)
+		return kv.key, kv.value, true
+	}
+	return nil, nil, false
+}
+
+// GetOldest returns the oldest entry.
+func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) {
+	ent := c.evictList.Back()
+	if ent != nil {
+		kv := ent.Value.(*entry)
+		return kv.key, kv.value, true
+	}
+	return nil, nil, false
+}
+
+// Keys returns a slice of the keys in the cache, from oldest to newest.
+func (c *LRU) Keys() []interface{} {
+	keys := make([]interface{}, len(c.items))
+	i := 0
+	for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() {
+		keys[i] = ent.Value.(*entry).key
+		i++
+	}
+	return keys
+}
+
+// Len returns the number of items in the cache.
+func (c *LRU) Len() int {
+	return c.evictList.Len()
+}
+
+// removeOldest removes the oldest item from the cache.
+func (c *LRU) removeOldest() {
+	ent := c.evictList.Back()
+	if ent != nil {
+		c.removeElement(ent)
+	}
+}
+
+// removeElement is used to remove a given list element from the cache.
+func (c *LRU) removeElement(e *list.Element) {
+	c.evictList.Remove(e)
+	kv := e.Value.(*entry)
+	delete(c.items, kv.key)
+	if c.onEvict != nil {
+		c.onEvict(kv.key, kv.value)
+	}
+}
diff --git a/extender/code/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/extender/code/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
new file mode 100644
index 000000000..74c707744
--- /dev/null
+++ b/extender/code/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
@@ -0,0 +1,36 @@
+package simplelru
+
+// LRUCache is the interface for simple LRU cache.
+type LRUCache interface {
+	// Adds a value to the cache, returns true if an eviction occurred and
+	// updates the "recently used"-ness of the key.
+	Add(key, value interface{}) bool
+
+	// Returns key's value from the cache and
+	// updates the "recently used"-ness of the key. #value, isFound
+	Get(key interface{}) (value interface{}, ok bool)
+
+	// Checks if a key exists in the cache without updating the recent-ness.
+	Contains(key interface{}) (ok bool)
+
+	// Returns key's value without updating the "recently used"-ness of the key.
+	Peek(key interface{}) (value interface{}, ok bool)
+
+	// Removes a key from the cache.
+	Remove(key interface{}) bool
+
+	// Removes the oldest entry from cache.
+	RemoveOldest() (interface{}, interface{}, bool)
+
+	// Returns the oldest entry from the cache. #key, value, isFound
#key, value, isFound + GetOldest() (interface{}, interface{}, bool) + + // Returns a slice of the keys in the cache, from oldest to newest. + Keys() []interface{} + + // Returns the number of items in the cache. + Len() int + + // Clear all cache entries + Purge() +} diff --git a/extender/code/vendor/github.com/jmespath/go-jmespath/.gitignore b/extender/code/vendor/github.com/jmespath/go-jmespath/.gitignore new file mode 100644 index 000000000..5091fb073 --- /dev/null +++ b/extender/code/vendor/github.com/jmespath/go-jmespath/.gitignore @@ -0,0 +1,4 @@ +/jpgo +jmespath-fuzz.zip +cpu.out +go-jmespath.test diff --git a/extender/code/vendor/github.com/jmespath/go-jmespath/.travis.yml b/extender/code/vendor/github.com/jmespath/go-jmespath/.travis.yml new file mode 100644 index 000000000..730c7fa51 --- /dev/null +++ b/extender/code/vendor/github.com/jmespath/go-jmespath/.travis.yml @@ -0,0 +1,17 @@ +language: go + +sudo: false + +go: + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - 1.13.x + +install: go get -v -t ./... +script: make test diff --git a/extender/code/vendor/github.com/jmespath/go-jmespath/LICENSE b/extender/code/vendor/github.com/jmespath/go-jmespath/LICENSE new file mode 100644 index 000000000..b03310a91 --- /dev/null +++ b/extender/code/vendor/github.com/jmespath/go-jmespath/LICENSE @@ -0,0 +1,13 @@ +Copyright 2015 James Saryerwinnie + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/extender/code/vendor/github.com/jmespath/go-jmespath/Makefile b/extender/code/vendor/github.com/jmespath/go-jmespath/Makefile new file mode 100644 index 000000000..a828d2848 --- /dev/null +++ b/extender/code/vendor/github.com/jmespath/go-jmespath/Makefile @@ -0,0 +1,44 @@ + +CMD = jpgo + +help: + @echo "Please use \`make ' where is one of" + @echo " test to run all the tests" + @echo " build to build the library and jp executable" + @echo " generate to run codegen" + + +generate: + go generate ./... + +build: + rm -f $(CMD) + go build ./... + rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./... + mv cmd/$(CMD)/$(CMD) . + +test: + go test -v ./... + +check: + go vet ./... + @echo "golint ./..." + @lint=`golint ./...`; \ + lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \ + echo "$$lint"; \ + if [ "$$lint" != "" ]; then exit 1; fi + +htmlc: + go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov + +buildfuzz: + go-fuzz-build github.com/jmespath/go-jmespath/fuzz + +fuzz: buildfuzz + go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata + +bench: + go test -bench . 
-cpuprofile cpu.out + +pprof-cpu: + go tool pprof ./go-jmespath.test ./cpu.out diff --git a/extender/code/vendor/github.com/jmespath/go-jmespath/README.md b/extender/code/vendor/github.com/jmespath/go-jmespath/README.md new file mode 100644 index 000000000..110ad7999 --- /dev/null +++ b/extender/code/vendor/github.com/jmespath/go-jmespath/README.md @@ -0,0 +1,87 @@ +# go-jmespath - A JMESPath implementation in Go + +[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath) + + + +go-jmespath is a Go implementation of JMESPath, +which is a query language for JSON. It will take a JSON +document and transform it into another JSON document +through a JMESPath expression. + +Using go-jmespath is really easy. There's a single function +you use, `jmespath.Search`: + + +```go +> import "github.com/jmespath/go-jmespath" +> +> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data +> var data interface{} +> err := json.Unmarshal(jsondata, &data) +> result, err := jmespath.Search("foo.bar.baz[2]", data) +result = 2 +``` + +In the example we gave the ``Search`` function input data of +`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}` as well as the JMESPath +expression `foo.bar.baz[2]`, and the `Search` function evaluated +the expression against the input data to produce the result ``2``. + +The JMESPath language can do a lot more than select an element +from a list. Here are a few more examples: + +```go +> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data +> var data interface{} +> err := json.Unmarshal(jsondata, &data) +> result, err := jmespath.Search("foo.bar", data) +result = { "baz": [ 0, 1, 2, 3, 4 ] } + + +> var jsondata = []byte(`{"foo": [{"first": "a", "last": "b"}, + {"first": "c", "last": "d"}]}`) // your data +> var data interface{} +> err := json.Unmarshal(jsondata, &data) +> result, err := jmespath.Search("foo[*].first", data) +result = [ 'a', 'c' ] + + +> var jsondata = []byte(`{"foo": [{"age": 20}, {"age": 25}, + {"age": 30}, {"age": 35}, + {"age": 40}]}`) // your data +> var data interface{} +> err := json.Unmarshal(jsondata, &data) +> result, err := jmespath.Search("foo[?age > `30`]", data) +result = [ { age: 35 }, { age: 40 } ] +``` + +You can also pre-compile your query. This is useful if +you are going to run multiple searches with it: + +```go + > var jsondata = []byte(`{"foo": "bar"}`) + > var data interface{} + > err := json.Unmarshal(jsondata, &data) + > precompiled, err := jmespath.Compile("foo") + > if err != nil { + > // ... handle the error + > } + > result, err := precompiled.Search(data) + result = "bar" +``` + +## More Resources + +The examples above show only a small amount of what +a JMESPath expression can do. If you want to take a +tour of the language, the *best* place to go is the +[JMESPath Tutorial](http://jmespath.org/tutorial.html). + +One of the best things about JMESPath is that it is +implemented in many different programming languages including +Python, Ruby, PHP, Lua, etc. To see a complete list of libraries, +check out the [JMESPath libraries page](http://jmespath.org/libraries.html). + +And finally, the full JMESPath specification can be found +on the [JMESPath site](http://jmespath.org/specification.html). 
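For orientation, here is a minimal, self-contained sketch of how extender code could consume this vendored library via the API that the next file (api.go) introduces. It is illustrative only: the import path is the upstream module path implied by the vendor directory, and the `people`/`age` document shape is invented for the example.

```go
package main

import (
	"encoding/json"
	"fmt"

	jmespath "github.com/jmespath/go-jmespath"
)

// Compiled once at startup; per api.go below, a *JMESPath is safe for
// concurrent use, so a single compiled query can be shared by goroutines.
var adults = jmespath.MustCompile("people[?age > `30`].name")

func main() {
	raw := []byte(`{"people": [{"name": "ann", "age": 20}, {"name": "bob", "age": 35}]}`)
	var data interface{}
	if err := json.Unmarshal(raw, &data); err != nil {
		panic(err)
	}
	// Search evaluates the precompiled AST against the decoded document.
	result, err := adults.Search(data)
	if err != nil {
		panic(err)
	}
	fmt.Println(result) // [bob]
}
```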
diff --git a/extender/code/vendor/github.com/jmespath/go-jmespath/api.go b/extender/code/vendor/github.com/jmespath/go-jmespath/api.go new file mode 100644 index 000000000..010efe9bf --- /dev/null +++ b/extender/code/vendor/github.com/jmespath/go-jmespath/api.go @@ -0,0 +1,49 @@ +package jmespath + +import "strconv" + +// JMESPath is the representation of a compiled JMES path query. A JMESPath is +// safe for concurrent use by multiple goroutines. +type JMESPath struct { + ast ASTNode + intr *treeInterpreter +} + +// Compile parses a JMESPath expression and returns, if successful, a JMESPath +// object that can be used to match against data. +func Compile(expression string) (*JMESPath, error) { + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + jmespath := &JMESPath{ast: ast, intr: newInterpreter()} + return jmespath, nil +} + +// MustCompile is like Compile but panics if the expression cannot be parsed. +// It simplifies safe initialization of global variables holding compiled +// JMESPaths. +func MustCompile(expression string) *JMESPath { + jmespath, err := Compile(expression) + if err != nil { + panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error()) + } + return jmespath +} + +// Search evaluates a JMESPath expression against input data and returns the result. +func (jp *JMESPath) Search(data interface{}) (interface{}, error) { + return jp.intr.Execute(jp.ast, data) +} + +// Search evaluates a JMESPath expression against input data and returns the result. +func Search(expression string, data interface{}) (interface{}, error) { + intr := newInterpreter() + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + return intr.Execute(ast, data) +} diff --git a/extender/code/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/extender/code/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go new file mode 100644 index 000000000..1cd2d239c --- /dev/null +++ b/extender/code/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type astNodeType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" + +var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} + +func (i astNodeType) String() string { + if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { + return fmt.Sprintf("astNodeType(%d)", i) + } + return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] +} diff --git a/extender/code/vendor/github.com/jmespath/go-jmespath/functions.go b/extender/code/vendor/github.com/jmespath/go-jmespath/functions.go new file mode 100644 index 000000000..9b7cd89b4 --- /dev/null +++ b/extender/code/vendor/github.com/jmespath/go-jmespath/functions.go @@ -0,0 +1,842 @@ +package jmespath + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "unicode/utf8" +) + +type jpFunction func(arguments []interface{}) (interface{}, error) + +type jpType string + +const ( + jpUnknown jpType = "unknown" + jpNumber jpType = "number" + jpString jpType = 
"string" + jpArray jpType = "array" + jpObject jpType = "object" + jpArrayNumber jpType = "array[number]" + jpArrayString jpType = "array[string]" + jpExpref jpType = "expref" + jpAny jpType = "any" +) + +type functionEntry struct { + name string + arguments []argSpec + handler jpFunction + hasExpRef bool +} + +type argSpec struct { + types []jpType + variadic bool +} + +type byExprString struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprString) Len() int { + return len(a.items) +} +func (a *byExprString) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprString) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(string) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(string) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type byExprFloat struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprFloat) Len() int { + return len(a.items) +} +func (a *byExprFloat) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprFloat) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(float64) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(float64) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type functionCaller struct { + functionTable map[string]functionEntry +} + +func newFunctionCaller() *functionCaller { + caller := &functionCaller{} + caller.functionTable = map[string]functionEntry{ + "length": { + name: "length", + arguments: []argSpec{ + {types: []jpType{jpString, jpArray, jpObject}}, + }, + handler: jpfLength, + }, + "starts_with": { + name: "starts_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfStartsWith, + }, + "abs": { + name: "abs", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfAbs, + }, + "avg": { + name: "avg", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfAvg, + }, + "ceil": { + name: "ceil", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfCeil, + }, + "contains": { + name: "contains", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + {types: []jpType{jpAny}}, + }, + handler: jpfContains, + }, + "ends_with": { + name: "ends_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfEndsWith, + }, + "floor": { + name: "floor", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfFloor, + }, + "map": { + name: "amp", + arguments: []argSpec{ + {types: []jpType{jpExpref}}, + {types: []jpType{jpArray}}, + }, + handler: jpfMap, + hasExpRef: true, + }, + "max": { + name: "max", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMax, + }, + "merge": { + name: "merge", + arguments: []argSpec{ + {types: []jpType{jpObject}, variadic: 
true}, + }, + handler: jpfMerge, + }, + "max_by": { + name: "max_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMaxBy, + hasExpRef: true, + }, + "sum": { + name: "sum", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfSum, + }, + "min": { + name: "min", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMin, + }, + "min_by": { + name: "min_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMinBy, + hasExpRef: true, + }, + "type": { + name: "type", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfType, + }, + "keys": { + name: "keys", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfKeys, + }, + "values": { + name: "values", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfValues, + }, + "sort": { + name: "sort", + arguments: []argSpec{ + {types: []jpType{jpArrayString, jpArrayNumber}}, + }, + handler: jpfSort, + }, + "sort_by": { + name: "sort_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfSortBy, + hasExpRef: true, + }, + "join": { + name: "join", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpArrayString}}, + }, + handler: jpfJoin, + }, + "reverse": { + name: "reverse", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + }, + handler: jpfReverse, + }, + "to_array": { + name: "to_array", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToArray, + }, + "to_string": { + name: "to_string", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToString, + }, + "to_number": { + name: "to_number", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToNumber, + }, + "not_null": { + name: "not_null", + arguments: []argSpec{ + {types: []jpType{jpAny}, variadic: true}, + }, + handler: jpfNotNull, + }, + } + return caller +} + +func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { + if len(e.arguments) == 0 { + return arguments, nil + } + if !e.arguments[len(e.arguments)-1].variadic { + if len(e.arguments) != len(arguments) { + return nil, errors.New("incorrect number of args") + } + for i, spec := range e.arguments { + userArg := arguments[i] + err := spec.typeCheck(userArg) + if err != nil { + return nil, err + } + } + return arguments, nil + } + if len(arguments) < len(e.arguments) { + return nil, errors.New("Invalid arity.") + } + return arguments, nil +} + +func (a *argSpec) typeCheck(arg interface{}) error { + for _, t := range a.types { + switch t { + case jpNumber: + if _, ok := arg.(float64); ok { + return nil + } + case jpString: + if _, ok := arg.(string); ok { + return nil + } + case jpArray: + if isSliceType(arg) { + return nil + } + case jpObject: + if _, ok := arg.(map[string]interface{}); ok { + return nil + } + case jpArrayNumber: + if _, ok := toArrayNum(arg); ok { + return nil + } + case jpArrayString: + if _, ok := toArrayStr(arg); ok { + return nil + } + case jpAny: + return nil + case jpExpref: + if _, ok := arg.(expRef); ok { + return nil + } + } + } + return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) +} + +func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { + entry, ok := f.functionTable[name] + if !ok { + return nil, 
errors.New("unknown function: " + name) + } + resolvedArgs, err := entry.resolveArgs(arguments) + if err != nil { + return nil, err + } + if entry.hasExpRef { + var extra []interface{} + extra = append(extra, intr) + resolvedArgs = append(extra, resolvedArgs...) + } + return entry.handler(resolvedArgs) +} + +func jpfAbs(arguments []interface{}) (interface{}, error) { + num := arguments[0].(float64) + return math.Abs(num), nil +} + +func jpfLength(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if c, ok := arg.(string); ok { + return float64(utf8.RuneCountInString(c)), nil + } else if isSliceType(arg) { + v := reflect.ValueOf(arg) + return float64(v.Len()), nil + } else if c, ok := arg.(map[string]interface{}); ok { + return float64(len(c)), nil + } + return nil, errors.New("could not compute length()") +} + +func jpfStartsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + prefix := arguments[1].(string) + return strings.HasPrefix(search, prefix), nil +} + +func jpfAvg(arguments []interface{}) (interface{}, error) { + // We've already type checked the value so we can safely use + // type assertions. + args := arguments[0].([]interface{}) + length := float64(len(args)) + numerator := 0.0 + for _, n := range args { + numerator += n.(float64) + } + return numerator / length, nil +} +func jpfCeil(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Ceil(val), nil +} +func jpfContains(arguments []interface{}) (interface{}, error) { + search := arguments[0] + el := arguments[1] + if searchStr, ok := search.(string); ok { + if elStr, ok := el.(string); ok { + return strings.Index(searchStr, elStr) != -1, nil + } + return false, nil + } + // Otherwise this is a generic contains for []interface{} + general := search.([]interface{}) + for _, item := range general { + if item == el { + return true, nil + } + } + return false, nil +} +func jpfEndsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + suffix := arguments[1].(string) + return strings.HasSuffix(search, suffix), nil +} +func jpfFloor(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Floor(val), nil +} +func jpfMap(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + exp := arguments[1].(expRef) + node := exp.ref + arr := arguments[2].([]interface{}) + mapped := make([]interface{}, 0, len(arr)) + for _, value := range arr { + current, err := intr.Execute(node, value) + if err != nil { + return nil, err + } + mapped = append(mapped, current) + } + return mapped, nil +} +func jpfMax(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil + } + // Otherwise we're dealing with a max() of strings. 
+ items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil +} +func jpfMerge(arguments []interface{}) (interface{}, error) { + final := make(map[string]interface{}) + for _, m := range arguments { + mapped := m.(map[string]interface{}) + for key, value := range mapped { + final[key] = value + } + } + return final, nil +} +func jpfMaxBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + switch t := start.(type) { + case float64: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + case string: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + default: + return nil, errors.New("invalid type, must be number or string") + } +} +func jpfSum(arguments []interface{}) (interface{}, error) { + items, _ := toArrayNum(arguments[0]) + sum := 0.0 + for _, item := range items { + sum += item + } + return sum, nil +} + +func jpfMin(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil + } + items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil +} + +func jpfMinBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if t, ok := start.(float64); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current < bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else if t, ok := start.(string); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current < 
bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else { + return nil, errors.New("invalid type, must be number or string") + } +} +func jpfType(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if _, ok := arg.(float64); ok { + return "number", nil + } + if _, ok := arg.(string); ok { + return "string", nil + } + if _, ok := arg.([]interface{}); ok { + return "array", nil + } + if _, ok := arg.(map[string]interface{}); ok { + return "object", nil + } + if arg == nil { + return "null", nil + } + if arg == true || arg == false { + return "boolean", nil + } + return nil, errors.New("unknown type") +} +func jpfKeys(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for key := range arg { + collected = append(collected, key) + } + return collected, nil +} +func jpfValues(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for _, value := range arg { + collected = append(collected, value) + } + return collected, nil +} +func jpfSort(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + d := sort.Float64Slice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil + } + // Otherwise we're dealing with sort()'ing strings. + items, _ := toArrayStr(arguments[0]) + d := sort.StringSlice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil +} +func jpfSortBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return arr, nil + } else if len(arr) == 1 { + return arr, nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if _, ok := start.(float64); ok { + sortable := &byExprFloat{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else if _, ok := start.(string); ok { + sortable := &byExprString{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else { + return nil, errors.New("invalid type, must be number or string") + } +} +func jpfJoin(arguments []interface{}) (interface{}, error) { + sep := arguments[0].(string) + // We can't just do arguments[1].([]string), we have to + // manually convert each item to a string. 
+ arrayStr := []string{} + for _, item := range arguments[1].([]interface{}) { + arrayStr = append(arrayStr, item.(string)) + } + return strings.Join(arrayStr, sep), nil +} +func jpfReverse(arguments []interface{}) (interface{}, error) { + if s, ok := arguments[0].(string); ok { + r := []rune(s) + for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r), nil + } + items := arguments[0].([]interface{}) + length := len(items) + reversed := make([]interface{}, length) + for i, item := range items { + reversed[length-(i+1)] = item + } + return reversed, nil +} +func jpfToArray(arguments []interface{}) (interface{}, error) { + if _, ok := arguments[0].([]interface{}); ok { + return arguments[0], nil + } + return arguments[:1:1], nil +} +func jpfToString(arguments []interface{}) (interface{}, error) { + if v, ok := arguments[0].(string); ok { + return v, nil + } + result, err := json.Marshal(arguments[0]) + if err != nil { + return nil, err + } + return string(result), nil +} +func jpfToNumber(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if v, ok := arg.(float64); ok { + return v, nil + } + if v, ok := arg.(string); ok { + conv, err := strconv.ParseFloat(v, 64) + if err != nil { + return nil, nil + } + return conv, nil + } + if _, ok := arg.([]interface{}); ok { + return nil, nil + } + if _, ok := arg.(map[string]interface{}); ok { + return nil, nil + } + if arg == nil { + return nil, nil + } + if arg == true || arg == false { + return nil, nil + } + return nil, errors.New("unknown type") +} +func jpfNotNull(arguments []interface{}) (interface{}, error) { + for _, arg := range arguments { + if arg != nil { + return arg, nil + } + } + return nil, nil +} diff --git a/extender/code/vendor/github.com/jmespath/go-jmespath/go.mod b/extender/code/vendor/github.com/jmespath/go-jmespath/go.mod new file mode 100644 index 000000000..aa1e3f1c9 --- /dev/null +++ b/extender/code/vendor/github.com/jmespath/go-jmespath/go.mod @@ -0,0 +1,5 @@ +module github.com/jmespath/go-jmespath + +go 1.14 + +require github.com/stretchr/testify v1.5.1 diff --git a/extender/code/vendor/github.com/jmespath/go-jmespath/go.sum b/extender/code/vendor/github.com/jmespath/go-jmespath/go.sum new file mode 100644 index 000000000..331fa6982 --- /dev/null +++ b/extender/code/vendor/github.com/jmespath/go-jmespath/go.sum @@ -0,0 +1,11 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/extender/code/vendor/github.com/jmespath/go-jmespath/interpreter.go b/extender/code/vendor/github.com/jmespath/go-jmespath/interpreter.go new file mode 
100644 index 000000000..13c74604c --- /dev/null +++ b/extender/code/vendor/github.com/jmespath/go-jmespath/interpreter.go @@ -0,0 +1,418 @@ +package jmespath + +import ( + "errors" + "reflect" + "unicode" + "unicode/utf8" +) + +/* This is a tree based interpreter. It walks the AST and directly + interprets the AST to search through a JSON document. +*/ + +type treeInterpreter struct { + fCall *functionCaller +} + +func newInterpreter() *treeInterpreter { + interpreter := treeInterpreter{} + interpreter.fCall = newFunctionCaller() + return &interpreter +} + +type expRef struct { + ref ASTNode +} + +// Execute takes an ASTNode and input data and interprets the AST directly. +// It will produce the result of applying the JMESPath expression associated +// with the ASTNode to the input data "value". +func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { + switch node.nodeType { + case ASTComparator: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + right, err := intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + switch node.value { + case tEQ: + return objsEqual(left, right), nil + case tNE: + return !objsEqual(left, right), nil + } + leftNum, ok := left.(float64) + if !ok { + return nil, nil + } + rightNum, ok := right.(float64) + if !ok { + return nil, nil + } + switch node.value { + case tGT: + return leftNum > rightNum, nil + case tGTE: + return leftNum >= rightNum, nil + case tLT: + return leftNum < rightNum, nil + case tLTE: + return leftNum <= rightNum, nil + } + case ASTExpRef: + return expRef{ref: node.children[0]}, nil + case ASTFunctionExpression: + resolvedArgs := []interface{}{} + for _, arg := range node.children { + current, err := intr.Execute(arg, value) + if err != nil { + return nil, err + } + resolvedArgs = append(resolvedArgs, current) + } + return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) + case ASTField: + if m, ok := value.(map[string]interface{}); ok { + key := node.value.(string) + return m[key], nil + } + return intr.fieldFromStruct(node.value.(string), value) + case ASTFilterProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.filterProjectionWithReflection(node, left) + } + return nil, nil + } + compareNode := node.children[2] + collected := []interface{}{} + for _, element := range sliceType { + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil + case ASTFlatten: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + // If we can't type convert to []interface{}, there's + // a chance this could still work via reflection if we're + // dealing with user provided types. + if isSliceType(left) { + return intr.flattenWithReflection(left) + } + return nil, nil + } + flattened := []interface{}{} + for _, element := range sliceType { + if elementSlice, ok := element.([]interface{}); ok { + flattened = append(flattened, elementSlice...) 
+ } else if isSliceType(element) { + reflectFlat := []interface{}{} + v := reflect.ValueOf(element) + for i := 0; i < v.Len(); i++ { + reflectFlat = append(reflectFlat, v.Index(i).Interface()) + } + flattened = append(flattened, reflectFlat...) + } else { + flattened = append(flattened, element) + } + } + return flattened, nil + case ASTIdentity, ASTCurrentNode: + return value, nil + case ASTIndex: + if sliceType, ok := value.([]interface{}); ok { + index := node.value.(int) + if index < 0 { + index += len(sliceType) + } + if index < len(sliceType) && index >= 0 { + return sliceType[index], nil + } + return nil, nil + } + // Otherwise try via reflection. + rv := reflect.ValueOf(value) + if rv.Kind() == reflect.Slice { + index := node.value.(int) + if index < 0 { + index += rv.Len() + } + if index < rv.Len() && index >= 0 { + v := rv.Index(index) + return v.Interface(), nil + } + } + return nil, nil + case ASTKeyValPair: + return intr.Execute(node.children[0], value) + case ASTLiteral: + return node.value, nil + case ASTMultiSelectHash: + if value == nil { + return nil, nil + } + collected := make(map[string]interface{}) + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + key := child.value.(string) + collected[key] = current + } + return collected, nil + case ASTMultiSelectList: + if value == nil { + return nil, nil + } + collected := []interface{}{} + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + collected = append(collected, current) + } + return collected, nil + case ASTOrExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + matched, err = intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + } + return matched, nil + case ASTAndExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return matched, nil + } + return intr.Execute(node.children[1], value) + case ASTNotExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return true, nil + } + return false, nil + case ASTPipe: + result := value + var err error + for _, child := range node.children { + result, err = intr.Execute(child, result) + if err != nil { + return nil, err + } + } + return result, nil + case ASTProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.projectWithReflection(node, left) + } + return nil, nil + } + collected := []interface{}{} + var current interface{} + for _, element := range sliceType { + current, err = intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, nil + case ASTSubexpression, ASTIndexExpression: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + return intr.Execute(node.children[1], left) + case ASTSlice: + sliceType, ok := value.([]interface{}) + if !ok { + if isSliceType(value) { + return intr.sliceWithReflection(node, value) + } + return nil, nil + } + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + 
sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + return slice(sliceType, sliceParams) + case ASTValueProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + mapType, ok := left.(map[string]interface{}) + if !ok { + return nil, nil + } + values := make([]interface{}, 0, len(mapType)) + for _, value := range mapType { + values = append(values, value) + } + collected := []interface{}{} + for _, element := range values { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, nil + } + return nil, errors.New("Unknown AST node: " + node.nodeType.String()) +} + +func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { + rv := reflect.ValueOf(value) + first, n := utf8.DecodeRuneInString(key) + fieldName := string(unicode.ToUpper(first)) + key[n:] + if rv.Kind() == reflect.Struct { + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } else if rv.Kind() == reflect.Ptr { + // Handle multiple levels of indirection? + if rv.IsNil() { + return nil, nil + } + rv = rv.Elem() + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } + return nil, nil +} + +func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + flattened := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + if reflect.TypeOf(element).Kind() == reflect.Slice { + // Then insert the contents of the element + // slice into the flattened slice, + // i.e. flattened = append(flattened, mySlice...) 
+ elementV := reflect.ValueOf(element) + for j := 0; j < elementV.Len(); j++ { + flattened = append( + flattened, elementV.Index(j).Interface()) + } + } else { + flattened = append(flattened, element) + } + } + return flattened, nil +} + +func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + final := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + final = append(final, element) + } + return slice(final, sliceParams) +} + +func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { + compareNode := node.children[2] + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil +} + +func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if result != nil { + collected = append(collected, result) + } + } + return collected, nil +} diff --git a/extender/code/vendor/github.com/jmespath/go-jmespath/lexer.go b/extender/code/vendor/github.com/jmespath/go-jmespath/lexer.go new file mode 100644 index 000000000..817900c8f --- /dev/null +++ b/extender/code/vendor/github.com/jmespath/go-jmespath/lexer.go @@ -0,0 +1,420 @@ +package jmespath + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + "unicode/utf8" +) + +type token struct { + tokenType tokType + value string + position int + length int +} + +type tokType int + +const eof = -1 + +// Lexer contains information about the expression being tokenized. +type Lexer struct { + expression string // The expression provided by the user. + currentPos int // The current position in the string. + lastWidth int // The width of the current rune. + buf bytes.Buffer // Internal buffer used for building up values. +} + +// SyntaxError is the main error used whenever a lexing or parsing error occurs. +type SyntaxError struct { + msg string // Error message displayed to user + Expression string // Expression that generated a SyntaxError + Offset int // The location in the string where the error occurred +} + +func (e SyntaxError) Error() string { + // In the future, it would be good to underline the specific + // location where the error occurred. + return "SyntaxError: " + e.msg +} + +// HighlightLocation will show where the syntax error occurred. +// It will place a "^" character on a line below the expression +// at the point where the syntax error occurred. 
+func (e SyntaxError) HighlightLocation() string { + return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^" +} + +//go:generate stringer -type=tokType +const ( + tUnknown tokType = iota + tStar + tDot + tFilter + tFlatten + tLparen + tRparen + tLbracket + tRbracket + tLbrace + tRbrace + tOr + tPipe + tNumber + tUnquotedIdentifier + tQuotedIdentifier + tComma + tColon + tLT + tLTE + tGT + tGTE + tEQ + tNE + tJSONLiteral + tStringLiteral + tCurrent + tExpref + tAnd + tNot + tEOF +) + +var basicTokens = map[rune]tokType{ + '.': tDot, + '*': tStar, + ',': tComma, + ':': tColon, + '{': tLbrace, + '}': tRbrace, + ']': tRbracket, // tLbracket not included because it could be "[]" + '(': tLparen, + ')': tRparen, + '@': tCurrent, +} + +// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64. +// When using this bitmask just be sure to shift the rune down 64 bits +// before checking against identifierStartBits. +const identifierStartBits uint64 = 576460745995190270 + +// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s. +var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270} + +var whiteSpace = map[rune]bool{ + ' ': true, '\t': true, '\n': true, '\r': true, +} + +func (t token) String() string { + return fmt.Sprintf("Token{%+v, %s, %d, %d}", + t.tokenType, t.value, t.position, t.length) +} + +// NewLexer creates a new JMESPath lexer. +func NewLexer() *Lexer { + lexer := Lexer{} + return &lexer +} + +func (lexer *Lexer) next() rune { + if lexer.currentPos >= len(lexer.expression) { + lexer.lastWidth = 0 + return eof + } + r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:]) + lexer.lastWidth = w + lexer.currentPos += w + return r +} + +func (lexer *Lexer) back() { + lexer.currentPos -= lexer.lastWidth +} + +func (lexer *Lexer) peek() rune { + t := lexer.next() + lexer.back() + return t +} + +// tokenize takes an expression and returns corresponding tokens. +func (lexer *Lexer) tokenize(expression string) ([]token, error) { + var tokens []token + lexer.expression = expression + lexer.currentPos = 0 + lexer.lastWidth = 0 +loop: + for { + r := lexer.next() + if identifierStartBits&(1<<(uint64(r)-64)) > 0 { + t := lexer.consumeUnquotedIdentifier() + tokens = append(tokens, t) + } else if val, ok := basicTokens[r]; ok { + // Basic single char token. + t := token{ + tokenType: val, + value: string(r), + position: lexer.currentPos - lexer.lastWidth, + length: 1, + } + tokens = append(tokens, t) + } else if r == '-' || (r >= '0' && r <= '9') { + t := lexer.consumeNumber() + tokens = append(tokens, t) + } else if r == '[' { + t := lexer.consumeLBracket() + tokens = append(tokens, t) + } else if r == '"' { + t, err := lexer.consumeQuotedIdentifier() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '\'' { + t, err := lexer.consumeRawStringLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '`' { + t, err := lexer.consumeLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '|' { + t := lexer.matchOrElse(r, '|', tOr, tPipe) + tokens = append(tokens, t) + } else if r == '<' { + t := lexer.matchOrElse(r, '=', tLTE, tLT) + tokens = append(tokens, t) + } else if r == '>' { + t := lexer.matchOrElse(r, '=', tGTE, tGT) + tokens = append(tokens, t) + } else if r == '!' 
{ + t := lexer.matchOrElse(r, '=', tNE, tNot) + tokens = append(tokens, t) + } else if r == '=' { + t := lexer.matchOrElse(r, '=', tEQ, tUnknown) + tokens = append(tokens, t) + } else if r == '&' { + t := lexer.matchOrElse(r, '&', tAnd, tExpref) + tokens = append(tokens, t) + } else if r == eof { + break loop + } else if _, ok := whiteSpace[r]; ok { + // Ignore whitespace + } else { + return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) + } + } + tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) + return tokens, nil +} + +// Consume characters until the ending rune "end" is reached. +// If the end of the expression is reached before seeing the +// terminating rune "end", then an error is returned. +// If no error occurs then the matching substring is returned. +// The returned string will not include the ending rune. +func (lexer *Lexer) consumeUntil(end rune) (string, error) { + start := lexer.currentPos + current := lexer.next() + for current != end && current != eof { + if current == '\\' && lexer.peek() != eof { + lexer.next() + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. + return "", SyntaxError{ + msg: "Unclosed delimiter: " + string(end), + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil +} + +func (lexer *Lexer) consumeLiteral() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('`') + if err != nil { + return token{}, err + } + value = strings.Replace(value, "\\`", "`", -1) + return token{ + tokenType: tJSONLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) consumeRawStringLiteral() (token, error) { + start := lexer.currentPos + currentIndex := start + current := lexer.next() + for current != '\'' && lexer.peek() != eof { + if current == '\\' && lexer.peek() == '\'' { + chunk := lexer.expression[currentIndex : lexer.currentPos-1] + lexer.buf.WriteString(chunk) + lexer.buf.WriteString("'") + lexer.next() + currentIndex = lexer.currentPos + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. + return token{}, SyntaxError{ + msg: "Unclosed delimiter: '", + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + if currentIndex < lexer.currentPos { + lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1]) + } + value := lexer.buf.String() + // Reset the buffer so it can be reused again. + lexer.buf.Reset() + return token{ + tokenType: tStringLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: lexer.expression, + Offset: lexer.currentPos - 1, + } +} + +// Checks for a two char token, otherwise matches a single character +// token. This is used whenever a two char token overlaps a single +// char token, e.g. "||" -> tOr, "|" -> tPipe. 
+func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token { + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == second { + t = token{ + tokenType: matchedType, + value: string(first) + string(second), + position: start, + length: 2, + } + } else { + lexer.back() + t = token{ + tokenType: singleCharType, + value: string(first), + position: start, + length: 1, + } + } + return t +} + +func (lexer *Lexer) consumeLBracket() token { + // There's three options here: + // 1. A filter expression "[?" + // 2. A flatten operator "[]" + // 3. A bare rbracket "[" + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == '?' { + t = token{ + tokenType: tFilter, + value: "[?", + position: start, + length: 2, + } + } else if nextRune == ']' { + t = token{ + tokenType: tFlatten, + value: "[]", + position: start, + length: 2, + } + } else { + t = token{ + tokenType: tLbracket, + value: "[", + position: start, + length: 1, + } + lexer.back() + } + return t +} + +func (lexer *Lexer) consumeQuotedIdentifier() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('"') + if err != nil { + return token{}, err + } + var decoded string + asJSON := []byte("\"" + value + "\"") + if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil { + return token{}, err + } + return token{ + tokenType: tQuotedIdentifier, + value: decoded, + position: start - 1, + length: len(decoded), + }, nil +} + +func (lexer *Lexer) consumeUnquotedIdentifier() token { + // Consume runes until we reach the end of an unquoted + // identifier. + start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tUnquotedIdentifier, + value: value, + position: start, + length: lexer.currentPos - start, + } +} + +func (lexer *Lexer) consumeNumber() token { + // Consume runes until we reach something that's not a number. + start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < '0' || r > '9' { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tNumber, + value: value, + position: start, + length: lexer.currentPos - start, + } +} diff --git a/extender/code/vendor/github.com/jmespath/go-jmespath/parser.go b/extender/code/vendor/github.com/jmespath/go-jmespath/parser.go new file mode 100644 index 000000000..4abc303ab --- /dev/null +++ b/extender/code/vendor/github.com/jmespath/go-jmespath/parser.go @@ -0,0 +1,603 @@ +package jmespath + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type astNodeType int + +//go:generate stringer -type astNodeType +const ( + ASTEmpty astNodeType = iota + ASTComparator + ASTCurrentNode + ASTExpRef + ASTFunctionExpression + ASTField + ASTFilterProjection + ASTFlatten + ASTIdentity + ASTIndex + ASTIndexExpression + ASTKeyValPair + ASTLiteral + ASTMultiSelectHash + ASTMultiSelectList + ASTOrExpression + ASTAndExpression + ASTNotExpression + ASTPipe + ASTProjection + ASTSubexpression + ASTSlice + ASTValueProjection +) + +// ASTNode represents the abstract syntax tree of a JMESPath expression. 
+type ASTNode struct { + nodeType astNodeType + value interface{} + children []ASTNode +} + +func (node ASTNode) String() string { + return node.PrettyPrint(0) +} + +// PrettyPrint will pretty print the parsed AST. +// The AST is an implementation detail and this pretty print +// function is provided as a convenience method to help with +// debugging. You should not rely on its output as the internal +// structure of the AST may change at any time. +func (node ASTNode) PrettyPrint(indent int) string { + spaces := strings.Repeat(" ", indent) + output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) + nextIndent := indent + 2 + if node.value != nil { + if converted, ok := node.value.(fmt.Stringer); ok { + // Account for things like comparator nodes + // that are enums with a String() method. + output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) + } else { + output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) + } + } + lastIndex := len(node.children) + if lastIndex > 0 { + output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) + childIndent := nextIndent + 2 + for _, elem := range node.children { + output += elem.PrettyPrint(childIndent) + } + } + output += fmt.Sprintf("%s}\n", spaces) + return output +} + +var bindingPowers = map[tokType]int{ + tEOF: 0, + tUnquotedIdentifier: 0, + tQuotedIdentifier: 0, + tRbracket: 0, + tRparen: 0, + tComma: 0, + tRbrace: 0, + tNumber: 0, + tCurrent: 0, + tExpref: 0, + tColon: 0, + tPipe: 1, + tOr: 2, + tAnd: 3, + tEQ: 5, + tLT: 5, + tLTE: 5, + tGT: 5, + tGTE: 5, + tNE: 5, + tFlatten: 9, + tStar: 20, + tFilter: 21, + tDot: 40, + tNot: 45, + tLbrace: 50, + tLbracket: 55, + tLparen: 60, +} + +// Parser holds state about the current expression being parsed. +type Parser struct { + expression string + tokens []token + index int +} + +// NewParser creates a new JMESPath parser. +func NewParser() *Parser { + p := Parser{} + return &p +} + +// Parse will compile a JMESPath expression. 
+func (p *Parser) Parse(expression string) (ASTNode, error) { + lexer := NewLexer() + p.expression = expression + p.index = 0 + tokens, err := lexer.tokenize(expression) + if err != nil { + return ASTNode{}, err + } + p.tokens = tokens + parsed, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() != tEOF { + return ASTNode{}, p.syntaxError(fmt.Sprintf( + "Unexpected token at the end of the expression: %s", p.current())) + } + return parsed, nil +} + +func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) { + var err error + leftToken := p.lookaheadToken(0) + p.advance() + leftNode, err := p.nud(leftToken) + if err != nil { + return ASTNode{}, err + } + currentToken := p.current() + for bindingPower < bindingPowers[currentToken] { + p.advance() + leftNode, err = p.led(currentToken, leftNode) + if err != nil { + return ASTNode{}, err + } + currentToken = p.current() + } + return leftNode, nil +} + +func (p *Parser) parseIndexExpression() (ASTNode, error) { + if p.lookahead(0) == tColon || p.lookahead(1) == tColon { + return p.parseSliceExpression() + } + indexStr := p.lookaheadToken(0).value + parsedInt, err := strconv.Atoi(indexStr) + if err != nil { + return ASTNode{}, err + } + indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt} + p.advance() + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return indexNode, nil +} + +func (p *Parser) parseSliceExpression() (ASTNode, error) { + parts := []*int{nil, nil, nil} + index := 0 + current := p.current() + for current != tRbracket && index < 3 { + if current == tColon { + index++ + p.advance() + } else if current == tNumber { + parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value) + if err != nil { + return ASTNode{}, err + } + parts[index] = &parsedInt + p.advance() + } else { + return ASTNode{}, p.syntaxError( + "Expected tColon or tNumber" + ", received: " + p.current().String()) + } + current = p.current() + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTSlice, + value: parts, + }, nil +} + +func (p *Parser) match(tokenType tokType) error { + if p.current() == tokenType { + p.advance() + return nil + } + return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String()) +} + +func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) { + switch tokenType { + case tDot: + if p.current() != tStar { + right, err := p.parseDotRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTSubexpression, + children: []ASTNode{node, right}, + }, err + } + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTValueProjection, + children: []ASTNode{node, right}, + }, err + case tPipe: + right, err := p.parseExpression(bindingPowers[tPipe]) + return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err + case tOr: + right, err := p.parseExpression(bindingPowers[tOr]) + return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err + case tAnd: + right, err := p.parseExpression(bindingPowers[tAnd]) + return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err + case tLparen: + name := node.value + var args []ASTNode + for p.current() != tRparen { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() == tComma { + if err := p.match(tComma); err != nil { + return ASTNode{}, err + } + } + args = append(args, expression) + } + 
if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTFunctionExpression, + value: name, + children: args, + }, nil + case tFilter: + return p.parseFilter(node) + case tFlatten: + left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{left, right}, + }, err + case tEQ, tNE, tGT, tGTE, tLT, tLTE: + right, err := p.parseExpression(bindingPowers[tokenType]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTComparator, + value: tokenType, + children: []ASTNode{node, right}, + }, nil + case tLbracket: + tokenType := p.current() + var right ASTNode + var err error + if tokenType == tNumber || tokenType == tColon { + right, err = p.parseIndexExpression() + if err != nil { + return ASTNode{}, err + } + return p.projectIfSlice(node, right) + } + // Otherwise this is a projection. + if err := p.match(tStar); err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{node, right}, + }, nil + } + return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) +} + +func (p *Parser) nud(token token) (ASTNode, error) { + switch token.tokenType { + case tJSONLiteral: + var parsed interface{} + err := json.Unmarshal([]byte(token.value), &parsed) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTLiteral, value: parsed}, nil + case tStringLiteral: + return ASTNode{nodeType: ASTLiteral, value: token.value}, nil + case tUnquotedIdentifier: + return ASTNode{ + nodeType: ASTField, + value: token.value, + }, nil + case tQuotedIdentifier: + node := ASTNode{nodeType: ASTField, value: token.value} + if p.current() == tLparen { + return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) + } + return node, nil + case tStar: + left := ASTNode{nodeType: ASTIdentity} + var right ASTNode + var err error + if p.current() == tRbracket { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + } + return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err + case tFilter: + return p.parseFilter(ASTNode{nodeType: ASTIdentity}) + case tLbrace: + return p.parseMultiSelectHash() + case tFlatten: + left := ASTNode{ + nodeType: ASTFlatten, + children: []ASTNode{{nodeType: ASTIdentity}}, + } + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil + case tLbracket: + tokenType := p.current() + //var right ASTNode + if tokenType == tNumber || tokenType == tColon { + right, err := p.parseIndexExpression() + if err != nil { + return ASTNode{}, err + } + return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) + } else if tokenType == tStar && p.lookahead(1) == tRbracket { + p.advance() + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{{nodeType: ASTIdentity}, right}, + }, nil + } else { + return p.parseMultiSelectList() + } + case tCurrent: + return ASTNode{nodeType: 
ASTCurrentNode}, nil + case tExpref: + expression, err := p.parseExpression(bindingPowers[tExpref]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil + case tNot: + expression, err := p.parseExpression(bindingPowers[tNot]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil + case tLparen: + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return expression, nil + case tEOF: + return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) + } + + return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) +} + +func (p *Parser) parseMultiSelectList() (ASTNode, error) { + var expressions []ASTNode + for { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + expressions = append(expressions, expression) + if p.current() == tRbracket { + break + } + err = p.match(tComma) + if err != nil { + return ASTNode{}, err + } + } + err := p.match(tRbracket) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTMultiSelectList, + children: expressions, + }, nil +} + +func (p *Parser) parseMultiSelectHash() (ASTNode, error) { + var children []ASTNode + for { + keyToken := p.lookaheadToken(0) + if err := p.match(tUnquotedIdentifier); err != nil { + if err := p.match(tQuotedIdentifier); err != nil { + return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") + } + } + keyName := keyToken.value + err := p.match(tColon) + if err != nil { + return ASTNode{}, err + } + value, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + node := ASTNode{ + nodeType: ASTKeyValPair, + value: keyName, + children: []ASTNode{value}, + } + children = append(children, node) + if p.current() == tComma { + err := p.match(tComma) + if err != nil { + return ASTNode{}, nil + } + } else if p.current() == tRbrace { + err := p.match(tRbrace) + if err != nil { + return ASTNode{}, nil + } + break + } + } + return ASTNode{ + nodeType: ASTMultiSelectHash, + children: children, + }, nil +} + +func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { + indexExpr := ASTNode{ + nodeType: ASTIndexExpression, + children: []ASTNode{left, right}, + } + if right.nodeType == ASTSlice { + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{indexExpr, right}, + }, err + } + return indexExpr, nil +} +func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { + var right, condition ASTNode + var err error + condition, err = p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + if p.current() == tFlatten { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tFilter]) + if err != nil { + return ASTNode{}, err + } + } + + return ASTNode{ + nodeType: ASTFilterProjection, + children: []ASTNode{node, right, condition}, + }, nil +} + +func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { + lookahead := p.current() + if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { + return p.parseExpression(bindingPower) + } else if lookahead == tLbracket { + if err := p.match(tLbracket); err != nil 
{ + return ASTNode{}, err + } + return p.parseMultiSelectList() + } else if lookahead == tLbrace { + if err := p.match(tLbrace); err != nil { + return ASTNode{}, err + } + return p.parseMultiSelectHash() + } + return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") +} + +func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { + current := p.current() + if bindingPowers[current] < 10 { + return ASTNode{nodeType: ASTIdentity}, nil + } else if current == tLbracket { + return p.parseExpression(bindingPower) + } else if current == tFilter { + return p.parseExpression(bindingPower) + } else if current == tDot { + err := p.match(tDot) + if err != nil { + return ASTNode{}, err + } + return p.parseDotRHS(bindingPower) + } else { + return ASTNode{}, p.syntaxError("Error") + } +} + +func (p *Parser) lookahead(number int) tokType { + return p.lookaheadToken(number).tokenType +} + +func (p *Parser) current() tokType { + return p.lookahead(0) +} + +func (p *Parser) lookaheadToken(number int) token { + return p.tokens[p.index+number] +} + +func (p *Parser) advance() { + p.index++ +} + +func tokensOneOf(elements []tokType, token tokType) bool { + for _, elem := range elements { + if elem == token { + return true + } + } + return false +} + +func (p *Parser) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: p.lookaheadToken(0).position, + } +} + +// Create a SyntaxError based on the provided token. +// This differs from syntaxError() which creates a SyntaxError +// based on the current lookahead token. +func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: t.position, + } +} diff --git a/extender/code/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/extender/code/vendor/github.com/jmespath/go-jmespath/toktype_string.go new file mode 100644 index 000000000..dae79cbdf --- /dev/null +++ b/extender/code/vendor/github.com/jmespath/go-jmespath/toktype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type=tokType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF" + +var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214} + +func (i tokType) String() string { + if i < 0 || i >= tokType(len(_tokType_index)-1) { + return fmt.Sprintf("tokType(%d)", i) + } + return _tokType_name[_tokType_index[i]:_tokType_index[i+1]] +} diff --git a/extender/code/vendor/github.com/jmespath/go-jmespath/util.go b/extender/code/vendor/github.com/jmespath/go-jmespath/util.go new file mode 100644 index 000000000..ddc1b7d7d --- /dev/null +++ b/extender/code/vendor/github.com/jmespath/go-jmespath/util.go @@ -0,0 +1,185 @@ +package jmespath + +import ( + "errors" + "reflect" +) + +// IsFalse determines if an object is false based on the JMESPath spec. +// JMESPath defines false values to be any of: +// - An empty string array, or hash. +// - The boolean value false. 
+// - nil +func isFalse(value interface{}) bool { + switch v := value.(type) { + case bool: + return !v + case []interface{}: + return len(v) == 0 + case map[string]interface{}: + return len(v) == 0 + case string: + return len(v) == 0 + case nil: + return true + } + // Try the reflection cases before returning false. + rv := reflect.ValueOf(value) + switch rv.Kind() { + case reflect.Struct: + // A struct type will never be false, even if + // all of its values are the zero type. + return false + case reflect.Slice, reflect.Map: + return rv.Len() == 0 + case reflect.Ptr: + if rv.IsNil() { + return true + } + // If it's a pointer type, we'll try to deref the pointer + // and evaluate the pointer value for isFalse. + element := rv.Elem() + return isFalse(element.Interface()) + } + return false +} + +// ObjsEqual is a generic object equality check. +// It will take two arbitrary objects and recursively determine +// if they are equal. +func objsEqual(left interface{}, right interface{}) bool { + return reflect.DeepEqual(left, right) +} + +// SliceParam refers to a single part of a slice. +// A slice consists of a start, a stop, and a step, similar to +// python slices. +type sliceParam struct { + N int + Specified bool +} + +// Slice supports [start:stop:step] style slicing that's supported in JMESPath. +func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { + computed, err := computeSliceParams(len(slice), parts) + if err != nil { + return nil, err + } + start, stop, step := computed[0], computed[1], computed[2] + result := []interface{}{} + if step > 0 { + for i := start; i < stop; i += step { + result = append(result, slice[i]) + } + } else { + for i := start; i > stop; i += step { + result = append(result, slice[i]) + } + } + return result, nil +} + +func computeSliceParams(length int, parts []sliceParam) ([]int, error) { + var start, stop, step int + if !parts[2].Specified { + step = 1 + } else if parts[2].N == 0 { + return nil, errors.New("Invalid slice, step cannot be 0") + } else { + step = parts[2].N + } + var stepValueNegative bool + if step < 0 { + stepValueNegative = true + } else { + stepValueNegative = false + } + + if !parts[0].Specified { + if stepValueNegative { + start = length - 1 + } else { + start = 0 + } + } else { + start = capSlice(length, parts[0].N, step) + } + + if !parts[1].Specified { + if stepValueNegative { + stop = -1 + } else { + stop = length + } + } else { + stop = capSlice(length, parts[1].N, step) + } + return []int{start, stop, step}, nil +} + +func capSlice(length int, actual int, step int) int { + if actual < 0 { + actual += length + if actual < 0 { + if step < 0 { + actual = -1 + } else { + actual = 0 + } + } + } else if actual >= length { + if step < 0 { + actual = length - 1 + } else { + actual = length + } + } + return actual +} + +// ToArrayNum converts an empty interface type to a slice of float64. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. +func toArrayNum(data interface{}) ([]float64, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]float64, len(d)) + for i, el := range d { + item, ok := el.(float64) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +// ToArrayStr converts an empty interface type to a slice of strings. 
+// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. If the input data could be entirely +// converted, then the converted data, along with a second value of true, +// will be returned. +func toArrayStr(data interface{}) ([]string, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]string, len(d)) + for i, el := range d { + item, ok := el.(string) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +func isSliceType(v interface{}) bool { + if v == nil { + return false + } + return reflect.TypeOf(v).Kind() == reflect.Slice +} diff --git a/extender/code/vendor/github.com/json-iterator/go/.codecov.yml b/extender/code/vendor/github.com/json-iterator/go/.codecov.yml new file mode 100644 index 000000000..955dc0be5 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/.codecov.yml @@ -0,0 +1,3 @@ +ignore: + - "output_tests/.*" + diff --git a/extender/code/vendor/github.com/json-iterator/go/.gitignore b/extender/code/vendor/github.com/json-iterator/go/.gitignore new file mode 100644 index 000000000..15556530a --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/.gitignore @@ -0,0 +1,4 @@ +/vendor +/bug_test.go +/coverage.txt +/.idea diff --git a/extender/code/vendor/github.com/json-iterator/go/.travis.yml b/extender/code/vendor/github.com/json-iterator/go/.travis.yml new file mode 100644 index 000000000..449e67cd0 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.8.x + - 1.x + +before_install: + - go get -t -v ./... + +script: + - ./test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/extender/code/vendor/github.com/json-iterator/go/Gopkg.lock b/extender/code/vendor/github.com/json-iterator/go/Gopkg.lock new file mode 100644 index 000000000..c8a9fbb38 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/Gopkg.lock @@ -0,0 +1,21 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "github.com/modern-go/concurrent" + packages = ["."] + revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a" + version = "1.0.0" + +[[projects]] + name = "github.com/modern-go/reflect2" + packages = ["."] + revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" + version = "1.0.1" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/extender/code/vendor/github.com/json-iterator/go/Gopkg.toml b/extender/code/vendor/github.com/json-iterator/go/Gopkg.toml new file mode 100644 index 000000000..313a0f887 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/Gopkg.toml @@ -0,0 +1,26 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + +ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"] + +[[constraint]] + name = "github.com/modern-go/reflect2" + version = "1.0.1" diff --git a/extender/code/vendor/github.com/json-iterator/go/LICENSE b/extender/code/vendor/github.com/json-iterator/go/LICENSE new file mode 100644 index 000000000..2cf4f5ab2 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 json-iterator + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
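The `parseExpression`/`nud`/`led` trio in the jmespath parser added earlier in this diff is a classic Pratt (top-down operator-precedence) parser: `nud` handles a token in prefix position, `led` handles it in infix position, and the loop in `parseExpression` keeps extending the left-hand tree while the next token's entry in `bindingPowers` exceeds the caller's binding power. A minimal standalone sketch of the same control flow, using an invented single-character grammar rather than the vendored token set:

```go
package main

import "fmt"

// A toy Pratt parser over single-character tokens. The token set and
// binding powers here are invented purely for illustration.
type parser struct {
	tokens []byte
	pos    int
}

// Higher power binds tighter; unknown bytes (digits, end of input) map to 0.
var powers = map[byte]int{'+': 10, '*': 20}

func (p *parser) current() byte {
	if p.pos >= len(p.tokens) {
		return 0 // analogue of tEOF
	}
	return p.tokens[p.pos]
}

// nud: a digit in prefix position becomes a leaf.
func (p *parser) nud() string {
	tok := p.tokens[p.pos]
	p.pos++
	return string(tok)
}

// led: an infix operator combines the left tree with a right tree
// parsed at the operator's own binding power.
func (p *parser) led(op byte, left string) string {
	right := p.parseExpression(powers[op])
	return "(" + left + string(op) + right + ")"
}

func (p *parser) parseExpression(bindingPower int) string {
	left := p.nud()
	for bindingPower < powers[p.current()] {
		op := p.current()
		p.pos++
		left = p.led(op, left)
	}
	return left
}

func main() {
	p := &parser{tokens: []byte("1+2*3+4")}
	fmt.Println(p.parseExpression(0)) // ((1+(2*3))+4)
}
```

The loop stops as soon as the next token's power no longer exceeds the caller's, which is what keeps `2*3` together before `+4` is attached; the vendored parser applies the identical rule to its `bindingPowers` table.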
diff --git a/extender/code/vendor/github.com/json-iterator/go/README.md b/extender/code/vendor/github.com/json-iterator/go/README.md new file mode 100644 index 000000000..52b111d5f --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/README.md @@ -0,0 +1,87 @@ +[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge) +[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/json-iterator/go) +[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go) +[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go) +[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go) +[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE) +[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) + +A high-performance 100% compatible drop-in replacement of "encoding/json" + +You can also use thrift like JSON using [thrift-iterator](https://github.com/thrift-iterator/go) + +# Benchmark + +![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png) + +Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go + +Raw Result (easyjson requires static code generation) + +| | ns/op | allocation bytes | allocation times | +| --------------- | ----------- | ---------------- | ---------------- | +| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op | +| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op | +| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op | +| std encode | 2213 ns/op | 712 B/op | 5 allocs/op | +| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op | +| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op | + +Always benchmark with your own workload. +The result depends heavily on the data input. + +# Usage + +100% compatibility with standard lib + +Replace + +```go +import "encoding/json" +json.Marshal(&data) +``` + +with + +```go +import jsoniter "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary +json.Marshal(&data) +``` + +Replace + +```go +import "encoding/json" +json.Unmarshal(input, &data) +``` + +with + +```go +import jsoniter "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary +json.Unmarshal(input, &data) +``` + +[More documentation](http://jsoniter.com/migrate-from-go-std.html) + +# How to get + +``` +go get github.com/json-iterator/go +``` + +# Contribution Welcomed ! 
+ +Contributors + +- [thockin](https://github.com/thockin) +- [mattn](https://github.com/mattn) +- [cch123](https://github.com/cch123) +- [Oleg Shaldybin](https://github.com/olegshaldybin) +- [Jason Toffaletti](https://github.com/toffaletti) + +Report issue or pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) diff --git a/extender/code/vendor/github.com/json-iterator/go/adapter.go b/extender/code/vendor/github.com/json-iterator/go/adapter.go new file mode 100644 index 000000000..92d2cc4a3 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/adapter.go @@ -0,0 +1,150 @@ +package jsoniter + +import ( + "bytes" + "io" +) + +// RawMessage to make replace json with jsoniter +type RawMessage []byte + +// Unmarshal adapts to json/encoding Unmarshal API +// +// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v. +// Refer to https://godoc.org/encoding/json#Unmarshal for more information +func Unmarshal(data []byte, v interface{}) error { + return ConfigDefault.Unmarshal(data, v) +} + +// UnmarshalFromString is a convenient method to read from string instead of []byte +func UnmarshalFromString(str string, v interface{}) error { + return ConfigDefault.UnmarshalFromString(str, v) +} + +// Get quick method to get value from deeply nested JSON structure +func Get(data []byte, path ...interface{}) Any { + return ConfigDefault.Get(data, path...) +} + +// Marshal adapts to json/encoding Marshal API +// +// Marshal returns the JSON encoding of v, adapts to json/encoding Marshal API +// Refer to https://godoc.org/encoding/json#Marshal for more information +func Marshal(v interface{}) ([]byte, error) { + return ConfigDefault.Marshal(v) +} + +// MarshalIndent same as json.MarshalIndent. Prefix is not supported. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + return ConfigDefault.MarshalIndent(v, prefix, indent) +} + +// MarshalToString convenient method to write as string instead of []byte +func MarshalToString(v interface{}) (string, error) { + return ConfigDefault.MarshalToString(v) +} + +// NewDecoder adapts to json/stream NewDecoder API. +// +// NewDecoder returns a new decoder that reads from r. +// +// Instead of a json/encoding Decoder, an Decoder is returned +// Refer to https://godoc.org/encoding/json#NewDecoder for more information +func NewDecoder(reader io.Reader) *Decoder { + return ConfigDefault.NewDecoder(reader) +} + +// Decoder reads and decodes JSON values from an input stream. +// Decoder provides identical APIs with json/stream Decoder (Token() and UseNumber() are in progress) +type Decoder struct { + iter *Iterator +} + +// Decode decode JSON into interface{} +func (adapter *Decoder) Decode(obj interface{}) error { + if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil { + if !adapter.iter.loadMore() { + return io.EOF + } + } + adapter.iter.ReadVal(obj) + err := adapter.iter.Error + if err == io.EOF { + return nil + } + return adapter.iter.Error +} + +// More is there more? 
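+// It peeks via nextToken and unreads the byte, so nothing is consumed.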
+func (adapter *Decoder) More() bool { + iter := adapter.iter + if iter.Error != nil { + return false + } + c := iter.nextToken() + if c == 0 { + return false + } + iter.unreadByte() + return c != ']' && c != '}' +} + +// Buffered remaining buffer +func (adapter *Decoder) Buffered() io.Reader { + remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail] + return bytes.NewReader(remaining) +} + +// UseNumber causes the Decoder to unmarshal a number into an interface{} as a +// Number instead of as a float64. +func (adapter *Decoder) UseNumber() { + cfg := adapter.iter.cfg.configBeforeFrozen + cfg.UseNumber = true + adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions) +} + +// DisallowUnknownFields causes the Decoder to return an error when the destination +// is a struct and the input contains object keys which do not match any +// non-ignored, exported fields in the destination. +func (adapter *Decoder) DisallowUnknownFields() { + cfg := adapter.iter.cfg.configBeforeFrozen + cfg.DisallowUnknownFields = true + adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions) +} + +// NewEncoder same as json.NewEncoder +func NewEncoder(writer io.Writer) *Encoder { + return ConfigDefault.NewEncoder(writer) +} + +// Encoder same as json.Encoder +type Encoder struct { + stream *Stream +} + +// Encode encode interface{} as JSON to io.Writer +func (adapter *Encoder) Encode(val interface{}) error { + adapter.stream.WriteVal(val) + adapter.stream.WriteRaw("\n") + adapter.stream.Flush() + return adapter.stream.Error +} + +// SetIndent set the indention. Prefix is not supported +func (adapter *Encoder) SetIndent(prefix, indent string) { + config := adapter.stream.cfg.configBeforeFrozen + config.IndentionStep = len(indent) + adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions) +} + +// SetEscapeHTML escape html by default, set to false to disable +func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) { + config := adapter.stream.cfg.configBeforeFrozen + config.EscapeHTML = escapeHTML + adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions) +} + +// Valid reports whether data is a valid JSON encoding. +func Valid(data []byte) bool { + return ConfigDefault.Valid(data) +} diff --git a/extender/code/vendor/github.com/json-iterator/go/any.go b/extender/code/vendor/github.com/json-iterator/go/any.go new file mode 100644 index 000000000..f6b8aeab0 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/any.go @@ -0,0 +1,325 @@ +package jsoniter + +import ( + "errors" + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "strconv" + "unsafe" +) + +// Any generic object representation. +// The lazy json implementation holds []byte and parse lazily. 
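+// Conversions are best-effort: invalid values convert to zero values,
+// with the cause available via LastError.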
+type Any interface { + LastError() error + ValueType() ValueType + MustBeValid() Any + ToBool() bool + ToInt() int + ToInt32() int32 + ToInt64() int64 + ToUint() uint + ToUint32() uint32 + ToUint64() uint64 + ToFloat32() float32 + ToFloat64() float64 + ToString() string + ToVal(val interface{}) + Get(path ...interface{}) Any + Size() int + Keys() []string + GetInterface() interface{} + WriteTo(stream *Stream) +} + +type baseAny struct{} + +func (any *baseAny) Get(path ...interface{}) Any { + return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} +} + +func (any *baseAny) Size() int { + return 0 +} + +func (any *baseAny) Keys() []string { + return []string{} +} + +func (any *baseAny) ToVal(obj interface{}) { + panic("not implemented") +} + +// WrapInt32 turn int32 into Any interface +func WrapInt32(val int32) Any { + return &int32Any{baseAny{}, val} +} + +// WrapInt64 turn int64 into Any interface +func WrapInt64(val int64) Any { + return &int64Any{baseAny{}, val} +} + +// WrapUint32 turn uint32 into Any interface +func WrapUint32(val uint32) Any { + return &uint32Any{baseAny{}, val} +} + +// WrapUint64 turn uint64 into Any interface +func WrapUint64(val uint64) Any { + return &uint64Any{baseAny{}, val} +} + +// WrapFloat64 turn float64 into Any interface +func WrapFloat64(val float64) Any { + return &floatAny{baseAny{}, val} +} + +// WrapString turn string into Any interface +func WrapString(val string) Any { + return &stringAny{baseAny{}, val} +} + +// Wrap turn a go object into Any interface +func Wrap(val interface{}) Any { + if val == nil { + return &nilAny{} + } + asAny, isAny := val.(Any) + if isAny { + return asAny + } + typ := reflect2.TypeOf(val) + switch typ.Kind() { + case reflect.Slice: + return wrapArray(val) + case reflect.Struct: + return wrapStruct(val) + case reflect.Map: + return wrapMap(val) + case reflect.String: + return WrapString(val.(string)) + case reflect.Int: + if strconv.IntSize == 32 { + return WrapInt32(int32(val.(int))) + } + return WrapInt64(int64(val.(int))) + case reflect.Int8: + return WrapInt32(int32(val.(int8))) + case reflect.Int16: + return WrapInt32(int32(val.(int16))) + case reflect.Int32: + return WrapInt32(val.(int32)) + case reflect.Int64: + return WrapInt64(val.(int64)) + case reflect.Uint: + if strconv.IntSize == 32 { + return WrapUint32(uint32(val.(uint))) + } + return WrapUint64(uint64(val.(uint))) + case reflect.Uintptr: + if ptrSize == 32 { + return WrapUint32(uint32(val.(uintptr))) + } + return WrapUint64(uint64(val.(uintptr))) + case reflect.Uint8: + return WrapUint32(uint32(val.(uint8))) + case reflect.Uint16: + return WrapUint32(uint32(val.(uint16))) + case reflect.Uint32: + return WrapUint32(uint32(val.(uint32))) + case reflect.Uint64: + return WrapUint64(val.(uint64)) + case reflect.Float32: + return WrapFloat64(float64(val.(float32))) + case reflect.Float64: + return WrapFloat64(val.(float64)) + case reflect.Bool: + if val.(bool) == true { + return &trueAny{} + } + return &falseAny{} + } + return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)} +} + +// ReadAny read next JSON element as an Any object. It is a better json.RawMessage. 
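+// Objects, arrays and numbers are captured as raw bytes and only decoded when accessed.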
+func (iter *Iterator) ReadAny() Any { + return iter.readAny() +} + +func (iter *Iterator) readAny() Any { + c := iter.nextToken() + switch c { + case '"': + iter.unreadByte() + return &stringAny{baseAny{}, iter.ReadString()} + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + return &nilAny{} + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + return &trueAny{} + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + return &falseAny{} + case '{': + return iter.readObjectAny() + case '[': + return iter.readArrayAny() + case '-': + return iter.readNumberAny(false) + case 0: + return &invalidAny{baseAny{}, errors.New("input is empty")} + default: + return iter.readNumberAny(true) + } +} + +func (iter *Iterator) readNumberAny(positive bool) Any { + iter.startCapture(iter.head - 1) + iter.skipNumber() + lazyBuf := iter.stopCapture() + return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readObjectAny() Any { + iter.startCapture(iter.head - 1) + iter.skipObject() + lazyBuf := iter.stopCapture() + return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readArrayAny() Any { + iter.startCapture(iter.head - 1) + iter.skipArray() + lazyBuf := iter.stopCapture() + return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func locateObjectField(iter *Iterator, target string) []byte { + var found []byte + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + if field == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + return true + }) + return found +} + +func locateArrayElement(iter *Iterator, target int) []byte { + var found []byte + n := 0 + iter.ReadArrayCB(func(iter *Iterator) bool { + if n == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + n++ + return true + }) + return found +} + +func locatePath(iter *Iterator, path []interface{}) Any { + for i, pathKeyObj := range path { + switch pathKey := pathKeyObj.(type) { + case string: + valueBytes := locateObjectField(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int: + valueBytes := locateArrayElement(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int32: + if '*' == pathKey { + return iter.readAny().Get(path[i:]...) 
+ } + return newInvalidAny(path[i:]) + default: + return newInvalidAny(path[i:]) + } + } + if iter.Error != nil && iter.Error != io.EOF { + return &invalidAny{baseAny{}, iter.Error} + } + return iter.readAny() +} + +var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem() + +func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +type anyCodec struct { + valType reflect2.Type +} + +func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + panic("not implemented") +} + +func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + any.WriteTo(stream) +} + +func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + return any.Size() == 0 +} + +type directAnyCodec struct { +} + +func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *(*Any)(ptr) = iter.readAny() +} + +func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + any := *(*Any)(ptr) + if any == nil { + stream.WriteNil() + return + } + any.WriteTo(stream) +} + +func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool { + any := *(*Any)(ptr) + return any.Size() == 0 +} diff --git a/extender/code/vendor/github.com/json-iterator/go/any_array.go b/extender/code/vendor/github.com/json-iterator/go/any_array.go new file mode 100644 index 000000000..0449e9aa4 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/any_array.go @@ -0,0 +1,278 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type arrayLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *arrayLazyAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayLazyAny) MustBeValid() Any { + return any +} + +func (any *arrayLazyAny) LastError() error { + return any.err +} + +func (any *arrayLazyAny) ToBool() bool { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.ReadArray() +} + +func (any *arrayLazyAny) ToInt() int { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt32() int32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt64() int64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint() uint { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint32() uint32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint64() uint64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat32() float32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat64() float64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *arrayLazyAny) ToVal(val interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(val) +} + +func (any *arrayLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: 
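+		// Exact index: scan the raw buffer, skipping earlier elements without decoding them.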
+ iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + valueBytes := locateArrayElement(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + arr := make([]Any, 0) + iter.ReadArrayCB(func(iter *Iterator) bool { + found := iter.readAny().Get(path[1:]...) + if found.ValueType() != InvalidValue { + arr = append(arr, found) + } + return true + }) + return wrapArray(arr) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadArrayCB(func(iter *Iterator) bool { + size++ + iter.Skip() + return true + }) + return size +} + +func (any *arrayLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *arrayLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type arrayAny struct { + baseAny + val reflect.Value +} + +func wrapArray(val interface{}) *arrayAny { + return &arrayAny{baseAny{}, reflect.ValueOf(val)} +} + +func (any *arrayAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayAny) MustBeValid() Any { + return any +} + +func (any *arrayAny) LastError() error { + return nil +} + +func (any *arrayAny) ToBool() bool { + return any.val.Len() != 0 +} + +func (any *arrayAny) ToInt() int { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt32() int32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt64() int64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint() uint { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint32() uint32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint64() uint64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat32() float32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat64() float64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToString() string { + str, _ := MarshalToString(any.val.Interface()) + return str +} + +func (any *arrayAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + if firstPath < 0 || firstPath >= any.val.Len() { + return newInvalidAny(path) + } + return Wrap(any.val.Index(firstPath).Interface()) + case int32: + if '*' == firstPath { + mappedAll := make([]Any, 0) + for i := 0; i < any.val.Len(); i++ { + mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...) 
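+				// Keep only elements whose sub-path resolved to a valid value.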
+ if mapped.ValueType() != InvalidValue { + mappedAll = append(mappedAll, mapped) + } + } + return wrapArray(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayAny) Size() int { + return any.val.Len() +} + +func (any *arrayAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *arrayAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/extender/code/vendor/github.com/json-iterator/go/any_bool.go b/extender/code/vendor/github.com/json-iterator/go/any_bool.go new file mode 100644 index 000000000..9452324af --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/any_bool.go @@ -0,0 +1,137 @@ +package jsoniter + +type trueAny struct { + baseAny +} + +func (any *trueAny) LastError() error { + return nil +} + +func (any *trueAny) ToBool() bool { + return true +} + +func (any *trueAny) ToInt() int { + return 1 +} + +func (any *trueAny) ToInt32() int32 { + return 1 +} + +func (any *trueAny) ToInt64() int64 { + return 1 +} + +func (any *trueAny) ToUint() uint { + return 1 +} + +func (any *trueAny) ToUint32() uint32 { + return 1 +} + +func (any *trueAny) ToUint64() uint64 { + return 1 +} + +func (any *trueAny) ToFloat32() float32 { + return 1 +} + +func (any *trueAny) ToFloat64() float64 { + return 1 +} + +func (any *trueAny) ToString() string { + return "true" +} + +func (any *trueAny) WriteTo(stream *Stream) { + stream.WriteTrue() +} + +func (any *trueAny) Parse() *Iterator { + return nil +} + +func (any *trueAny) GetInterface() interface{} { + return true +} + +func (any *trueAny) ValueType() ValueType { + return BoolValue +} + +func (any *trueAny) MustBeValid() Any { + return any +} + +type falseAny struct { + baseAny +} + +func (any *falseAny) LastError() error { + return nil +} + +func (any *falseAny) ToBool() bool { + return false +} + +func (any *falseAny) ToInt() int { + return 0 +} + +func (any *falseAny) ToInt32() int32 { + return 0 +} + +func (any *falseAny) ToInt64() int64 { + return 0 +} + +func (any *falseAny) ToUint() uint { + return 0 +} + +func (any *falseAny) ToUint32() uint32 { + return 0 +} + +func (any *falseAny) ToUint64() uint64 { + return 0 +} + +func (any *falseAny) ToFloat32() float32 { + return 0 +} + +func (any *falseAny) ToFloat64() float64 { + return 0 +} + +func (any *falseAny) ToString() string { + return "false" +} + +func (any *falseAny) WriteTo(stream *Stream) { + stream.WriteFalse() +} + +func (any *falseAny) Parse() *Iterator { + return nil +} + +func (any *falseAny) GetInterface() interface{} { + return false +} + +func (any *falseAny) ValueType() ValueType { + return BoolValue +} + +func (any *falseAny) MustBeValid() Any { + return any +} diff --git a/extender/code/vendor/github.com/json-iterator/go/any_float.go b/extender/code/vendor/github.com/json-iterator/go/any_float.go new file mode 100644 index 000000000..35fdb0949 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/any_float.go @@ -0,0 +1,83 @@ +package jsoniter + +import ( + "strconv" +) + +type floatAny struct { + baseAny + val float64 +} + +func (any *floatAny) Parse() *Iterator { + return nil +} + +func (any *floatAny) ValueType() ValueType { + return NumberValue +} + +func (any *floatAny) MustBeValid() Any { + return any +} + +func (any *floatAny) LastError() error { + return nil +} + +func (any *floatAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *floatAny) ToInt() int { + return int(any.val) +} + +func (any *floatAny) ToInt32() int32 { + return 
int32(any.val) +} + +func (any *floatAny) ToInt64() int64 { + return int64(any.val) +} + +func (any *floatAny) ToUint() uint { + if any.val > 0 { + return uint(any.val) + } + return 0 +} + +func (any *floatAny) ToUint32() uint32 { + if any.val > 0 { + return uint32(any.val) + } + return 0 +} + +func (any *floatAny) ToUint64() uint64 { + if any.val > 0 { + return uint64(any.val) + } + return 0 +} + +func (any *floatAny) ToFloat32() float32 { + return float32(any.val) +} + +func (any *floatAny) ToFloat64() float64 { + return any.val +} + +func (any *floatAny) ToString() string { + return strconv.FormatFloat(any.val, 'E', -1, 64) +} + +func (any *floatAny) WriteTo(stream *Stream) { + stream.WriteFloat64(any.val) +} + +func (any *floatAny) GetInterface() interface{} { + return any.val +} diff --git a/extender/code/vendor/github.com/json-iterator/go/any_int32.go b/extender/code/vendor/github.com/json-iterator/go/any_int32.go new file mode 100644 index 000000000..1b56f3991 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/any_int32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int32Any struct { + baseAny + val int32 +} + +func (any *int32Any) LastError() error { + return nil +} + +func (any *int32Any) ValueType() ValueType { + return NumberValue +} + +func (any *int32Any) MustBeValid() Any { + return any +} + +func (any *int32Any) ToBool() bool { + return any.val != 0 +} + +func (any *int32Any) ToInt() int { + return int(any.val) +} + +func (any *int32Any) ToInt32() int32 { + return any.val +} + +func (any *int32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *int32Any) ToUint() uint { + return uint(any.val) +} + +func (any *int32Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *int32Any) WriteTo(stream *Stream) { + stream.WriteInt32(any.val) +} + +func (any *int32Any) Parse() *Iterator { + return nil +} + +func (any *int32Any) GetInterface() interface{} { + return any.val +} diff --git a/extender/code/vendor/github.com/json-iterator/go/any_int64.go b/extender/code/vendor/github.com/json-iterator/go/any_int64.go new file mode 100644 index 000000000..c440d72b6 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/any_int64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int64Any struct { + baseAny + val int64 +} + +func (any *int64Any) LastError() error { + return nil +} + +func (any *int64Any) ValueType() ValueType { + return NumberValue +} + +func (any *int64Any) MustBeValid() Any { + return any +} + +func (any *int64Any) ToBool() bool { + return any.val != 0 +} + +func (any *int64Any) ToInt() int { + return int(any.val) +} + +func (any *int64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *int64Any) ToInt64() int64 { + return any.val +} + +func (any *int64Any) ToUint() uint { + return uint(any.val) +} + +func (any *int64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int64Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int64Any) ToString() string { + return 
strconv.FormatInt(any.val, 10) +} + +func (any *int64Any) WriteTo(stream *Stream) { + stream.WriteInt64(any.val) +} + +func (any *int64Any) Parse() *Iterator { + return nil +} + +func (any *int64Any) GetInterface() interface{} { + return any.val +} diff --git a/extender/code/vendor/github.com/json-iterator/go/any_invalid.go b/extender/code/vendor/github.com/json-iterator/go/any_invalid.go new file mode 100644 index 000000000..1d859eac3 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/any_invalid.go @@ -0,0 +1,82 @@ +package jsoniter + +import "fmt" + +type invalidAny struct { + baseAny + err error +} + +func newInvalidAny(path []interface{}) *invalidAny { + return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)} +} + +func (any *invalidAny) LastError() error { + return any.err +} + +func (any *invalidAny) ValueType() ValueType { + return InvalidValue +} + +func (any *invalidAny) MustBeValid() Any { + panic(any.err) +} + +func (any *invalidAny) ToBool() bool { + return false +} + +func (any *invalidAny) ToInt() int { + return 0 +} + +func (any *invalidAny) ToInt32() int32 { + return 0 +} + +func (any *invalidAny) ToInt64() int64 { + return 0 +} + +func (any *invalidAny) ToUint() uint { + return 0 +} + +func (any *invalidAny) ToUint32() uint32 { + return 0 +} + +func (any *invalidAny) ToUint64() uint64 { + return 0 +} + +func (any *invalidAny) ToFloat32() float32 { + return 0 +} + +func (any *invalidAny) ToFloat64() float64 { + return 0 +} + +func (any *invalidAny) ToString() string { + return "" +} + +func (any *invalidAny) WriteTo(stream *Stream) { +} + +func (any *invalidAny) Get(path ...interface{}) Any { + if any.err == nil { + return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)} + } + return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)} +} + +func (any *invalidAny) Parse() *Iterator { + return nil +} + +func (any *invalidAny) GetInterface() interface{} { + return nil +} diff --git a/extender/code/vendor/github.com/json-iterator/go/any_nil.go b/extender/code/vendor/github.com/json-iterator/go/any_nil.go new file mode 100644 index 000000000..d04cb54c1 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/any_nil.go @@ -0,0 +1,69 @@ +package jsoniter + +type nilAny struct { + baseAny +} + +func (any *nilAny) LastError() error { + return nil +} + +func (any *nilAny) ValueType() ValueType { + return NilValue +} + +func (any *nilAny) MustBeValid() Any { + return any +} + +func (any *nilAny) ToBool() bool { + return false +} + +func (any *nilAny) ToInt() int { + return 0 +} + +func (any *nilAny) ToInt32() int32 { + return 0 +} + +func (any *nilAny) ToInt64() int64 { + return 0 +} + +func (any *nilAny) ToUint() uint { + return 0 +} + +func (any *nilAny) ToUint32() uint32 { + return 0 +} + +func (any *nilAny) ToUint64() uint64 { + return 0 +} + +func (any *nilAny) ToFloat32() float32 { + return 0 +} + +func (any *nilAny) ToFloat64() float64 { + return 0 +} + +func (any *nilAny) ToString() string { + return "" +} + +func (any *nilAny) WriteTo(stream *Stream) { + stream.WriteNil() +} + +func (any *nilAny) Parse() *Iterator { + return nil +} + +func (any *nilAny) GetInterface() interface{} { + return nil +} diff --git a/extender/code/vendor/github.com/json-iterator/go/any_number.go b/extender/code/vendor/github.com/json-iterator/go/any_number.go new file mode 100644 index 000000000..9d1e901a6 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/any_number.go @@ -0,0 +1,123 @@ +package 
jsoniter + +import ( + "io" + "unsafe" +) + +type numberLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *numberLazyAny) ValueType() ValueType { + return NumberValue +} + +func (any *numberLazyAny) MustBeValid() Any { + return any +} + +func (any *numberLazyAny) LastError() error { + return any.err +} + +func (any *numberLazyAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *numberLazyAny) ToInt() int { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToInt32() int32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToInt64() int64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint() uint { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint32() uint32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint64() uint64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToFloat32() float32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToFloat64() float64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *numberLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *numberLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} diff --git a/extender/code/vendor/github.com/json-iterator/go/any_object.go b/extender/code/vendor/github.com/json-iterator/go/any_object.go new file mode 100644 index 000000000..c44ef5c98 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/any_object.go @@ -0,0 +1,374 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type objectLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *objectLazyAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectLazyAny) MustBeValid() Any { + return any +} + +func (any *objectLazyAny) LastError() error { + return any.err +} + +func (any *objectLazyAny) ToBool() bool { + return true +} + +func (any *objectLazyAny) ToInt() int { + return 0 +} + +func (any *objectLazyAny) ToInt32() int32 { + return 0 +} + 
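+// As with ToInt and ToInt32 above, the remaining numeric conversions on objects yield 0.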
+func (any *objectLazyAny) ToInt64() int64 { + return 0 +} + +func (any *objectLazyAny) ToUint() uint { + return 0 +} + +func (any *objectLazyAny) ToUint32() uint32 { + return 0 +} + +func (any *objectLazyAny) ToUint64() uint64 { + return 0 +} + +func (any *objectLazyAny) ToFloat32() float32 { + return 0 +} + +func (any *objectLazyAny) ToFloat64() float64 { + return 0 +} + +func (any *objectLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *objectLazyAny) ToVal(obj interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(obj) +} + +func (any *objectLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + valueBytes := locateObjectField(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + mapped := locatePath(iter, path[1:]) + if mapped.ValueType() != InvalidValue { + mappedAll[field] = mapped + } + return true + }) + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectLazyAny) Keys() []string { + keys := []string{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + iter.Skip() + keys = append(keys, field) + return true + }) + return keys +} + +func (any *objectLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + size++ + return true + }) + return size +} + +func (any *objectLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *objectLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type objectAny struct { + baseAny + err error + val reflect.Value +} + +func wrapStruct(val interface{}) *objectAny { + return &objectAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *objectAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectAny) MustBeValid() Any { + return any +} + +func (any *objectAny) Parse() *Iterator { + return nil +} + +func (any *objectAny) LastError() error { + return any.err +} + +func (any *objectAny) ToBool() bool { + return any.val.NumField() != 0 +} + +func (any *objectAny) ToInt() int { + return 0 +} + +func (any *objectAny) ToInt32() int32 { + return 0 +} + +func (any *objectAny) ToInt64() int64 { + return 0 +} + +func (any *objectAny) ToUint() uint { + return 0 +} + +func (any *objectAny) ToUint32() uint32 { + return 0 +} + +func (any *objectAny) ToUint64() uint64 { + return 0 +} + +func (any *objectAny) ToFloat32() float32 { + return 0 +} + +func (any *objectAny) ToFloat64() float64 { + return 0 +} + +func (any *objectAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *objectAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + field := 
any.val.FieldByName(firstPath) + if !field.IsValid() { + return newInvalidAny(path) + } + return Wrap(field.Interface()) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for i := 0; i < any.val.NumField(); i++ { + field := any.val.Field(i) + if field.CanInterface() { + mapped := Wrap(field.Interface()).Get(path[1:]...) + if mapped.ValueType() != InvalidValue { + mappedAll[any.val.Type().Field(i).Name] = mapped + } + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectAny) Keys() []string { + keys := make([]string, 0, any.val.NumField()) + for i := 0; i < any.val.NumField(); i++ { + keys = append(keys, any.val.Type().Field(i).Name) + } + return keys +} + +func (any *objectAny) Size() int { + return any.val.NumField() +} + +func (any *objectAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *objectAny) GetInterface() interface{} { + return any.val.Interface() +} + +type mapAny struct { + baseAny + err error + val reflect.Value +} + +func wrapMap(val interface{}) *mapAny { + return &mapAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *mapAny) ValueType() ValueType { + return ObjectValue +} + +func (any *mapAny) MustBeValid() Any { + return any +} + +func (any *mapAny) Parse() *Iterator { + return nil +} + +func (any *mapAny) LastError() error { + return any.err +} + +func (any *mapAny) ToBool() bool { + return true +} + +func (any *mapAny) ToInt() int { + return 0 +} + +func (any *mapAny) ToInt32() int32 { + return 0 +} + +func (any *mapAny) ToInt64() int64 { + return 0 +} + +func (any *mapAny) ToUint() uint { + return 0 +} + +func (any *mapAny) ToUint32() uint32 { + return 0 +} + +func (any *mapAny) ToUint64() uint64 { + return 0 +} + +func (any *mapAny) ToFloat32() float32 { + return 0 +} + +func (any *mapAny) ToFloat64() float64 { + return 0 +} + +func (any *mapAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *mapAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for _, key := range any.val.MapKeys() { + keyAsStr := key.String() + element := Wrap(any.val.MapIndex(key).Interface()) + mapped := element.Get(path[1:]...) 
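+				// Drop keys whose sub-path did not resolve.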
+ if mapped.ValueType() != InvalidValue { + mappedAll[keyAsStr] = mapped + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + value := any.val.MapIndex(reflect.ValueOf(firstPath)) + if !value.IsValid() { + return newInvalidAny(path) + } + return Wrap(value.Interface()) + } +} + +func (any *mapAny) Keys() []string { + keys := make([]string, 0, any.val.Len()) + for _, key := range any.val.MapKeys() { + keys = append(keys, key.String()) + } + return keys +} + +func (any *mapAny) Size() int { + return any.val.Len() +} + +func (any *mapAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *mapAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/extender/code/vendor/github.com/json-iterator/go/any_str.go b/extender/code/vendor/github.com/json-iterator/go/any_str.go new file mode 100644 index 000000000..1f12f6612 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/any_str.go @@ -0,0 +1,166 @@ +package jsoniter + +import ( + "fmt" + "strconv" +) + +type stringAny struct { + baseAny + val string +} + +func (any *stringAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} +} + +func (any *stringAny) Parse() *Iterator { + return nil +} + +func (any *stringAny) ValueType() ValueType { + return StringValue +} + +func (any *stringAny) MustBeValid() Any { + return any +} + +func (any *stringAny) LastError() error { + return nil +} + +func (any *stringAny) ToBool() bool { + str := any.ToString() + if str == "0" { + return false + } + for _, c := range str { + switch c { + case ' ', '\n', '\r', '\t': + default: + return true + } + } + return false +} + +func (any *stringAny) ToInt() int { + return int(any.ToInt64()) + +} + +func (any *stringAny) ToInt32() int32 { + return int32(any.ToInt64()) +} + +func (any *stringAny) ToInt64() int64 { + if any.val == "" { + return 0 + } + + flag := 1 + startPos := 0 + if any.val[0] == '+' || any.val[0] == '-' { + startPos = 1 + } + + if any.val[0] == '-' { + flag = -1 + } + + endPos := startPos + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64) + return int64(flag) * parsed +} + +func (any *stringAny) ToUint() uint { + return uint(any.ToUint64()) +} + +func (any *stringAny) ToUint32() uint32 { + return uint32(any.ToUint64()) +} + +func (any *stringAny) ToUint64() uint64 { + if any.val == "" { + return 0 + } + + startPos := 0 + + if any.val[0] == '-' { + return 0 + } + if any.val[0] == '+' { + startPos = 1 + } + + endPos := startPos + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64) + return parsed +} + +func (any *stringAny) ToFloat32() float32 { + return float32(any.ToFloat64()) +} + +func (any *stringAny) ToFloat64() float64 { + if len(any.val) == 0 { + return 0 + } + + // first char invalid + if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') { + return 0 + } + + // extract valid num expression from string + // eg 123true => 123, -12.12xxa => -12.12 + endPos := 1 + for i := 1; i < len(any.val); i++ { + if any.val[i] == '.' 
|| any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' { + endPos = i + 1 + continue + } + + // end position is the first char which is not digit + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + endPos = i + break + } + } + parsed, _ := strconv.ParseFloat(any.val[:endPos], 64) + return parsed +} + +func (any *stringAny) ToString() string { + return any.val +} + +func (any *stringAny) WriteTo(stream *Stream) { + stream.WriteString(any.val) +} + +func (any *stringAny) GetInterface() interface{} { + return any.val +} diff --git a/extender/code/vendor/github.com/json-iterator/go/any_uint32.go b/extender/code/vendor/github.com/json-iterator/go/any_uint32.go new file mode 100644 index 000000000..656bbd33d --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/any_uint32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint32Any struct { + baseAny + val uint32 +} + +func (any *uint32Any) LastError() error { + return nil +} + +func (any *uint32Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint32Any) MustBeValid() Any { + return any +} + +func (any *uint32Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint32Any) ToInt() int { + return int(any.val) +} + +func (any *uint32Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint32Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint32Any) ToUint32() uint32 { + return any.val +} + +func (any *uint32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *uint32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *uint32Any) WriteTo(stream *Stream) { + stream.WriteUint32(any.val) +} + +func (any *uint32Any) Parse() *Iterator { + return nil +} + +func (any *uint32Any) GetInterface() interface{} { + return any.val +} diff --git a/extender/code/vendor/github.com/json-iterator/go/any_uint64.go b/extender/code/vendor/github.com/json-iterator/go/any_uint64.go new file mode 100644 index 000000000..7df2fce33 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/any_uint64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint64Any struct { + baseAny + val uint64 +} + +func (any *uint64Any) LastError() error { + return nil +} + +func (any *uint64Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint64Any) MustBeValid() Any { + return any +} + +func (any *uint64Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint64Any) ToInt() int { + return int(any.val) +} + +func (any *uint64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint64Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint64Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *uint64Any) ToUint64() uint64 { + return any.val +} + +func (any *uint64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint64Any) ToString() string { + return strconv.FormatUint(any.val, 10) +} + +func (any *uint64Any) WriteTo(stream *Stream) { + stream.WriteUint64(any.val) +} + +func (any *uint64Any) Parse() *Iterator { + return nil +} + +func (any 
*uint64Any) GetInterface() interface{} { + return any.val +} diff --git a/extender/code/vendor/github.com/json-iterator/go/build.sh b/extender/code/vendor/github.com/json-iterator/go/build.sh new file mode 100644 index 000000000..b45ef6883 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/build.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -e +set -x + +if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then + mkdir -p /tmp/build-golang/src/github.com/json-iterator + ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go +fi +export GOPATH=/tmp/build-golang +go get -u github.com/golang/dep/cmd/dep +cd /tmp/build-golang/src/github.com/json-iterator/go +exec $GOPATH/bin/dep ensure -update diff --git a/extender/code/vendor/github.com/json-iterator/go/config.go b/extender/code/vendor/github.com/json-iterator/go/config.go new file mode 100644 index 000000000..2adcdc3b7 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/config.go @@ -0,0 +1,375 @@ +package jsoniter + +import ( + "encoding/json" + "io" + "reflect" + "sync" + "unsafe" + + "github.com/modern-go/concurrent" + "github.com/modern-go/reflect2" +) + +// Config customize how the API should behave. +// The API is created from Config by Froze. +type Config struct { + IndentionStep int + MarshalFloatWith6Digits bool + EscapeHTML bool + SortMapKeys bool + UseNumber bool + DisallowUnknownFields bool + TagKey string + OnlyTaggedField bool + ValidateJsonRawMessage bool + ObjectFieldMustBeSimpleString bool + CaseSensitive bool +} + +// API the public interface of this package. +// Primary Marshal and Unmarshal. +type API interface { + IteratorPool + StreamPool + MarshalToString(v interface{}) (string, error) + Marshal(v interface{}) ([]byte, error) + MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) + UnmarshalFromString(str string, v interface{}) error + Unmarshal(data []byte, v interface{}) error + Get(data []byte, path ...interface{}) Any + NewEncoder(writer io.Writer) *Encoder + NewDecoder(reader io.Reader) *Decoder + Valid(data []byte) bool + RegisterExtension(extension Extension) + DecoderOf(typ reflect2.Type) ValDecoder + EncoderOf(typ reflect2.Type) ValEncoder +} + +// ConfigDefault the default API +var ConfigDefault = Config{ + EscapeHTML: true, +}.Froze() + +// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior +var ConfigCompatibleWithStandardLibrary = Config{ + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, +}.Froze() + +// ConfigFastest marshals float with only 6 digits precision +var ConfigFastest = Config{ + EscapeHTML: false, + MarshalFloatWith6Digits: true, // will lose precession + ObjectFieldMustBeSimpleString: true, // do not unescape object field +}.Froze() + +type frozenConfig struct { + configBeforeFrozen Config + sortMapKeys bool + indentionStep int + objectFieldMustBeSimpleString bool + onlyTaggedField bool + disallowUnknownFields bool + decoderCache *concurrent.Map + encoderCache *concurrent.Map + encoderExtension Extension + decoderExtension Extension + extraExtensions []Extension + streamPool *sync.Pool + iteratorPool *sync.Pool + caseSensitive bool +} + +func (cfg *frozenConfig) initCache() { + cfg.decoderCache = concurrent.NewMap() + cfg.encoderCache = concurrent.NewMap() +} + +func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) { + cfg.decoderCache.Store(cacheKey, decoder) +} + +func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) 
{ + cfg.encoderCache.Store(cacheKey, encoder) +} + +func (cfg *frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder { + decoder, found := cfg.decoderCache.Load(cacheKey) + if found { + return decoder.(ValDecoder) + } + return nil +} + +func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder { + encoder, found := cfg.encoderCache.Load(cacheKey) + if found { + return encoder.(ValEncoder) + } + return nil +} + +var cfgCache = concurrent.NewMap() + +func getFrozenConfigFromCache(cfg Config) *frozenConfig { + obj, found := cfgCache.Load(cfg) + if found { + return obj.(*frozenConfig) + } + return nil +} + +func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) { + cfgCache.Store(cfg, frozenConfig) +} + +// Froze forge API from config +func (cfg Config) Froze() API { + api := &frozenConfig{ + sortMapKeys: cfg.SortMapKeys, + indentionStep: cfg.IndentionStep, + objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString, + onlyTaggedField: cfg.OnlyTaggedField, + disallowUnknownFields: cfg.DisallowUnknownFields, + caseSensitive: cfg.CaseSensitive, + } + api.streamPool = &sync.Pool{ + New: func() interface{} { + return NewStream(api, nil, 512) + }, + } + api.iteratorPool = &sync.Pool{ + New: func() interface{} { + return NewIterator(api) + }, + } + api.initCache() + encoderExtension := EncoderExtension{} + decoderExtension := DecoderExtension{} + if cfg.MarshalFloatWith6Digits { + api.marshalFloatWith6Digits(encoderExtension) + } + if cfg.EscapeHTML { + api.escapeHTML(encoderExtension) + } + if cfg.UseNumber { + api.useNumber(decoderExtension) + } + if cfg.ValidateJsonRawMessage { + api.validateJsonRawMessage(encoderExtension) + } + api.encoderExtension = encoderExtension + api.decoderExtension = decoderExtension + api.configBeforeFrozen = cfg + return api +} + +func (cfg Config) frozeWithCacheReuse(extraExtensions []Extension) *frozenConfig { + api := getFrozenConfigFromCache(cfg) + if api != nil { + return api + } + api = cfg.Froze().(*frozenConfig) + for _, extension := range extraExtensions { + api.RegisterExtension(extension) + } + addFrozenConfigToCache(cfg, api) + return api +} + +func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) { + encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) { + rawMessage := *(*json.RawMessage)(ptr) + iter := cfg.BorrowIterator([]byte(rawMessage)) + defer cfg.ReturnIterator(iter) + iter.Read() + if iter.Error != nil && iter.Error != io.EOF { + stream.WriteRaw("null") + } else { + stream.WriteRaw(string(rawMessage)) + } + }, func(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 + }} + extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder + extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder +} + +func (cfg *frozenConfig) useNumber(extension DecoderExtension) { + extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { + exitingValue := *((*interface{})(ptr)) + if exitingValue != nil && reflect.TypeOf(exitingValue).Kind() == reflect.Ptr { + iter.ReadVal(exitingValue) + return + } + if iter.WhatIsNext() == NumberValue { + *((*interface{})(ptr)) = json.Number(iter.readNumberAsString()) + } else { + *((*interface{})(ptr)) = iter.Read() + } + }} +} +func (cfg *frozenConfig) getTagKey() string { + tagKey := cfg.configBeforeFrozen.TagKey + if tagKey == "" { + return "json" + } + return tagKey +} + +func (cfg *frozenConfig) RegisterExtension(extension Extension) { + 
cfg.extraExtensions = append(cfg.extraExtensions, extension) + copied := cfg.configBeforeFrozen + cfg.configBeforeFrozen = copied +} + +type lossyFloat32Encoder struct { +} + +func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32Lossy(*((*float32)(ptr))) +} + +func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type lossyFloat64Encoder struct { +} + +func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64Lossy(*((*float64)(ptr))) +} + +func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +// EnableLossyFloatMarshalling keeps 10**(-6) precision +// for float variables for better performance. +func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) { + // for better performance + extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{} + extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{} +} + +type htmlEscapedStringEncoder struct { +} + +func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteStringWithHTMLEscaped(str) +} + +func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) { + encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{} +} + +func (cfg *frozenConfig) cleanDecoders() { + typeDecoders = map[string]ValDecoder{} + fieldDecoders = map[string]ValDecoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) cleanEncoders() { + typeEncoders = map[string]ValEncoder{} + fieldEncoders = map[string]ValEncoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return "", stream.Error + } + return string(stream.Buffer()), nil +} + +func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return nil, stream.Error + } + result := stream.Buffer() + copied := make([]byte, len(result)) + copy(copied, result) + return copied, nil +} + +func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + if prefix != "" { + panic("prefix is not supported") + } + for _, r := range indent { + if r != ' ' { + panic("indent can only be space") + } + } + newCfg := cfg.configBeforeFrozen + newCfg.IndentionStep = len(indent) + return newCfg.frozeWithCacheReuse(cfg.extraExtensions).Marshal(v) +} + +func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error { + data := []byte(str) + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + c := iter.nextToken() + if c == 0 { + if iter.Error == io.EOF { + return nil + } + return iter.Error + } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + return iter.Error +} + +func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + return locatePath(iter, path) +} + +func (cfg *frozenConfig) Unmarshal(data []byte, v 
interface{}) error { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + c := iter.nextToken() + if c == 0 { + if iter.Error == io.EOF { + return nil + } + return iter.Error + } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + return iter.Error +} + +func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder { + stream := NewStream(cfg, writer, 512) + return &Encoder{stream} +} + +func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder { + iter := Parse(cfg, reader, 512) + return &Decoder{iter} +} + +func (cfg *frozenConfig) Valid(data []byte) bool { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.Skip() + return iter.Error == nil +} diff --git a/extender/code/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md b/extender/code/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md new file mode 100644 index 000000000..3095662b0 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md @@ -0,0 +1,7 @@ +| json type \ dest type | bool | int | uint | float |string| +| --- | --- | --- | --- |--|--| +| number | positive => true
negative => true&#13;zero => false| 23.2 => 23&#13;-32.1 => -32| 12.1 => 12&#13;-12.1 => 0|as normal|same as origin|
+| string | empty string => false&#13;string "0" => false&#13;other strings => true | "123.32" => 123&#13;"-123.4" => -123&#13;"123.23xxxw" => 123&#13;"abcde12" => 0&#13;"-32.1" => -32| 13.2 => 13&#13;-1.1 => 0 |12.1 => 12.1&#13;-12.3 => -12.3&#13;12.4xxa => 12.4&#13;+1.1e2 => 110 |same as origin|
+| bool | true => true&#13;false => false| true => 1&#13;false => 0 | true => 1&#13;false => 0 |true => 1&#13;false => 0|true => "true"&#13;false => "false"|
+| object | true | 0 | 0 |0|original json|
+| array | empty array => false&#13;nonempty array => true| [] => 0&#13;[1,2] => 1 | [] => 0&#13;[1,2] => 1 |[] => 0&#13;
[1,2] => 1|original json| \ No newline at end of file diff --git a/extender/code/vendor/github.com/json-iterator/go/go.mod b/extender/code/vendor/github.com/json-iterator/go/go.mod new file mode 100644 index 000000000..e05c42ff5 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/go.mod @@ -0,0 +1,11 @@ +module github.com/json-iterator/go + +go 1.12 + +require ( + github.com/davecgh/go-spew v1.1.1 + github.com/google/gofuzz v1.0.0 + github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 + github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 + github.com/stretchr/testify v1.3.0 +) diff --git a/extender/code/vendor/github.com/json-iterator/go/go.sum b/extender/code/vendor/github.com/json-iterator/go/go.sum new file mode 100644 index 000000000..d778b5a14 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/go.sum @@ -0,0 +1,14 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/extender/code/vendor/github.com/json-iterator/go/iter.go b/extender/code/vendor/github.com/json-iterator/go/iter.go new file mode 100644 index 000000000..29b31cf78 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/iter.go @@ -0,0 +1,349 @@ +package jsoniter + +import ( + "encoding/json" + "fmt" + "io" +) + +// ValueType the type for JSON element +type ValueType int + +const ( + // InvalidValue invalid JSON element + InvalidValue ValueType = iota + // StringValue JSON element "string" + StringValue + // NumberValue JSON element 100 or 0.10 + NumberValue + // NilValue JSON element null + NilValue + // BoolValue JSON element true or false + BoolValue + // ArrayValue JSON element [] + ArrayValue + // ObjectValue JSON element {} + ObjectValue +) + +var hexDigits []byte +var valueTypes []ValueType + +func init() { + hexDigits = make([]byte, 256) + for i := 0; i < len(hexDigits); i++ { + hexDigits[i] = 255 + } + for i := '0'; i <= '9'; i++ { + hexDigits[i] = byte(i - '0') + } + for i := 'a'; i <= 'f'; i++ { + hexDigits[i] = byte((i - 'a') + 10) + } + for i := 'A'; i <= 'F'; i++ { + hexDigits[i] = byte((i - 'A') + 10) + } + valueTypes = make([]ValueType, 256) + for i := 0; i < len(valueTypes); i++ { + valueTypes[i] = InvalidValue + } + valueTypes['"'] = StringValue 
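+ // '-' and the ASCII digits 0-9 all introduce JSON numbers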
+ valueTypes['-'] = NumberValue + valueTypes['0'] = NumberValue + valueTypes['1'] = NumberValue + valueTypes['2'] = NumberValue + valueTypes['3'] = NumberValue + valueTypes['4'] = NumberValue + valueTypes['5'] = NumberValue + valueTypes['6'] = NumberValue + valueTypes['7'] = NumberValue + valueTypes['8'] = NumberValue + valueTypes['9'] = NumberValue + valueTypes['t'] = BoolValue + valueTypes['f'] = BoolValue + valueTypes['n'] = NilValue + valueTypes['['] = ArrayValue + valueTypes['{'] = ObjectValue +} + +// Iterator is a io.Reader like object, with JSON specific read functions. +// Error is not returned as return value, but stored as Error member on this iterator instance. +type Iterator struct { + cfg *frozenConfig + reader io.Reader + buf []byte + head int + tail int + depth int + captureStartedAt int + captured []byte + Error error + Attachment interface{} // open for customized decoder +} + +// NewIterator creates an empty Iterator instance +func NewIterator(cfg API) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: nil, + head: 0, + tail: 0, + depth: 0, + } +} + +// Parse creates an Iterator instance from io.Reader +func Parse(cfg API, reader io.Reader, bufSize int) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: reader, + buf: make([]byte, bufSize), + head: 0, + tail: 0, + depth: 0, + } +} + +// ParseBytes creates an Iterator instance from byte array +func ParseBytes(cfg API, input []byte) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: input, + head: 0, + tail: len(input), + depth: 0, + } +} + +// ParseString creates an Iterator instance from string +func ParseString(cfg API, input string) *Iterator { + return ParseBytes(cfg, []byte(input)) +} + +// Pool returns a pool can provide more iterator with same configuration +func (iter *Iterator) Pool() IteratorPool { + return iter.cfg +} + +// Reset reuse iterator instance by specifying another reader +func (iter *Iterator) Reset(reader io.Reader) *Iterator { + iter.reader = reader + iter.head = 0 + iter.tail = 0 + iter.depth = 0 + return iter +} + +// ResetBytes reuse iterator instance by specifying another byte array as input +func (iter *Iterator) ResetBytes(input []byte) *Iterator { + iter.reader = nil + iter.buf = input + iter.head = 0 + iter.tail = len(input) + iter.depth = 0 + return iter +} + +// WhatIsNext gets ValueType of relatively next json element +func (iter *Iterator) WhatIsNext() ValueType { + valueType := valueTypes[iter.nextToken()] + iter.unreadByte() + return valueType +} + +func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + return false + } + return true +} + +func (iter *Iterator) isObjectEnd() bool { + c := iter.nextToken() + if c == ',' { + return false + } + if c == '}' { + return true + } + iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c})) + return true +} + +func (iter *Iterator) nextToken() byte { + // a variation of skip whitespaces, returning the next non-whitespace token + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + 1 + return c + } + if !iter.loadMore() { + return 0 + } + } +} + +// ReportError record a error in iterator instance with current position. 
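+// The message embeds roughly ten bytes of input on either side of the failure
+// point plus a wider ~50-byte context window. Only the first non-EOF error is
+// kept; later reports are ignored.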
+func (iter *Iterator) ReportError(operation string, msg string) { + if iter.Error != nil { + if iter.Error != io.EOF { + return + } + } + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + peekEnd := iter.head + 10 + if peekEnd > iter.tail { + peekEnd = iter.tail + } + parsing := string(iter.buf[peekStart:peekEnd]) + contextStart := iter.head - 50 + if contextStart < 0 { + contextStart = 0 + } + contextEnd := iter.head + 50 + if contextEnd > iter.tail { + contextEnd = iter.tail + } + context := string(iter.buf[contextStart:contextEnd]) + iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...", + operation, msg, iter.head-peekStart, parsing, context) +} + +// CurrentBuffer gets current buffer as string for debugging purpose +func (iter *Iterator) CurrentBuffer() string { + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head, + string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail])) +} + +func (iter *Iterator) readByte() (ret byte) { + if iter.head == iter.tail { + if iter.loadMore() { + ret = iter.buf[iter.head] + iter.head++ + return ret + } + return 0 + } + ret = iter.buf[iter.head] + iter.head++ + return ret +} + +func (iter *Iterator) loadMore() bool { + if iter.reader == nil { + if iter.Error == nil { + iter.head = iter.tail + iter.Error = io.EOF + } + return false + } + if iter.captured != nil { + iter.captured = append(iter.captured, + iter.buf[iter.captureStartedAt:iter.tail]...) + iter.captureStartedAt = 0 + } + for { + n, err := iter.reader.Read(iter.buf) + if n == 0 { + if err != nil { + if iter.Error == nil { + iter.Error = err + } + return false + } + } else { + iter.head = 0 + iter.tail = n + return true + } + } +} + +func (iter *Iterator) unreadByte() { + if iter.Error != nil { + return + } + iter.head-- + return +} + +// Read read the next JSON element as generic interface{}. 
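+// Strings decode to string, numbers to float64 (or json.Number when UseNumber
+// is set on the config), null to nil, booleans to bool, arrays to
+// []interface{} and objects to map[string]interface{}.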
+func (iter *Iterator) Read() interface{} { + valueType := iter.WhatIsNext() + switch valueType { + case StringValue: + return iter.ReadString() + case NumberValue: + if iter.cfg.configBeforeFrozen.UseNumber { + return json.Number(iter.readNumberAsString()) + } + return iter.ReadFloat64() + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + return nil + case BoolValue: + return iter.ReadBool() + case ArrayValue: + arr := []interface{}{} + iter.ReadArrayCB(func(iter *Iterator) bool { + var elem interface{} + iter.ReadVal(&elem) + arr = append(arr, elem) + return true + }) + return arr + case ObjectValue: + obj := map[string]interface{}{} + iter.ReadMapCB(func(Iter *Iterator, field string) bool { + var elem interface{} + iter.ReadVal(&elem) + obj[field] = elem + return true + }) + return obj + default: + iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType)) + return nil + } +} + +// limit maximum depth of nesting, as allowed by https://tools.ietf.org/html/rfc7159#section-9 +const maxDepth = 10000 + +func (iter *Iterator) incrementDepth() (success bool) { + iter.depth++ + if iter.depth <= maxDepth { + return true + } + iter.ReportError("incrementDepth", "exceeded max depth") + return false +} + +func (iter *Iterator) decrementDepth() (success bool) { + iter.depth-- + if iter.depth >= 0 { + return true + } + iter.ReportError("decrementDepth", "unexpected negative nesting") + return false +} diff --git a/extender/code/vendor/github.com/json-iterator/go/iter_array.go b/extender/code/vendor/github.com/json-iterator/go/iter_array.go new file mode 100644 index 000000000..204fe0e09 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/iter_array.go @@ -0,0 +1,64 @@ +package jsoniter + +// ReadArray read array element, tells if the array has more element to read. 
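+// Typical usage, sketched here for illustration (not part of the upstream
+// source):
+//
+//	for iter.ReadArray() {
+//		values = append(values, iter.ReadFloat64())
+//	}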
+func (iter *Iterator) ReadArray() (ret bool) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return false // null + case '[': + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + return true + } + return false + case ']': + return false + case ',': + return true + default: + iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c})) + return + } +} + +// ReadArrayCB read array with callback +func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { + c := iter.nextToken() + if c == '[' { + if !iter.incrementDepth() { + return false + } + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + if !callback(iter) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + for c == ',' { + if !callback(iter) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + } + if c != ']' { + iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c})) + iter.decrementDepth() + return false + } + return iter.decrementDepth() + } + return iter.decrementDepth() + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c})) + return false +} diff --git a/extender/code/vendor/github.com/json-iterator/go/iter_float.go b/extender/code/vendor/github.com/json-iterator/go/iter_float.go new file mode 100644 index 000000000..b9754638e --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/iter_float.go @@ -0,0 +1,339 @@ +package jsoniter + +import ( + "encoding/json" + "io" + "math/big" + "strconv" + "strings" + "unsafe" +) + +var floatDigits []int8 + +const invalidCharForNumber = int8(-1) +const endOfNumber = int8(-2) +const dotInNumber = int8(-3) + +func init() { + floatDigits = make([]int8, 256) + for i := 0; i < len(floatDigits); i++ { + floatDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + floatDigits[i] = i - int8('0') + } + floatDigits[','] = endOfNumber + floatDigits[']'] = endOfNumber + floatDigits['}'] = endOfNumber + floatDigits[' '] = endOfNumber + floatDigits['\t'] = endOfNumber + floatDigits['\n'] = endOfNumber + floatDigits['.'] = dotInNumber +} + +// ReadBigFloat read big.Float +func (iter *Iterator) ReadBigFloat() (ret *big.Float) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + prec := 64 + if len(str) > prec { + prec = len(str) + } + val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero) + if err != nil { + iter.Error = err + return nil + } + return val +} + +// ReadBigInt read big.Int +func (iter *Iterator) ReadBigInt() (ret *big.Int) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + ret = big.NewInt(0) + var success bool + ret, success = ret.SetString(str, 10) + if !success { + iter.ReportError("ReadBigInt", "invalid big int") + return nil + } + return ret +} + +//ReadFloat32 read float32 +func (iter *Iterator) ReadFloat32() (ret float32) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat32() + } + iter.unreadByte() + return iter.readPositiveFloat32() +} + +func (iter *Iterator) readPositiveFloat32() (ret float32) { + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat32SlowPath() + } + c := iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + 
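+ // the first byte was a delimiter such as ',' or '}', so no digits were seen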
iter.ReportError("readFloat32", "empty number") + return + case dotInNumber: + iter.ReportError("readFloat32", "leading dot is invalid") + return + case 0: + if i == iter.tail { + return iter.readFloat32SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat32", "leading zero is invalid") + return + } + } + value := uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.head = i + return float32(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' { + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat32SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float32(float64(value) / float64(pow10[decimalPlaces])) + } + // too many decimal places + return iter.readFloat32SlowPath() + case invalidCharForNumber, dotInNumber: + return iter.readFloat32SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + } + } + return iter.readFloat32SlowPath() +} + +func (iter *Iterator) readNumberAsString() (ret string) { + strBuf := [16]byte{} + str := strBuf[0:0] +load_loop: + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + str = append(str, c) + continue + default: + iter.head = i + break load_loop + } + } + if !iter.loadMore() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + return + } + if len(str) == 0 { + iter.ReportError("readNumberAsString", "invalid number") + } + return *(*string)(unsafe.Pointer(&str)) +} + +func (iter *Iterator) readFloat32SlowPath() (ret float32) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat32SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 32) + if err != nil { + iter.Error = err + return + } + return float32(val) +} + +// ReadFloat64 read float64 +func (iter *Iterator) ReadFloat64() (ret float64) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat64() + } + iter.unreadByte() + return iter.readPositiveFloat64() +} + +func (iter *Iterator) readPositiveFloat64() (ret float64) { + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c := iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + iter.ReportError("readFloat64", "empty number") + return + case dotInNumber: + iter.ReportError("readFloat64", "leading dot is invalid") + return + case 0: + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat64", "leading zero is invalid") + return + } + } + value := uint64(ind) + // chars before dot 
+non_decimal_loop: + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + iter.head = i + return float64(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' { + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat64SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float64(value) / float64(pow10[decimalPlaces]) + } + // too many decimal places + return iter.readFloat64SlowPath() + case invalidCharForNumber, dotInNumber: + return iter.readFloat64SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + } + } + return iter.readFloat64SlowPath() +} + +func (iter *Iterator) readFloat64SlowPath() (ret float64) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat64SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 64) + if err != nil { + iter.Error = err + return + } + return val +} + +func validateFloat(str string) string { + // strconv.ParseFloat is not validating `1.` or `1.e1` + if len(str) == 0 { + return "empty number" + } + if str[0] == '-' { + return "-- is not valid" + } + dotPos := strings.IndexByte(str, '.') + if dotPos != -1 { + if dotPos == len(str)-1 { + return "dot can not be last character" + } + switch str[dotPos+1] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + return "missing digit after dot" + } + } + return "" +} + +// ReadNumber read json.Number +func (iter *Iterator) ReadNumber() (ret json.Number) { + return json.Number(iter.readNumberAsString()) +} diff --git a/extender/code/vendor/github.com/json-iterator/go/iter_int.go b/extender/code/vendor/github.com/json-iterator/go/iter_int.go new file mode 100644 index 000000000..214232035 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/iter_int.go @@ -0,0 +1,345 @@ +package jsoniter + +import ( + "math" + "strconv" +) + +var intDigits []int8 + +const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1 +const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1 + +func init() { + intDigits = make([]int8, 256) + for i := 0; i < len(intDigits); i++ { + intDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + intDigits[i] = i - int8('0') + } +} + +// ReadUint read uint +func (iter *Iterator) ReadUint() uint { + if strconv.IntSize == 32 { + return uint(iter.ReadUint32()) + } + return uint(iter.ReadUint64()) +} + +// ReadInt read int +func (iter *Iterator) ReadInt() int { + if strconv.IntSize == 32 { + return int(iter.ReadInt32()) + } + return int(iter.ReadInt64()) +} + +// ReadInt8 read int8 +func (iter *Iterator) ReadInt8() (ret int8) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt8+1 { + iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int8(val) + } + val := iter.readUint32(c) + if val > math.MaxInt8 { + 
iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int8(val) +} + +// ReadUint8 read uint8 +func (iter *Iterator) ReadUint8() (ret uint8) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint8 { + iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint8(val) +} + +// ReadInt16 read int16 +func (iter *Iterator) ReadInt16() (ret int16) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt16+1 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int16(val) + } + val := iter.readUint32(c) + if val > math.MaxInt16 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int16(val) +} + +// ReadUint16 read uint16 +func (iter *Iterator) ReadUint16() (ret uint16) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint16 { + iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint16(val) +} + +// ReadInt32 read int32 +func (iter *Iterator) ReadInt32() (ret int32) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt32+1 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int32(val) + } + val := iter.readUint32(c) + if val > math.MaxInt32 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int32(val) +} + +// ReadUint32 read uint32 +func (iter *Iterator) ReadUint32() (ret uint32) { + return iter.readUint32(iter.nextToken()) +} + +func (iter *Iterator) readUint32(c byte) (ret uint32) { + ind := intDigits[c] + if ind == 0 { + iter.assertInteger() + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint32(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10 + uint32(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100 + uint32(ind2)*10 + uint32(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = 
value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8) + iter.head = i + if ind9 == invalidCharForNumber { + iter.assertInteger() + return value + } + } + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + if value > uint32SafeToMultiply10 { + value2 := (value << 3) + (value << 1) + uint32(ind) + if value2 < value { + iter.ReportError("readUint32", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) + (value << 1) + uint32(ind) + } + if !iter.loadMore() { + iter.assertInteger() + return value + } + } +} + +// ReadInt64 read int64 +func (iter *Iterator) ReadInt64() (ret int64) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint64(iter.readByte()) + if val > math.MaxInt64+1 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return -int64(val) + } + val := iter.readUint64(c) + if val > math.MaxInt64 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return int64(val) +} + +// ReadUint64 read uint64 +func (iter *Iterator) ReadUint64() uint64 { + return iter.readUint64(iter.nextToken()) +} + +func (iter *Iterator) readUint64(c byte) (ret uint64) { + ind := intDigits[c] + if ind == 0 { + iter.assertInteger() + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint64(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10 + uint64(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100 + uint64(ind2)*10 + uint64(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8) + iter.head = i + if ind9 == invalidCharForNumber { + iter.assertInteger() + return value + } + } + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + 
iter.head = i + iter.assertInteger() + return value + } + if value > uint64SafeToMultiple10 { + value2 := (value << 3) + (value << 1) + uint64(ind) + if value2 < value { + iter.ReportError("readUint64", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) + (value << 1) + uint64(ind) + } + if !iter.loadMore() { + iter.assertInteger() + return value + } + } +} + +func (iter *Iterator) assertInteger() { + if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' { + iter.ReportError("assertInteger", "can not decode float as int") + } +} diff --git a/extender/code/vendor/github.com/json-iterator/go/iter_object.go b/extender/code/vendor/github.com/json-iterator/go/iter_object.go new file mode 100644 index 000000000..58ee89c84 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/iter_object.go @@ -0,0 +1,267 @@ +package jsoniter + +import ( + "fmt" + "strings" +) + +// ReadObject read one field from object. +// If object ended, returns empty string. +// Otherwise, returns the field name. +func (iter *Iterator) ReadObject() (ret string) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return "" // null + case '{': + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + return field + } + if c == '}' { + return "" // end of object + } + iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c})) + return + case ',': + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + return field + case '}': + return "" // end of object + default: + iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c}))) + return + } +} + +// CaseInsensitive +func (iter *Iterator) readFieldHash() int64 { + hash := int64(0x811c9dc5) + c := iter.nextToken() + if c != '"' { + iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c})) + return 0 + } + for { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + b := iter.buf[i] + if b == '\\' { + iter.head = i + for _, b := range iter.readStringSlowPath() { + if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { + b += 'a' - 'A' + } + hash ^= int64(b) + hash *= 0x1000193 + } + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 + } + return hash + } + if b == '"' { + iter.head = i + 1 + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 + } + return hash + } + if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { + b += 'a' - 'A' + } + hash ^= int64(b) + hash *= 0x1000193 + } + if !iter.loadMore() { + iter.ReportError("readFieldHash", `incomplete field name`) + return 0 + } + } +} + +func calcHash(str string, caseSensitive bool) int64 { + if !caseSensitive { + str = strings.ToLower(str) + } + hash := int64(0x811c9dc5) + for _, b := range []byte(str) { + hash ^= int64(b) + hash *= 0x1000193 + } + return int64(hash) +} + +// ReadObjectCB read object with callback, the key is ascii only and field name not copied +func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + var field string + if c == '{' { + if 
!iter.incrementDepth() { + return false + } + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + for c == ',' { + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadObjectCB", `object not ended with }`) + iter.decrementDepth() + return false + } + return iter.decrementDepth() + } + if c == '}' { + return iter.decrementDepth() + } + iter.ReportError("ReadObjectCB", `expect " after {, but found `+string([]byte{c})) + iter.decrementDepth() + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c})) + return false +} + +// ReadMapCB read map with callback, the key can be any string +func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + if c == '{' { + if !iter.incrementDepth() { + return false + } + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field := iter.ReadString() + if iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + iter.decrementDepth() + return false + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + for c == ',' { + field = iter.ReadString() + if iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + iter.decrementDepth() + return false + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadMapCB", `object not ended with }`) + iter.decrementDepth() + return false + } + return iter.decrementDepth() + } + if c == '}' { + return iter.decrementDepth() + } + iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c})) + iter.decrementDepth() + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) + return false +} + +func (iter *Iterator) readObjectStart() bool { + c := iter.nextToken() + if c == '{' { + c = iter.nextToken() + if c == '}' { + return false + } + iter.unreadByte() + return true + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return false + } + iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c})) + return false +} + +func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) { + str := iter.ReadStringAsSlice() + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if iter.buf[iter.head] != ':' { + iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]})) + return + } + iter.head++ + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if ret == nil { + return str + 
} + return ret +} diff --git a/extender/code/vendor/github.com/json-iterator/go/iter_skip.go b/extender/code/vendor/github.com/json-iterator/go/iter_skip.go new file mode 100644 index 000000000..e91eefb15 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/iter_skip.go @@ -0,0 +1,130 @@ +package jsoniter + +import "fmt" + +// ReadNil reads a json object as nil and +// returns whether it's a nil or not +func (iter *Iterator) ReadNil() (ret bool) { + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') // null + return true + } + iter.unreadByte() + return false +} + +// ReadBool reads a json object as BoolValue +func (iter *Iterator) ReadBool() (ret bool) { + c := iter.nextToken() + if c == 't' { + iter.skipThreeBytes('r', 'u', 'e') + return true + } + if c == 'f' { + iter.skipFourBytes('a', 'l', 's', 'e') + return false + } + iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c})) + return +} + +// SkipAndReturnBytes skip next JSON element, and return its content as []byte. +// The []byte can be kept, it is a copy of data. +func (iter *Iterator) SkipAndReturnBytes() []byte { + iter.startCapture(iter.head) + iter.Skip() + return iter.stopCapture() +} + +// SkipAndAppendBytes skips next JSON element and appends its content to +// buffer, returning the result. +func (iter *Iterator) SkipAndAppendBytes(buf []byte) []byte { + iter.startCaptureTo(buf, iter.head) + iter.Skip() + return iter.stopCapture() +} + +func (iter *Iterator) startCaptureTo(buf []byte, captureStartedAt int) { + if iter.captured != nil { + panic("already in capture mode") + } + iter.captureStartedAt = captureStartedAt + iter.captured = buf +} + +func (iter *Iterator) startCapture(captureStartedAt int) { + iter.startCaptureTo(make([]byte, 0, 32), captureStartedAt) +} + +func (iter *Iterator) stopCapture() []byte { + if iter.captured == nil { + panic("not in capture mode") + } + captured := iter.captured + remaining := iter.buf[iter.captureStartedAt:iter.head] + iter.captureStartedAt = -1 + iter.captured = nil + return append(captured, remaining...) 
+} + +// Skip skips a json object and positions to relatively the next json object +func (iter *Iterator) Skip() { + c := iter.nextToken() + switch c { + case '"': + iter.skipString() + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + case '0': + iter.unreadByte() + iter.ReadFloat32() + case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.skipNumber() + case '[': + iter.skipArray() + case '{': + iter.skipObject() + default: + iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c)) + return + } +} + +func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b4 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } +} + +func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } +} diff --git a/extender/code/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/extender/code/vendor/github.com/json-iterator/go/iter_skip_sloppy.go new file mode 100644 index 000000000..9303de41e --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/iter_skip_sloppy.go @@ -0,0 +1,163 @@ +//+build jsoniter_sloppy + +package jsoniter + +// sloppy but faster implementation, do not validate the input json + +func (iter *Iterator) skipNumber() { + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\r', '\t', ',', '}', ']': + iter.head = i + return + } + } + if !iter.loadMore() { + return + } + } +} + +func (iter *Iterator) skipArray() { + level := 1 + if !iter.incrementDepth() { + return + } + for { + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '[': // If open symbol, increase level + level++ + if !iter.incrementDepth() { + return + } + case ']': // If close symbol, increase level + level-- + if !iter.decrementDepth() { + return + } + + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipObject", "incomplete array") + return + } + } +} + +func (iter *Iterator) skipObject() { + level := 1 + if !iter.incrementDepth() { + return + } + + for { + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '{': // If open symbol, increase level + level++ + if !iter.incrementDepth() { + return + } + case '}': // If close symbol, increase level + level-- 
+ if !iter.decrementDepth() { + return + } + + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipObject", "incomplete object") + return + } + } +} + +func (iter *Iterator) skipString() { + for { + end, escaped := iter.findStringEnd() + if end == -1 { + if !iter.loadMore() { + iter.ReportError("skipString", "incomplete string") + return + } + if escaped { + iter.head = 1 // skip the first char as last char read is \ + } + } else { + iter.head = end + return + } + } +} + +// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go +// Tries to find the end of string +// Support if string contains escaped quote symbols. +func (iter *Iterator) findStringEnd() (int, bool) { + escaped := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + if !escaped { + return i + 1, false + } + j := i - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return i + 1, true + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + } + } else if c == '\\' { + escaped = true + } + } + j := iter.tail - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return -1, false // do not end with \ + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + + } + return -1, true // end with \ +} diff --git a/extender/code/vendor/github.com/json-iterator/go/iter_skip_strict.go b/extender/code/vendor/github.com/json-iterator/go/iter_skip_strict.go new file mode 100644 index 000000000..6cf66d043 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/iter_skip_strict.go @@ -0,0 +1,99 @@ +//+build !jsoniter_sloppy + +package jsoniter + +import ( + "fmt" + "io" +) + +func (iter *Iterator) skipNumber() { + if !iter.trySkipNumber() { + iter.unreadByte() + if iter.Error != nil && iter.Error != io.EOF { + return + } + iter.ReadFloat64() + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = nil + iter.ReadBigFloat() + } + } +} + +func (iter *Iterator) trySkipNumber() bool { + dotFound := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + case '.': + if dotFound { + iter.ReportError("validateNumber", `more than one dot found in number`) + return true // already failed + } + if i+1 == iter.tail { + return false + } + c = iter.buf[i+1] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + iter.ReportError("validateNumber", `missing digit after dot`) + return true // already failed + } + dotFound = true + default: + switch c { + case ',', ']', '}', ' ', '\t', '\n', '\r': + if iter.head == i { + return false // if - without following digits + } + iter.head = i + return true // must be valid + } + return false // may be invalid + } + } + return false +} + +func (iter *Iterator) skipString() { + if !iter.trySkipString() { + iter.unreadByte() + iter.ReadString() + } +} + +func (iter *Iterator) trySkipString() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + iter.head = i + 1 + return true // valid + } else if c == '\\' { + return false + } else if c < ' ' { + iter.ReportError("trySkipString", + fmt.Sprintf(`invalid control 
character found: %d`, c)) + return true // already failed + } + } + return false +} + +func (iter *Iterator) skipObject() { + iter.unreadByte() + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + return true + }) +} + +func (iter *Iterator) skipArray() { + iter.unreadByte() + iter.ReadArrayCB(func(iter *Iterator) bool { + iter.Skip() + return true + }) +} diff --git a/extender/code/vendor/github.com/json-iterator/go/iter_str.go b/extender/code/vendor/github.com/json-iterator/go/iter_str.go new file mode 100644 index 000000000..adc487ea8 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/iter_str.go @@ -0,0 +1,215 @@ +package jsoniter + +import ( + "fmt" + "unicode/utf16" +) + +// ReadString read string from iterator +func (iter *Iterator) ReadString() (ret string) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + ret = string(iter.buf[iter.head:i]) + iter.head = i + 1 + return ret + } else if c == '\\' { + break + } else if c < ' ' { + iter.ReportError("ReadString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return + } + } + return iter.readStringSlowPath() + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return "" + } + iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c})) + return +} + +func (iter *Iterator) readStringSlowPath() (ret string) { + var str []byte + var c byte + for iter.Error == nil { + c = iter.readByte() + if c == '"' { + return string(str) + } + if c == '\\' { + c = iter.readByte() + str = iter.readEscapedChar(c, str) + } else { + str = append(str, c) + } + } + iter.ReportError("readStringSlowPath", "unexpected end of input") + return +} + +func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte { + switch c { + case 'u': + r := iter.readU4() + if utf16.IsSurrogate(r) { + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != '\\' { + iter.unreadByte() + str = appendRune(str, r) + return str + } + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != 'u' { + str = appendRune(str, r) + return iter.readEscapedChar(c, str) + } + r2 := iter.readU4() + if iter.Error != nil { + return nil + } + combined := utf16.DecodeRune(r, r2) + if combined == '\uFFFD' { + str = appendRune(str, r) + str = appendRune(str, r2) + } else { + str = appendRune(str, combined) + } + } else { + str = appendRune(str, r) + } + case '"': + str = append(str, '"') + case '\\': + str = append(str, '\\') + case '/': + str = append(str, '/') + case 'b': + str = append(str, '\b') + case 'f': + str = append(str, '\f') + case 'n': + str = append(str, '\n') + case 'r': + str = append(str, '\r') + case 't': + str = append(str, '\t') + default: + iter.ReportError("readEscapedChar", + `invalid escape char after \`) + return nil + } + return str +} + +// ReadStringAsSlice read string from iterator without copying into string form. +// The []byte can not be kept, as it will change after next iterator call. 
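+//
+// A minimal usage sketch (assuming an iterator over a complete in-memory
+// buffer, e.g. from ParseBytes; copy the bytes if they must outlive the
+// next call):
+//
+//	iter := jsoniter.ParseBytes(jsoniter.ConfigDefault, []byte(`"name"`))
+//	raw := iter.ReadStringAsSlice() // aliases the iterator's buffer
+//	name := string(raw)             // safe copy before the next Read*/Skip call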
+func (iter *Iterator) ReadStringAsSlice() (ret []byte) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + // for: field name, base64, number + if iter.buf[i] == '"' { + // fast path: reuse the underlying buffer + ret = iter.buf[iter.head:i] + iter.head = i + 1 + return ret + } + } + readLen := iter.tail - iter.head + copied := make([]byte, readLen, readLen*2) + copy(copied, iter.buf[iter.head:iter.tail]) + iter.head = iter.tail + for iter.Error == nil { + c := iter.readByte() + if c == '"' { + return copied + } + copied = append(copied, c) + } + return copied + } + iter.ReportError("ReadStringAsSlice", `expects " or n, but found `+string([]byte{c})) + return +} + +func (iter *Iterator) readU4() (ret rune) { + for i := 0; i < 4; i++ { + c := iter.readByte() + if iter.Error != nil { + return + } + if c >= '0' && c <= '9' { + ret = ret*16 + rune(c-'0') + } else if c >= 'a' && c <= 'f' { + ret = ret*16 + rune(c-'a'+10) + } else if c >= 'A' && c <= 'F' { + ret = ret*16 + rune(c-'A'+10) + } else { + iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c})) + return + } + } + return ret +} + +const ( + t1 = 0x00 // 0000 0000 + tx = 0x80 // 1000 0000 + t2 = 0xC0 // 1100 0000 + t3 = 0xE0 // 1110 0000 + t4 = 0xF0 // 1111 0000 + t5 = 0xF8 // 1111 1000 + + maskx = 0x3F // 0011 1111 + mask2 = 0x1F // 0001 1111 + mask3 = 0x0F // 0000 1111 + mask4 = 0x07 // 0000 0111 + + rune1Max = 1<<7 - 1 + rune2Max = 1<<11 - 1 + rune3Max = 1<<16 - 1 + + surrogateMin = 0xD800 + surrogateMax = 0xDFFF + + maxRune = '\U0010FFFF' // Maximum valid Unicode code point. + runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character" +) + +func appendRune(p []byte, r rune) []byte { + // Negative values are erroneous. Making it unsigned addresses the problem. + switch i := uint32(r); { + case i <= rune1Max: + p = append(p, byte(r)) + return p + case i <= rune2Max: + p = append(p, t2|byte(r>>6)) + p = append(p, tx|byte(r)&maskx) + return p + case i > maxRune, surrogateMin <= i && i <= surrogateMax: + r = runeError + fallthrough + case i <= rune3Max: + p = append(p, t3|byte(r>>12)) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + default: + p = append(p, t4|byte(r>>18)) + p = append(p, tx|byte(r>>12)&maskx) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + } +} diff --git a/extender/code/vendor/github.com/json-iterator/go/jsoniter.go b/extender/code/vendor/github.com/json-iterator/go/jsoniter.go new file mode 100644 index 000000000..c2934f916 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/jsoniter.go @@ -0,0 +1,18 @@ +// Package jsoniter implements encoding and decoding of JSON as defined in +// RFC 4627 and provides interfaces with identical syntax of standard lib encoding/json. +// Converting from encoding/json to jsoniter is no more than replacing the package with jsoniter +// and variable type declarations (if any). +// jsoniter interfaces gives 100% compatibility with code using standard lib. +// +// "JSON and Go" +// (https://golang.org/doc/articles/json_and_go.html) +// gives a description of how Marshal/Unmarshal operate +// between arbitrary or predefined json objects and bytes, +// and it applies to jsoniter.Marshal/Unmarshal as well. +// +// Besides, jsoniter.Iterator provides a different set of interfaces +// iterating given bytes/string/reader +// and yielding parsed elements one by one. 
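+// A minimal sketch of that style (fmt is used only for the printout):
+//
+//	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `[1, 2, 3]`)
+//	for iter.ReadArray() {
+//		fmt.Println(iter.ReadInt())
+//	}
+//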
+// This set of interfaces reads input as required and gives +// better performance. +package jsoniter diff --git a/extender/code/vendor/github.com/json-iterator/go/pool.go b/extender/code/vendor/github.com/json-iterator/go/pool.go new file mode 100644 index 000000000..e2389b56c --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/pool.go @@ -0,0 +1,42 @@ +package jsoniter + +import ( + "io" +) + +// IteratorPool a thread safe pool of iterators with same configuration +type IteratorPool interface { + BorrowIterator(data []byte) *Iterator + ReturnIterator(iter *Iterator) +} + +// StreamPool a thread safe pool of streams with same configuration +type StreamPool interface { + BorrowStream(writer io.Writer) *Stream + ReturnStream(stream *Stream) +} + +func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream { + stream := cfg.streamPool.Get().(*Stream) + stream.Reset(writer) + return stream +} + +func (cfg *frozenConfig) ReturnStream(stream *Stream) { + stream.out = nil + stream.Error = nil + stream.Attachment = nil + cfg.streamPool.Put(stream) +} + +func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator { + iter := cfg.iteratorPool.Get().(*Iterator) + iter.ResetBytes(data) + return iter +} + +func (cfg *frozenConfig) ReturnIterator(iter *Iterator) { + iter.Error = nil + iter.Attachment = nil + cfg.iteratorPool.Put(iter) +} diff --git a/extender/code/vendor/github.com/json-iterator/go/reflect.go b/extender/code/vendor/github.com/json-iterator/go/reflect.go new file mode 100644 index 000000000..74974ba74 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/reflect.go @@ -0,0 +1,337 @@ +package jsoniter + +import ( + "fmt" + "reflect" + "unsafe" + + "github.com/modern-go/reflect2" +) + +// ValDecoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValDecoder with json.Decoder. +// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link). +// +// Reflection on type to create decoders, which is then cached +// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions +// 1. create instance of new value, for example *int will need a int to be allocated +// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New +// 3. assignment to map, both key and value will be reflect.Value +// For a simple struct binding, it will be reflect.Value free and allocation free +type ValDecoder interface { + Decode(ptr unsafe.Pointer, iter *Iterator) +} + +// ValEncoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValEncoder with json.Encoder. +// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link). 
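+//
+// Custom ValEncoders are usually installed through the registration helpers
+// in reflect_extension.go rather than wired in by hand; a sketch (encoding
+// time.Duration as its string form is illustrative, not library behavior):
+//
+//	jsoniter.RegisterTypeEncoderFunc("time.Duration",
+//		func(ptr unsafe.Pointer, stream *jsoniter.Stream) {
+//			stream.WriteString((*(*time.Duration)(ptr)).String())
+//		},
+//		func(ptr unsafe.Pointer) bool {
+//			return *(*time.Duration)(ptr) == 0
+//		})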
+type ValEncoder interface { + IsEmpty(ptr unsafe.Pointer) bool + Encode(ptr unsafe.Pointer, stream *Stream) +} + +type checkIsEmpty interface { + IsEmpty(ptr unsafe.Pointer) bool +} + +type ctx struct { + *frozenConfig + prefix string + encoders map[reflect2.Type]ValEncoder + decoders map[reflect2.Type]ValDecoder +} + +func (b *ctx) caseSensitive() bool { + if b.frozenConfig == nil { + // default is case-insensitive + return false + } + return b.frozenConfig.caseSensitive +} + +func (b *ctx) append(prefix string) *ctx { + return &ctx{ + frozenConfig: b.frozenConfig, + prefix: b.prefix + " " + prefix, + encoders: b.encoders, + decoders: b.decoders, + } +} + +// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal +func (iter *Iterator) ReadVal(obj interface{}) { + depth := iter.depth + cacheKey := reflect2.RTypeOf(obj) + decoder := iter.cfg.getDecoderFromCache(cacheKey) + if decoder == nil { + typ := reflect2.TypeOf(obj) + if typ.Kind() != reflect.Ptr { + iter.ReportError("ReadVal", "can only unmarshal into pointer") + return + } + decoder = iter.cfg.DecoderOf(typ) + } + ptr := reflect2.PtrOf(obj) + if ptr == nil { + iter.ReportError("ReadVal", "can not read into nil pointer") + return + } + decoder.Decode(ptr, iter) + if iter.depth != depth { + iter.ReportError("ReadVal", "unexpected mismatched nesting") + return + } +} + +// WriteVal copy the go interface into underlying JSON, same as json.Marshal +func (stream *Stream) WriteVal(val interface{}) { + if nil == val { + stream.WriteNil() + return + } + cacheKey := reflect2.RTypeOf(val) + encoder := stream.cfg.getEncoderFromCache(cacheKey) + if encoder == nil { + typ := reflect2.TypeOf(val) + encoder = stream.cfg.EncoderOf(typ) + } + encoder.Encode(reflect2.PtrOf(val), stream) +} + +func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder { + cacheKey := typ.RType() + decoder := cfg.getDecoderFromCache(cacheKey) + if decoder != nil { + return decoder + } + ctx := &ctx{ + frozenConfig: cfg, + prefix: "", + decoders: map[reflect2.Type]ValDecoder{}, + encoders: map[reflect2.Type]ValEncoder{}, + } + ptrType := typ.(*reflect2.UnsafePtrType) + decoder = decoderOfType(ctx, ptrType.Elem()) + cfg.addDecoderToCache(cacheKey, decoder) + return decoder +} + +func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := getTypeDecoderFromExtension(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfType(ctx, typ) + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) + for _, extension := range ctx.extraExtensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + return decoder +} + +func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := ctx.decoders[typ] + if decoder != nil { + return decoder + } + placeholder := &placeholderDecoder{} + ctx.decoders[typ] = placeholder + decoder = _createDecoderOfType(ctx, typ) + placeholder.decoder = decoder + return decoder +} + +func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := createDecoderOfJsonRawMessage(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfJsonNumber(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfMarshaler(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfAny(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfNative(ctx, typ) + if decoder != nil 
{ + return decoder + } + switch typ.Kind() { + case reflect.Interface: + ifaceType, isIFace := typ.(*reflect2.UnsafeIFaceType) + if isIFace { + return &ifaceDecoder{valType: ifaceType} + } + return &efaceDecoder{} + case reflect.Struct: + return decoderOfStruct(ctx, typ) + case reflect.Array: + return decoderOfArray(ctx, typ) + case reflect.Slice: + return decoderOfSlice(ctx, typ) + case reflect.Map: + return decoderOfMap(ctx, typ) + case reflect.Ptr: + return decoderOfOptional(ctx, typ) + default: + return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} + } +} + +func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder { + cacheKey := typ.RType() + encoder := cfg.getEncoderFromCache(cacheKey) + if encoder != nil { + return encoder + } + ctx := &ctx{ + frozenConfig: cfg, + prefix: "", + decoders: map[reflect2.Type]ValDecoder{}, + encoders: map[reflect2.Type]ValEncoder{}, + } + encoder = encoderOfType(ctx, typ) + if typ.LikePtr() { + encoder = &onePtrEncoder{encoder} + } + cfg.addEncoderToCache(cacheKey, encoder) + return encoder +} + +type onePtrEncoder struct { + encoder ValEncoder +} + +func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) +} + +func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) +} + +func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := getTypeEncoderFromExtension(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfType(ctx, typ) + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) + for _, extension := range ctx.extraExtensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + return encoder +} + +func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := ctx.encoders[typ] + if encoder != nil { + return encoder + } + placeholder := &placeholderEncoder{} + ctx.encoders[typ] = placeholder + encoder = _createEncoderOfType(ctx, typ) + placeholder.encoder = encoder + return encoder +} +func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := createEncoderOfJsonRawMessage(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfJsonNumber(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfMarshaler(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfAny(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfNative(ctx, typ) + if encoder != nil { + return encoder + } + kind := typ.Kind() + switch kind { + case reflect.Interface: + return &dynamicEncoder{typ} + case reflect.Struct: + return encoderOfStruct(ctx, typ) + case reflect.Array: + return encoderOfArray(ctx, typ) + case reflect.Slice: + return encoderOfSlice(ctx, typ) + case reflect.Map: + return encoderOfMap(ctx, typ) + case reflect.Ptr: + return encoderOfOptional(ctx, typ) + default: + return &lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} + } +} + +type lazyErrorDecoder struct { + err error +} + +func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() != NilValue { + if iter.Error == nil { + iter.Error = decoder.err + } + } else { + iter.Skip() + } +} + +type lazyErrorEncoder struct { + err error +} + +func (encoder 
*lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if ptr == nil { + stream.WriteNil() + } else if stream.Error == nil { + stream.Error = encoder.err + } +} + +func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type placeholderDecoder struct { + decoder ValDecoder +} + +func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.decoder.Decode(ptr, iter) +} + +type placeholderEncoder struct { + encoder ValEncoder +} + +func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(ptr, stream) +} + +func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(ptr) +} diff --git a/extender/code/vendor/github.com/json-iterator/go/reflect_array.go b/extender/code/vendor/github.com/json-iterator/go/reflect_array.go new file mode 100644 index 000000000..13a0b7b08 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/reflect_array.go @@ -0,0 +1,104 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "unsafe" +) + +func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder { + arrayType := typ.(*reflect2.UnsafeArrayType) + decoder := decoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) + return &arrayDecoder{arrayType, decoder} +} + +func encoderOfArray(ctx *ctx, typ reflect2.Type) ValEncoder { + arrayType := typ.(*reflect2.UnsafeArrayType) + if arrayType.Len() == 0 { + return emptyArrayEncoder{} + } + encoder := encoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) + return &arrayEncoder{arrayType, encoder} +} + +type emptyArrayEncoder struct{} + +func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyArray() +} + +func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return true +} + +type arrayEncoder struct { + arrayType *reflect2.UnsafeArrayType + elemEncoder ValEncoder +} + +func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteArrayStart() + elemPtr := unsafe.Pointer(ptr) + encoder.elemEncoder.Encode(elemPtr, stream) + for i := 1; i < encoder.arrayType.Len(); i++ { + stream.WriteMore() + elemPtr = encoder.arrayType.UnsafeGetIndex(ptr, i) + encoder.elemEncoder.Encode(elemPtr, stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error()) + } +} + +func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type arrayDecoder struct { + arrayType *reflect2.UnsafeArrayType + elemDecoder ValDecoder +} + +func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error()) + } +} + +func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + arrayType := decoder.arrayType + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return + } + if c != '[' { + iter.ReportError("decode array", "expect [ or n, but found "+string([]byte{c})) + return + } + c = iter.nextToken() + if c == ']' { + return + } + iter.unreadByte() + elemPtr := arrayType.UnsafeGetIndex(ptr, 0) + decoder.elemDecoder.Decode(elemPtr, iter) + length := 1 + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + if length >= arrayType.Len() { + iter.Skip() + continue + } + idx := length + 
length += 1 + elemPtr = arrayType.UnsafeGetIndex(ptr, idx) + decoder.elemDecoder.Decode(elemPtr, iter) + } + if c != ']' { + iter.ReportError("decode array", "expect ], but found "+string([]byte{c})) + return + } +} diff --git a/extender/code/vendor/github.com/json-iterator/go/reflect_dynamic.go b/extender/code/vendor/github.com/json-iterator/go/reflect_dynamic.go new file mode 100644 index 000000000..8b6bc8b43 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/reflect_dynamic.go @@ -0,0 +1,70 @@ +package jsoniter + +import ( + "github.com/modern-go/reflect2" + "reflect" + "unsafe" +) + +type dynamicEncoder struct { + valType reflect2.Type +} + +func (encoder *dynamicEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + stream.WriteVal(obj) +} + +func (encoder *dynamicEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.valType.UnsafeIndirect(ptr) == nil +} + +type efaceDecoder struct { +} + +func (decoder *efaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + pObj := (*interface{})(ptr) + obj := *pObj + if obj == nil { + *pObj = iter.Read() + return + } + typ := reflect2.TypeOf(obj) + if typ.Kind() != reflect.Ptr { + *pObj = iter.Read() + return + } + ptrType := typ.(*reflect2.UnsafePtrType) + ptrElemType := ptrType.Elem() + if iter.WhatIsNext() == NilValue { + if ptrElemType.Kind() != reflect.Ptr { + iter.skipFourBytes('n', 'u', 'l', 'l') + *pObj = nil + return + } + } + if reflect2.IsNil(obj) { + obj := ptrElemType.New() + iter.ReadVal(obj) + *pObj = obj + return + } + iter.ReadVal(obj) +} + +type ifaceDecoder struct { + valType *reflect2.UnsafeIFaceType +} + +func (decoder *ifaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + decoder.valType.UnsafeSet(ptr, decoder.valType.UnsafeNew()) + return + } + obj := decoder.valType.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + iter.ReportError("decode non empty interface", "can not unmarshal into nil") + return + } + iter.ReadVal(obj) +} diff --git a/extender/code/vendor/github.com/json-iterator/go/reflect_extension.go b/extender/code/vendor/github.com/json-iterator/go/reflect_extension.go new file mode 100644 index 000000000..74a97bfe5 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/reflect_extension.go @@ -0,0 +1,483 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "reflect" + "sort" + "strings" + "unicode" + "unsafe" +) + +var typeDecoders = map[string]ValDecoder{} +var fieldDecoders = map[string]ValDecoder{} +var typeEncoders = map[string]ValEncoder{} +var fieldEncoders = map[string]ValEncoder{} +var extensions = []Extension{} + +// StructDescriptor describe how should we encode/decode the struct +type StructDescriptor struct { + Type reflect2.Type + Fields []*Binding +} + +// GetField get one field from the descriptor by its name. +// Can not use map here to keep field orders. +func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding { + for _, binding := range structDescriptor.Fields { + if binding.Field.Name() == fieldName { + return binding + } + } + return nil +} + +// Binding describe how should we encode/decode the struct field +type Binding struct { + levels []int + Field reflect2.StructField + FromNames []string + ToNames []string + Encoder ValEncoder + Decoder ValDecoder +} + +// Extension the one for all SPI. Customize encoding/decoding by specifying alternate encoder/decoder. +// Can also rename fields by UpdateStructDescriptor. 
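+//
+// The usual pattern is to embed DummyExtension (below) and override only the
+// hooks you need, then install it with RegisterExtension; the lower-casing
+// rename in this sketch is illustrative:
+//
+//	type lowerCaseExtension struct{ jsoniter.DummyExtension }
+//
+//	func (ext *lowerCaseExtension) UpdateStructDescriptor(sd *jsoniter.StructDescriptor) {
+//		for _, binding := range sd.Fields {
+//			name := strings.ToLower(binding.Field.Name())
+//			binding.FromNames = []string{name}
+//			binding.ToNames = []string{name}
+//		}
+//	}
+//
+//	// elsewhere: jsoniter.RegisterExtension(&lowerCaseExtension{})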
+type Extension interface { + UpdateStructDescriptor(structDescriptor *StructDescriptor) + CreateMapKeyDecoder(typ reflect2.Type) ValDecoder + CreateMapKeyEncoder(typ reflect2.Type) ValEncoder + CreateDecoder(typ reflect2.Type) ValDecoder + CreateEncoder(typ reflect2.Type) ValEncoder + DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder + DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder +} + +// DummyExtension embed this type get dummy implementation for all methods of Extension +type DummyExtension struct { +} + +// UpdateStructDescriptor No-op +func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateMapKeyDecoder No-op +func (extension *DummyExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension *DummyExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// CreateDecoder No-op +func (extension *DummyExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateEncoder No-op +func (extension *DummyExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension *DummyExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension *DummyExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type EncoderExtension map[reflect2.Type]ValEncoder + +// UpdateStructDescriptor No-op +func (extension EncoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateDecoder No-op +func (extension EncoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateEncoder get encoder from map +func (extension EncoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return extension[typ] +} + +// CreateMapKeyDecoder No-op +func (extension EncoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension EncoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension EncoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension EncoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type DecoderExtension map[reflect2.Type]ValDecoder + +// UpdateStructDescriptor No-op +func (extension DecoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateMapKeyDecoder No-op +func (extension DecoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension DecoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// CreateDecoder get decoder from map +func (extension DecoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return extension[typ] +} + +// CreateEncoder No-op +func (extension DecoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension DecoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension DecoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder 
+} + +type funcDecoder struct { + fun DecoderFunc +} + +func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.fun(ptr, iter) +} + +type funcEncoder struct { + fun EncoderFunc + isEmptyFunc func(ptr unsafe.Pointer) bool +} + +func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.fun(ptr, stream) +} + +func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool { + if encoder.isEmptyFunc == nil { + return false + } + return encoder.isEmptyFunc(ptr) +} + +// DecoderFunc the function form of TypeDecoder +type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator) + +// EncoderFunc the function form of TypeEncoder +type EncoderFunc func(ptr unsafe.Pointer, stream *Stream) + +// RegisterTypeDecoderFunc register TypeDecoder for a type with function +func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) { + typeDecoders[typ] = &funcDecoder{fun} +} + +// RegisterTypeDecoder register TypeDecoder for a typ +func RegisterTypeDecoder(typ string, decoder ValDecoder) { + typeDecoders[typ] = decoder +} + +// RegisterFieldDecoderFunc register TypeDecoder for a struct field with function +func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) { + RegisterFieldDecoder(typ, field, &funcDecoder{fun}) +} + +// RegisterFieldDecoder register TypeDecoder for a struct field +func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) { + fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder +} + +// RegisterTypeEncoderFunc register TypeEncoder for a type with encode/isEmpty function +func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc} +} + +// RegisterTypeEncoder register TypeEncoder for a type +func RegisterTypeEncoder(typ string, encoder ValEncoder) { + typeEncoders[typ] = encoder +} + +// RegisterFieldEncoderFunc register TypeEncoder for a struct field with encode/isEmpty function +func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc}) +} + +// RegisterFieldEncoder register TypeEncoder for a struct field +func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) { + fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder +} + +// RegisterExtension register extension +func RegisterExtension(extension Extension) { + extensions = append(extensions, extension) +} + +func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := _getTypeDecoderFromExtension(ctx, typ) + if decoder != nil { + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) + for _, extension := range ctx.extraExtensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + } + return decoder +} +func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { + for _, extension := range extensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + decoder := ctx.decoderExtension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + for _, extension := range ctx.extraExtensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + typeName := typ.String() + decoder = typeDecoders[typeName] + if decoder != nil { + return decoder + } + if typ.Kind() == reflect.Ptr { + ptrType := 
typ.(*reflect2.UnsafePtrType) + decoder := typeDecoders[ptrType.Elem().String()] + if decoder != nil { + return &OptionalDecoder{ptrType.Elem(), decoder} + } + } + return nil +} + +func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := _getTypeEncoderFromExtension(ctx, typ) + if encoder != nil { + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) + for _, extension := range ctx.extraExtensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + } + return encoder +} + +func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { + for _, extension := range extensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + encoder := ctx.encoderExtension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + for _, extension := range ctx.extraExtensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + typeName := typ.String() + encoder = typeEncoders[typeName] + if encoder != nil { + return encoder + } + if typ.Kind() == reflect.Ptr { + typePtr := typ.(*reflect2.UnsafePtrType) + encoder := typeEncoders[typePtr.Elem().String()] + if encoder != nil { + return &OptionalEncoder{encoder} + } + } + return nil +} + +func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor { + structType := typ.(*reflect2.UnsafeStructType) + embeddedBindings := []*Binding{} + bindings := []*Binding{} + for i := 0; i < structType.NumField(); i++ { + field := structType.Field(i) + tag, hastag := field.Tag().Lookup(ctx.getTagKey()) + if ctx.onlyTaggedField && !hastag && !field.Anonymous() { + continue + } + if tag == "-" || field.Name() == "_" { + continue + } + tagParts := strings.Split(tag, ",") + if field.Anonymous() && (tag == "" || tagParts[0] == "") { + if field.Type().Kind() == reflect.Struct { + structDescriptor := describeStruct(ctx, field.Type()) + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) + omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} + binding.Decoder = &structFieldDecoder{field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } else if field.Type().Kind() == reflect.Ptr { + ptrType := field.Type().(*reflect2.UnsafePtrType) + if ptrType.Elem().Kind() == reflect.Struct { + structDescriptor := describeStruct(ctx, ptrType.Elem()) + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) 
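+					// embedded *struct: the dereference wrappers added below
+					// follow the pointer before the inner field's offset is applied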
+ omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &dereferenceEncoder{binding.Encoder} + binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} + binding.Decoder = &dereferenceDecoder{ptrType.Elem(), binding.Decoder} + binding.Decoder = &structFieldDecoder{field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } + } + } + fieldNames := calcFieldNames(field.Name(), tagParts[0], tag) + fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name()) + decoder := fieldDecoders[fieldCacheKey] + if decoder == nil { + decoder = decoderOfType(ctx.append(field.Name()), field.Type()) + } + encoder := fieldEncoders[fieldCacheKey] + if encoder == nil { + encoder = encoderOfType(ctx.append(field.Name()), field.Type()) + } + binding := &Binding{ + Field: field, + FromNames: fieldNames, + ToNames: fieldNames, + Decoder: decoder, + Encoder: encoder, + } + binding.levels = []int{i} + bindings = append(bindings, binding) + } + return createStructDescriptor(ctx, typ, bindings, embeddedBindings) +} +func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor { + structDescriptor := &StructDescriptor{ + Type: typ, + Fields: bindings, + } + for _, extension := range extensions { + extension.UpdateStructDescriptor(structDescriptor) + } + ctx.encoderExtension.UpdateStructDescriptor(structDescriptor) + ctx.decoderExtension.UpdateStructDescriptor(structDescriptor) + for _, extension := range ctx.extraExtensions { + extension.UpdateStructDescriptor(structDescriptor) + } + processTags(structDescriptor, ctx.frozenConfig) + // merge normal & embedded bindings & sort with original order + allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...)) + sort.Sort(allBindings) + structDescriptor.Fields = allBindings + return structDescriptor +} + +type sortableBindings []*Binding + +func (bindings sortableBindings) Len() int { + return len(bindings) +} + +func (bindings sortableBindings) Less(i, j int) bool { + left := bindings[i].levels + right := bindings[j].levels + k := 0 + for { + if left[k] < right[k] { + return true + } else if left[k] > right[k] { + return false + } + k++ + } +} + +func (bindings sortableBindings) Swap(i, j int) { + bindings[i], bindings[j] = bindings[j], bindings[i] +} + +func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) { + for _, binding := range structDescriptor.Fields { + shouldOmitEmpty := false + tagParts := strings.Split(binding.Field.Tag().Get(cfg.getTagKey()), ",") + for _, tagPart := range tagParts[1:] { + if tagPart == "omitempty" { + shouldOmitEmpty = true + } else if tagPart == "string" { + if binding.Field.Type().Kind() == reflect.String { + binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg} + binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg} + } else { + binding.Decoder = &stringModeNumberDecoder{binding.Decoder} + binding.Encoder = &stringModeNumberEncoder{binding.Encoder} + } + } + } + binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder} + binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty} + } +} + +func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string { + // ignore? + if wholeTag == "-" { + return []string{} + } + // rename? 
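+	// e.g. `json:"nickName,omitempty"` arrives here with tagProvidedFieldName
+	// "nickName"; an empty tag name falls back to the original Go field name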
+ var fieldNames []string + if tagProvidedFieldName == "" { + fieldNames = []string{originalFieldName} + } else { + fieldNames = []string{tagProvidedFieldName} + } + // private? + isNotExported := unicode.IsLower(rune(originalFieldName[0])) || originalFieldName[0] == '_' + if isNotExported { + fieldNames = []string{} + } + return fieldNames +} diff --git a/extender/code/vendor/github.com/json-iterator/go/reflect_json_number.go b/extender/code/vendor/github.com/json-iterator/go/reflect_json_number.go new file mode 100644 index 000000000..98d45c1ec --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/reflect_json_number.go @@ -0,0 +1,112 @@ +package jsoniter + +import ( + "encoding/json" + "github.com/modern-go/reflect2" + "strconv" + "unsafe" +) + +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +func CastJsonNumber(val interface{}) (string, bool) { + switch typedVal := val.(type) { + case json.Number: + return string(typedVal), true + case Number: + return string(typedVal), true + } + return "", false +} + +var jsonNumberType = reflect2.TypeOfPtr((*json.Number)(nil)).Elem() +var jsoniterNumberType = reflect2.TypeOfPtr((*Number)(nil)).Elem() + +func createDecoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + return nil +} + +func createEncoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + return nil +} + +type jsonNumberCodec struct { +} + +func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*json.Number)(ptr)) = json.Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*json.Number)(ptr)) = "" + default: + *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + number := *((*json.Number)(ptr)) + if len(number) == 0 { + stream.writeByte('0') + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.Number)(ptr))) == 0 +} + +type jsoniterNumberCodec struct { +} + +func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*Number)(ptr)) = Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*Number)(ptr)) = "" + default: + *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + number := *((*Number)(ptr)) + if len(number) == 0 { + stream.writeByte('0') + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*Number)(ptr))) == 0 +} diff --git a/extender/code/vendor/github.com/json-iterator/go/reflect_json_raw_message.go 
b/extender/code/vendor/github.com/json-iterator/go/reflect_json_raw_message.go new file mode 100644 index 000000000..f2619936c --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/reflect_json_raw_message.go @@ -0,0 +1,60 @@ +package jsoniter + +import ( + "encoding/json" + "github.com/modern-go/reflect2" + "unsafe" +) + +var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem() +var jsoniterRawMessageType = reflect2.TypeOfPtr((*RawMessage)(nil)).Elem() + +func createEncoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + return nil +} + +func createDecoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + return nil +} + +type jsonRawMessageCodec struct { +} + +func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes()) +} + +func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) +} + +func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 +} + +type jsoniterRawMessageCodec struct { +} + +func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes()) +} + +func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*RawMessage)(ptr)))) +} + +func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*RawMessage)(ptr))) == 0 +} diff --git a/extender/code/vendor/github.com/json-iterator/go/reflect_map.go b/extender/code/vendor/github.com/json-iterator/go/reflect_map.go new file mode 100644 index 000000000..582967130 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/reflect_map.go @@ -0,0 +1,346 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "sort" + "unsafe" +) + +func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder { + mapType := typ.(*reflect2.UnsafeMapType) + keyDecoder := decoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()) + elemDecoder := decoderOfType(ctx.append("[mapElem]"), mapType.Elem()) + return &mapDecoder{ + mapType: mapType, + keyType: mapType.Key(), + elemType: mapType.Elem(), + keyDecoder: keyDecoder, + elemDecoder: elemDecoder, + } +} + +func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder { + mapType := typ.(*reflect2.UnsafeMapType) + if ctx.sortMapKeys { + return &sortKeysMapEncoder{ + mapType: mapType, + keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), + elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), + } + } + return &mapEncoder{ + mapType: mapType, + keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), + elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), + } +} + +func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := ctx.decoderExtension.CreateMapKeyDecoder(typ) + if decoder != nil { + return decoder + } + for _, extension := range ctx.extraExtensions { + decoder := extension.CreateMapKeyDecoder(typ) + if decoder != nil { + return decoder + } + } + + ptrType := 
reflect2.PtrTo(typ) + if ptrType.Implements(unmarshalerType) { + return &referenceDecoder{ + &unmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(unmarshalerType) { + return &unmarshalerDecoder{ + valType: typ, + } + } + if ptrType.Implements(textUnmarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(textUnmarshalerType) { + return &textUnmarshalerDecoder{ + valType: typ, + } + } + + switch typ.Kind() { + case reflect.String: + return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) + case reflect.Bool, + reflect.Uint8, reflect.Int8, + reflect.Uint16, reflect.Int16, + reflect.Uint32, reflect.Int32, + reflect.Uint64, reflect.Int64, + reflect.Uint, reflect.Int, + reflect.Float32, reflect.Float64, + reflect.Uintptr: + typ = reflect2.DefaultTypeOfKind(typ.Kind()) + return &numericMapKeyDecoder{decoderOfType(ctx, typ)} + default: + return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)} + } +} + +func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := ctx.encoderExtension.CreateMapKeyEncoder(typ) + if encoder != nil { + return encoder + } + for _, extension := range ctx.extraExtensions { + encoder := extension.CreateMapKeyEncoder(typ) + if encoder != nil { + return encoder + } + } + + if typ == textMarshalerType { + return &directTextMarshalerEncoder{ + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + if typ.Implements(textMarshalerType) { + return &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + + switch typ.Kind() { + case reflect.String: + return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) + case reflect.Bool, + reflect.Uint8, reflect.Int8, + reflect.Uint16, reflect.Int16, + reflect.Uint32, reflect.Int32, + reflect.Uint64, reflect.Int64, + reflect.Uint, reflect.Int, + reflect.Float32, reflect.Float64, + reflect.Uintptr: + typ = reflect2.DefaultTypeOfKind(typ.Kind()) + return &numericMapKeyEncoder{encoderOfType(ctx, typ)} + default: + if typ.Kind() == reflect.Interface { + return &dynamicMapKeyEncoder{ctx, typ} + } + return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)} + } +} + +type mapDecoder struct { + mapType *reflect2.UnsafeMapType + keyType reflect2.Type + elemType reflect2.Type + keyDecoder ValDecoder + elemDecoder ValDecoder +} + +func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + mapType := decoder.mapType + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + *(*unsafe.Pointer)(ptr) = nil + mapType.UnsafeSet(ptr, mapType.UnsafeNew()) + return + } + if mapType.UnsafeIsNil(ptr) { + mapType.UnsafeSet(ptr, mapType.UnsafeMakeMap(0)) + } + if c != '{' { + iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) + return + } + c = iter.nextToken() + if c == '}' { + return + } + iter.unreadByte() + key := decoder.keyType.UnsafeNew() + decoder.keyDecoder.Decode(key, iter) + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return + } + elem := decoder.elemType.UnsafeNew() + decoder.elemDecoder.Decode(elem, iter) + decoder.mapType.UnsafeSetIndex(ptr, key, elem) + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + key := decoder.keyType.UnsafeNew() + decoder.keyDecoder.Decode(key, iter) + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadMapCB", "expect : after object 
field, but found "+string([]byte{c})) + return + } + elem := decoder.elemType.UnsafeNew() + decoder.elemDecoder.Decode(elem, iter) + decoder.mapType.UnsafeSetIndex(ptr, key, elem) + } + if c != '}' { + iter.ReportError("ReadMapCB", `expect }, but found `+string([]byte{c})) + } +} + +type numericMapKeyDecoder struct { + decoder ValDecoder +} + +func (decoder *numericMapKeyDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + if c != '"' { + iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) + return + } + decoder.decoder.Decode(ptr, iter) + c = iter.nextToken() + if c != '"' { + iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) + return + } +} + +type numericMapKeyEncoder struct { + encoder ValEncoder +} + +func (encoder *numericMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.encoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type dynamicMapKeyEncoder struct { + ctx *ctx + valType reflect2.Type +} + +func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream) +} + +func (encoder *dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { + obj := encoder.valType.UnsafeIndirect(ptr) + return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj)) +} + +type mapEncoder struct { + mapType *reflect2.UnsafeMapType + keyEncoder ValEncoder + elemEncoder ValEncoder +} + +func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *(*unsafe.Pointer)(ptr) == nil { + stream.WriteNil() + return + } + stream.WriteObjectStart() + iter := encoder.mapType.UnsafeIterate(ptr) + for i := 0; iter.HasNext(); i++ { + if i != 0 { + stream.WriteMore() + } + key, elem := iter.UnsafeNext() + encoder.keyEncoder.Encode(key, stream) + if stream.indention > 0 { + stream.writeTwoBytes(byte(':'), byte(' ')) + } else { + stream.writeByte(':') + } + encoder.elemEncoder.Encode(elem, stream) + } + stream.WriteObjectEnd() +} + +func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + iter := encoder.mapType.UnsafeIterate(ptr) + return !iter.HasNext() +} + +type sortKeysMapEncoder struct { + mapType *reflect2.UnsafeMapType + keyEncoder ValEncoder + elemEncoder ValEncoder +} + +func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *(*unsafe.Pointer)(ptr) == nil { + stream.WriteNil() + return + } + stream.WriteObjectStart() + mapIter := encoder.mapType.UnsafeIterate(ptr) + subStream := stream.cfg.BorrowStream(nil) + subStream.Attachment = stream.Attachment + subIter := stream.cfg.BorrowIterator(nil) + keyValues := encodedKeyValues{} + for mapIter.HasNext() { + key, elem := mapIter.UnsafeNext() + subStreamIndex := subStream.Buffered() + encoder.keyEncoder.Encode(key, subStream) + if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil { + stream.Error = subStream.Error + } + encodedKey := subStream.Buffer()[subStreamIndex:] + subIter.ResetBytes(encodedKey) + decodedKey := subIter.ReadString() + if stream.indention > 0 { + subStream.writeTwoBytes(byte(':'), byte(' ')) + } else { + subStream.writeByte(':') + } + encoder.elemEncoder.Encode(elem, subStream) + keyValues = append(keyValues, encodedKV{ + key: decodedKey, + keyValue: subStream.Buffer()[subStreamIndex:], + }) + 
} + sort.Sort(keyValues) + for i, keyValue := range keyValues { + if i != 0 { + stream.WriteMore() + } + stream.Write(keyValue.keyValue) + } + if subStream.Error != nil && stream.Error == nil { + stream.Error = subStream.Error + } + stream.WriteObjectEnd() + stream.cfg.ReturnStream(subStream) + stream.cfg.ReturnIterator(subIter) +} + +func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + iter := encoder.mapType.UnsafeIterate(ptr) + return !iter.HasNext() +} + +type encodedKeyValues []encodedKV + +type encodedKV struct { + key string + keyValue []byte +} + +func (sv encodedKeyValues) Len() int { return len(sv) } +func (sv encodedKeyValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv encodedKeyValues) Less(i, j int) bool { return sv[i].key < sv[j].key } diff --git a/extender/code/vendor/github.com/json-iterator/go/reflect_marshaler.go b/extender/code/vendor/github.com/json-iterator/go/reflect_marshaler.go new file mode 100644 index 000000000..3e21f3756 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/reflect_marshaler.go @@ -0,0 +1,225 @@ +package jsoniter + +import ( + "encoding" + "encoding/json" + "unsafe" + + "github.com/modern-go/reflect2" +) + +var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem() +var unmarshalerType = reflect2.TypeOfPtr((*json.Unmarshaler)(nil)).Elem() +var textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem() +var textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem() + +func createDecoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValDecoder { + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(unmarshalerType) { + return &referenceDecoder{ + &unmarshalerDecoder{ptrType}, + } + } + if ptrType.Implements(textUnmarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ptrType}, + } + } + return nil +} + +func createEncoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == marshalerType { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &directMarshalerEncoder{ + checkIsEmpty: checkIsEmpty, + } + return encoder + } + if typ.Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &marshalerEncoder{ + valType: typ, + checkIsEmpty: checkIsEmpty, + } + return encoder + } + ptrType := reflect2.PtrTo(typ) + if ctx.prefix != "" && ptrType.Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, ptrType) + var encoder ValEncoder = &marshalerEncoder{ + valType: ptrType, + checkIsEmpty: checkIsEmpty, + } + return &referenceEncoder{encoder} + } + if typ == textMarshalerType { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &directTextMarshalerEncoder{ + checkIsEmpty: checkIsEmpty, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + return encoder + } + if typ.Implements(textMarshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + checkIsEmpty: checkIsEmpty, + } + return encoder + } + // if prefix is empty, the type is the root type + if ctx.prefix != "" && ptrType.Implements(textMarshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, ptrType) + var encoder ValEncoder = &textMarshalerEncoder{ + valType: ptrType, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + checkIsEmpty: checkIsEmpty, + } + return &referenceEncoder{encoder} + } + return nil +} + +type marshalerEncoder struct { + 
checkIsEmpty checkIsEmpty + valType reflect2.Type +} + +func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + if encoder.valType.IsNullable() && reflect2.IsNil(obj) { + stream.WriteNil() + return + } + marshaler := obj.(json.Marshaler) + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + // html escape was already done by jsoniter + // but the extra '\n' should be trimed + l := len(bytes) + if l > 0 && bytes[l-1] == '\n' { + bytes = bytes[:l-1] + } + stream.Write(bytes) + } +} + +func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type directMarshalerEncoder struct { + checkIsEmpty checkIsEmpty +} + +func (encoder *directMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + marshaler := *(*json.Marshaler)(ptr) + if marshaler == nil { + stream.WriteNil() + return + } + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + stream.Write(bytes) + } +} + +func (encoder *directMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type textMarshalerEncoder struct { + valType reflect2.Type + stringEncoder ValEncoder + checkIsEmpty checkIsEmpty +} + +func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + if encoder.valType.IsNullable() && reflect2.IsNil(obj) { + stream.WriteNil() + return + } + marshaler := (obj).(encoding.TextMarshaler) + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + str := string(bytes) + encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) + } +} + +func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type directTextMarshalerEncoder struct { + stringEncoder ValEncoder + checkIsEmpty checkIsEmpty +} + +func (encoder *directTextMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + marshaler := *(*encoding.TextMarshaler)(ptr) + if marshaler == nil { + stream.WriteNil() + return + } + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + str := string(bytes) + encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) + } +} + +func (encoder *directTextMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type unmarshalerDecoder struct { + valType reflect2.Type +} + +func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valType := decoder.valType + obj := valType.UnsafeIndirect(ptr) + unmarshaler := obj.(json.Unmarshaler) + iter.nextToken() + iter.unreadByte() // skip spaces + bytes := iter.SkipAndReturnBytes() + err := unmarshaler.UnmarshalJSON(bytes) + if err != nil { + iter.ReportError("unmarshalerDecoder", err.Error()) + } +} + +type textUnmarshalerDecoder struct { + valType reflect2.Type +} + +func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valType := decoder.valType + obj := valType.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + ptrType := valType.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + elem := elemType.UnsafeNew() + ptrType.UnsafeSet(ptr, unsafe.Pointer(&elem)) + obj = valType.UnsafeIndirect(ptr) + } + unmarshaler := (obj).(encoding.TextUnmarshaler) + str := iter.ReadString() + err := unmarshaler.UnmarshalText([]byte(str)) + if err != nil { + 
iter.ReportError("textUnmarshalerDecoder", err.Error()) + } +} diff --git a/extender/code/vendor/github.com/json-iterator/go/reflect_native.go b/extender/code/vendor/github.com/json-iterator/go/reflect_native.go new file mode 100644 index 000000000..f88722d14 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/reflect_native.go @@ -0,0 +1,453 @@ +package jsoniter + +import ( + "encoding/base64" + "reflect" + "strconv" + "unsafe" + + "github.com/modern-go/reflect2" +) + +const ptrSize = 32 << uintptr(^uintptr(0)>>63) + +func createEncoderOfNative(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { + sliceDecoder := decoderOfSlice(ctx, typ) + return &base64Codec{sliceDecoder: sliceDecoder} + } + typeName := typ.String() + kind := typ.Kind() + switch kind { + case reflect.String: + if typeName != "string" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &int32Codec{} + } + return &int64Codec{} + case reflect.Int8: + if typeName != "int8" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint8: + if typeName != "uint8" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) + } + if ptrSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint64: + if typeName != "uint64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) + } + return &boolCodec{} + } + return nil +} + +func createDecoderOfNative(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { + sliceDecoder := decoderOfSlice(ctx, typ) + return &base64Codec{sliceDecoder: sliceDecoder} + } + typeName := typ.String() + 
switch typ.Kind() { + case reflect.String: + if typeName != "string" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &int32Codec{} + } + return &int64Codec{} + case reflect.Int8: + if typeName != "int8" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint8: + if typeName != "uint8" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) + } + if ptrSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint64: + if typeName != "uint64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) + } + return &boolCodec{} + } + return nil +} + +type stringCodec struct { +} + +func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*string)(ptr)) = iter.ReadString() +} + +func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteString(str) +} + +func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +type int8Codec struct { +} + +func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int8)(ptr)) = iter.ReadInt8() + } +} + +func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt8(*((*int8)(ptr))) +} + +func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int8)(ptr)) == 0 +} + +type int16Codec struct { +} + +func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int16)(ptr)) = iter.ReadInt16() + } +} + +func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt16(*((*int16)(ptr))) +} + +func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int16)(ptr)) == 0 +} + 
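+// The fixed codecs in this file share a shape: Decode for the numeric and bool
+// codecs treats JSON null as "keep the zero value" (the ReadNil check), Encode
+// writes the canonical JSON form, and IsEmpty is what the omitempty struct-tag
+// option consults. Illustrative round trip via this package's default config:
+//
+//	var n int32
+//	_ = ConfigDefault.Unmarshal([]byte("42"), &n) // int32Codec.Decode, n == 42
+//	out, _ := ConfigDefault.Marshal(n)            // int32Codec.Encode, out == []byte("42")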
+type int32Codec struct { +} + +func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int32)(ptr)) = iter.ReadInt32() + } +} + +func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt32(*((*int32)(ptr))) +} + +func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int32)(ptr)) == 0 +} + +type int64Codec struct { +} + +func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int64)(ptr)) = iter.ReadInt64() + } +} + +func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt64(*((*int64)(ptr))) +} + +func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int64)(ptr)) == 0 +} + +type uint8Codec struct { +} + +func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint8)(ptr)) = iter.ReadUint8() + } +} + +func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint8(*((*uint8)(ptr))) +} + +func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint8)(ptr)) == 0 +} + +type uint16Codec struct { +} + +func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint16)(ptr)) = iter.ReadUint16() + } +} + +func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint16(*((*uint16)(ptr))) +} + +func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint16)(ptr)) == 0 +} + +type uint32Codec struct { +} + +func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint32)(ptr)) = iter.ReadUint32() + } +} + +func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint32(*((*uint32)(ptr))) +} + +func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint32)(ptr)) == 0 +} + +type uint64Codec struct { +} + +func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint64)(ptr)) = iter.ReadUint64() + } +} + +func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint64(*((*uint64)(ptr))) +} + +func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint64)(ptr)) == 0 +} + +type float32Codec struct { +} + +func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float32)(ptr)) = iter.ReadFloat32() + } +} + +func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32(*((*float32)(ptr))) +} + +func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type float64Codec struct { +} + +func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float64)(ptr)) = iter.ReadFloat64() + } +} + +func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64(*((*float64)(ptr))) +} + +func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +type boolCodec struct { +} + +func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*bool)(ptr)) = iter.ReadBool() + } +} + +func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteBool(*((*bool)(ptr))) +} + +func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool { + return !(*((*bool)(ptr))) +} + +type base64Codec struct { + sliceType 
*reflect2.UnsafeSliceType + sliceDecoder ValDecoder +} + +func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + codec.sliceType.UnsafeSetNil(ptr) + return + } + switch iter.WhatIsNext() { + case StringValue: + src := iter.ReadString() + dst, err := base64.StdEncoding.DecodeString(src) + if err != nil { + iter.ReportError("decode base64", err.Error()) + } else { + codec.sliceType.UnsafeSet(ptr, unsafe.Pointer(&dst)) + } + case ArrayValue: + codec.sliceDecoder.Decode(ptr, iter) + default: + iter.ReportError("base64Codec", "invalid input") + } +} + +func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + if codec.sliceType.UnsafeIsNil(ptr) { + stream.WriteNil() + return + } + src := *((*[]byte)(ptr)) + encoding := base64.StdEncoding + stream.writeByte('"') + if len(src) != 0 { + size := encoding.EncodedLen(len(src)) + buf := make([]byte, size) + encoding.Encode(buf, src) + stream.buf = append(stream.buf, buf...) + } + stream.writeByte('"') +} + +func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*[]byte)(ptr))) == 0 +} diff --git a/extender/code/vendor/github.com/json-iterator/go/reflect_optional.go b/extender/code/vendor/github.com/json-iterator/go/reflect_optional.go new file mode 100644 index 000000000..fa71f4748 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/reflect_optional.go @@ -0,0 +1,129 @@ +package jsoniter + +import ( + "github.com/modern-go/reflect2" + "unsafe" +) + +func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder { + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + decoder := decoderOfType(ctx, elemType) + return &OptionalDecoder{elemType, decoder} +} + +func encoderOfOptional(ctx *ctx, typ reflect2.Type) ValEncoder { + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + elemEncoder := encoderOfType(ctx, elemType) + encoder := &OptionalEncoder{elemEncoder} + return encoder +} + +type OptionalDecoder struct { + ValueType reflect2.Type + ValueDecoder ValDecoder +} + +func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + *((*unsafe.Pointer)(ptr)) = nil + } else { + if *((*unsafe.Pointer)(ptr)) == nil { + //pointer to null, we have to allocate memory to hold the value + newPtr := decoder.ValueType.UnsafeNew() + decoder.ValueDecoder.Decode(newPtr, iter) + *((*unsafe.Pointer)(ptr)) = newPtr + } else { + //reuse existing instance + decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } + } +} + +type dereferenceDecoder struct { + // only to deference a pointer + valueType reflect2.Type + valueDecoder ValDecoder +} + +func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if *((*unsafe.Pointer)(ptr)) == nil { + //pointer to null, we have to allocate memory to hold the value + newPtr := decoder.valueType.UnsafeNew() + decoder.valueDecoder.Decode(newPtr, iter) + *((*unsafe.Pointer)(ptr)) = newPtr + } else { + //reuse existing instance + decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } +} + +type OptionalEncoder struct { + ValueEncoder ValEncoder +} + +func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*unsafe.Pointer)(ptr)) == nil +} + +type dereferenceEncoder struct { + ValueEncoder 
ValEncoder +} + +func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder *dereferenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + dePtr := *((*unsafe.Pointer)(ptr)) + if dePtr == nil { + return true + } + return encoder.ValueEncoder.IsEmpty(dePtr) +} + +func (encoder *dereferenceEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { + deReferenced := *((*unsafe.Pointer)(ptr)) + if deReferenced == nil { + return true + } + isEmbeddedPtrNil, converted := encoder.ValueEncoder.(IsEmbeddedPtrNil) + if !converted { + return false + } + fieldPtr := unsafe.Pointer(deReferenced) + return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) +} + +type referenceEncoder struct { + encoder ValEncoder +} + +func (encoder *referenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) +} + +func (encoder *referenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) +} + +type referenceDecoder struct { + decoder ValDecoder +} + +func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.decoder.Decode(unsafe.Pointer(&ptr), iter) +} diff --git a/extender/code/vendor/github.com/json-iterator/go/reflect_slice.go b/extender/code/vendor/github.com/json-iterator/go/reflect_slice.go new file mode 100644 index 000000000..9441d79df --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/reflect_slice.go @@ -0,0 +1,99 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "unsafe" +) + +func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder { + sliceType := typ.(*reflect2.UnsafeSliceType) + decoder := decoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) + return &sliceDecoder{sliceType, decoder} +} + +func encoderOfSlice(ctx *ctx, typ reflect2.Type) ValEncoder { + sliceType := typ.(*reflect2.UnsafeSliceType) + encoder := encoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) + return &sliceEncoder{sliceType, encoder} +} + +type sliceEncoder struct { + sliceType *reflect2.UnsafeSliceType + elemEncoder ValEncoder +} + +func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if encoder.sliceType.UnsafeIsNil(ptr) { + stream.WriteNil() + return + } + length := encoder.sliceType.UnsafeLengthOf(ptr) + if length == 0 { + stream.WriteEmptyArray() + return + } + stream.WriteArrayStart() + encoder.elemEncoder.Encode(encoder.sliceType.UnsafeGetIndex(ptr, 0), stream) + for i := 1; i < length; i++ { + stream.WriteMore() + elemPtr := encoder.sliceType.UnsafeGetIndex(ptr, i) + encoder.elemEncoder.Encode(elemPtr, stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error()) + } +} + +func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.sliceType.UnsafeLengthOf(ptr) == 0 +} + +type sliceDecoder struct { + sliceType *reflect2.UnsafeSliceType + elemDecoder ValDecoder +} + +func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error()) + } +} + +func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + sliceType := 
decoder.sliceType + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + sliceType.UnsafeSetNil(ptr) + return + } + if c != '[' { + iter.ReportError("decode slice", "expect [ or n, but found "+string([]byte{c})) + return + } + c = iter.nextToken() + if c == ']' { + sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0)) + return + } + iter.unreadByte() + sliceType.UnsafeGrow(ptr, 1) + elemPtr := sliceType.UnsafeGetIndex(ptr, 0) + decoder.elemDecoder.Decode(elemPtr, iter) + length := 1 + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + idx := length + length += 1 + sliceType.UnsafeGrow(ptr, length) + elemPtr = sliceType.UnsafeGetIndex(ptr, idx) + decoder.elemDecoder.Decode(elemPtr, iter) + } + if c != ']' { + iter.ReportError("decode slice", "expect ], but found "+string([]byte{c})) + return + } +} diff --git a/extender/code/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/extender/code/vendor/github.com/json-iterator/go/reflect_struct_decoder.go new file mode 100644 index 000000000..d7eb0eb5c --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -0,0 +1,1092 @@ +package jsoniter + +import ( + "fmt" + "io" + "strings" + "unsafe" + + "github.com/modern-go/reflect2" +) + +func decoderOfStruct(ctx *ctx, typ reflect2.Type) ValDecoder { + bindings := map[string]*Binding{} + structDescriptor := describeStruct(ctx, typ) + for _, binding := range structDescriptor.Fields { + for _, fromName := range binding.FromNames { + old := bindings[fromName] + if old == nil { + bindings[fromName] = binding + continue + } + ignoreOld, ignoreNew := resolveConflictBinding(ctx.frozenConfig, old, binding) + if ignoreOld { + delete(bindings, fromName) + } + if !ignoreNew { + bindings[fromName] = binding + } + } + } + fields := map[string]*structFieldDecoder{} + for k, binding := range bindings { + fields[k] = binding.Decoder.(*structFieldDecoder) + } + + if !ctx.caseSensitive() { + for k, binding := range bindings { + if _, found := fields[strings.ToLower(k)]; !found { + fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder) + } + } + } + + return createStructDecoder(ctx, typ, fields) +} + +func createStructDecoder(ctx *ctx, typ reflect2.Type, fields map[string]*structFieldDecoder) ValDecoder { + if ctx.disallowUnknownFields { + return &generalStructDecoder{typ: typ, fields: fields, disallowUnknownFields: true} + } + knownHash := map[int64]struct{}{ + 0: {}, + } + + switch len(fields) { + case 0: + return &skipObjectDecoder{typ} + case 1: + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder} + } + case 2: + var fieldHash1 int64 + var fieldHash2 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldHash1 == 0 { + fieldHash1 = fieldHash + fieldDecoder1 = fieldDecoder + } else { + fieldHash2 = fieldHash + fieldDecoder2 = fieldDecoder + } + } + return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2} + case 3: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + 
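+		// createStructDecoder unrolls structs with up to ten fields into
+		// dedicated decoders that dispatch on a precomputed field-name hash
+		// rather than a map lookup. knownHash pre-seeds 0 because 0 doubles as
+		// the "slot unassigned" sentinel in these loops; any hash collision
+		// falls back to generalStructDecoder, which is always correct, merely
+		// slower.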
var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } + } + return &threeFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3} + case 4: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } + } + return &fourFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4} + case 5: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } + } + return &fiveFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5} + case 6: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = 
fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } + } + return &sixFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6} + case 7: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } + } + return &sevenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7} + case 8: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else 
{ + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } + } + return &eightFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8} + case 9: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldName9 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } + } + return &nineFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8, + fieldName9, fieldDecoder9} + case 10: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldName9 int64 + var fieldName10 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + var fieldDecoder10 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + 
fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else if fieldName9 == 0 { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } else { + fieldName10 = fieldHash + fieldDecoder10 = fieldDecoder + } + } + return &tenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8, + fieldName9, fieldDecoder9, + fieldName10, fieldDecoder10} + } + return &generalStructDecoder{typ, fields, false} +} + +type generalStructDecoder struct { + typ reflect2.Type + fields map[string]*structFieldDecoder + disallowUnknownFields bool +} + +func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + var c byte + for c = ','; c == ','; c = iter.nextToken() { + decoder.decodeOneField(ptr, iter) + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + if c != '}' { + iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c})) + } + iter.decrementDepth() +} + +func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) { + var field string + var fieldDecoder *structFieldDecoder + if iter.cfg.objectFieldMustBeSimpleString { + fieldBytes := iter.ReadStringAsSlice() + field = *(*string)(unsafe.Pointer(&fieldBytes)) + fieldDecoder = decoder.fields[field] + if fieldDecoder == nil && !iter.cfg.caseSensitive { + fieldDecoder = decoder.fields[strings.ToLower(field)] + } + } else { + field = iter.ReadString() + fieldDecoder = decoder.fields[field] + if fieldDecoder == nil && !iter.cfg.caseSensitive { + fieldDecoder = decoder.fields[strings.ToLower(field)] + } + } + if fieldDecoder == nil { + if decoder.disallowUnknownFields { + msg := "found unknown field: " + field + iter.ReportError("ReadObject", msg) + } + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + iter.Skip() + return + } + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + fieldDecoder.Decode(ptr, iter) +} + +type skipObjectDecoder struct { + typ reflect2.Type +} + +func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valueType := iter.WhatIsNext() + if valueType != ObjectValue && valueType != NilValue { + iter.ReportError("skipObjectDecoder", "expect object or null") + return + } + iter.Skip() +} + +type oneFieldStructDecoder struct { + typ reflect2.Type + fieldHash int64 + fieldDecoder *structFieldDecoder +} + +func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + if iter.readFieldHash() == decoder.fieldHash { + decoder.fieldDecoder.Decode(ptr, iter) + } else { + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + 
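+	// incrementDepth/decrementDepth bracket every nested object so that a
+	// pathologically deep document is rejected at a configured depth limit
+	// instead of recursing without bound; the same guard repeats in each of
+	// the unrolled decoders that follow.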
iter.decrementDepth() +} + +type twoFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder +} + +func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type threeFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder +} + +func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type fourFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder +} + +func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type fiveFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder +} + +func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + 
decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type sixFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder +} + +func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type sevenFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder +} + +func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type eightFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 
*structFieldDecoder +} + +func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type nineFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder + fieldHash9 int64 + fieldDecoder9 *structFieldDecoder +} + +func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + case decoder.fieldHash9: + decoder.fieldDecoder9.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type tenFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder + fieldHash9 int64 + fieldDecoder9 *structFieldDecoder + fieldHash10 int64 + fieldDecoder10 *structFieldDecoder +} + +func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case 
decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + case decoder.fieldHash9: + decoder.fieldDecoder9.Decode(ptr, iter) + case decoder.fieldHash10: + decoder.fieldDecoder10.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type structFieldDecoder struct { + field reflect2.StructField + fieldDecoder ValDecoder +} + +func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + fieldPtr := decoder.field.UnsafeGet(ptr) + decoder.fieldDecoder.Decode(fieldPtr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%s: %s", decoder.field.Name(), iter.Error.Error()) + } +} + +type stringModeStringDecoder struct { + elemDecoder ValDecoder + cfg *frozenConfig +} + +func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.elemDecoder.Decode(ptr, iter) + str := *((*string)(ptr)) + tempIter := decoder.cfg.BorrowIterator([]byte(str)) + defer decoder.cfg.ReturnIterator(tempIter) + *((*string)(ptr)) = tempIter.ReadString() +} + +type stringModeNumberDecoder struct { + elemDecoder ValDecoder +} + +func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return + } + decoder.elemDecoder.Decode(ptr, iter) + if iter.Error != nil { + return + } + c = iter.readByte() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return + } +} diff --git a/extender/code/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/extender/code/vendor/github.com/json-iterator/go/reflect_struct_encoder.go new file mode 100644 index 000000000..152e3ef5a --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/reflect_struct_encoder.go @@ -0,0 +1,211 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "unsafe" +) + +func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder { + type bindingTo struct { + binding *Binding + toName string + ignored bool + } + orderedBindings := []*bindingTo{} + structDescriptor := describeStruct(ctx, typ) + for _, binding := range structDescriptor.Fields { + for _, toName := range binding.ToNames { + new := &bindingTo{ + binding: binding, + toName: toName, + } + for _, old := range orderedBindings { + if old.toName != toName { + continue + } + old.ignored, new.ignored = resolveConflictBinding(ctx.frozenConfig, old.binding, new.binding) + } + orderedBindings = append(orderedBindings, new) + } + } + if len(orderedBindings) == 0 { + return &emptyStructEncoder{} + } + finalOrderedFields := []structFieldTo{} + for _, bindingTo := range orderedBindings { + if !bindingTo.ignored { + finalOrderedFields = append(finalOrderedFields, 
structFieldTo{ + encoder: bindingTo.binding.Encoder.(*structFieldEncoder), + toName: bindingTo.toName, + }) + } + } + return &structEncoder{typ, finalOrderedFields} +} + +func createCheckIsEmpty(ctx *ctx, typ reflect2.Type) checkIsEmpty { + encoder := createEncoderOfNative(ctx, typ) + if encoder != nil { + return encoder + } + kind := typ.Kind() + switch kind { + case reflect.Interface: + return &dynamicEncoder{typ} + case reflect.Struct: + return &structEncoder{typ: typ} + case reflect.Array: + return &arrayEncoder{} + case reflect.Slice: + return &sliceEncoder{} + case reflect.Map: + return encoderOfMap(ctx, typ) + case reflect.Ptr: + return &OptionalEncoder{} + default: + return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)} + } +} + +func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) { + newTagged := new.Field.Tag().Get(cfg.getTagKey()) != "" + oldTagged := old.Field.Tag().Get(cfg.getTagKey()) != "" + if newTagged { + if oldTagged { + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } else { + return true, false + } + } else { + if oldTagged { + return true, false + } + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } +} + +type structFieldEncoder struct { + field reflect2.StructField + fieldEncoder ValEncoder + omitempty bool +} + +func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + fieldPtr := encoder.field.UnsafeGet(ptr) + encoder.fieldEncoder.Encode(fieldPtr, stream) + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%s: %s", encoder.field.Name(), stream.Error.Error()) + } +} + +func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool { + fieldPtr := encoder.field.UnsafeGet(ptr) + return encoder.fieldEncoder.IsEmpty(fieldPtr) +} + +func (encoder *structFieldEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { + isEmbeddedPtrNil, converted := encoder.fieldEncoder.(IsEmbeddedPtrNil) + if !converted { + return false + } + fieldPtr := encoder.field.UnsafeGet(ptr) + return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) +} + +type IsEmbeddedPtrNil interface { + IsEmbeddedPtrNil(ptr unsafe.Pointer) bool +} + +type structEncoder struct { + typ reflect2.Type + fields []structFieldTo +} + +type structFieldTo struct { + encoder *structFieldEncoder + toName string +} + +func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteObjectStart() + isNotFirst := false + for _, field := range encoder.fields { + if field.encoder.omitempty && field.encoder.IsEmpty(ptr) { + continue + } + if field.encoder.IsEmbeddedPtrNil(ptr) { + continue + } + if isNotFirst { + stream.WriteMore() + } + stream.WriteObjectField(field.toName) + field.encoder.Encode(ptr, stream) + isNotFirst = true + } + stream.WriteObjectEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error()) + } +} + +func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type emptyStructEncoder struct { +} + +func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyObject() +} + +func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type stringModeNumberEncoder struct { + 
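+	// This encoder backs the `json:",string"` tag option for numbers: the
+	// wrapped numeric encoder runs unchanged and its output is quoted, e.g.
+	//
+	//	type Account struct {
+	//		ID int64 `json:"id,string"`
+	//	}
+	//
+	// marshals as {"id":"42"}. Its counterpart, stringModeNumberDecoder in the
+	// struct-decoder file, performs the matching unquoting on the way in.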
elemEncoder ValEncoder +} + +func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.elemEncoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} + +type stringModeStringEncoder struct { + elemEncoder ValEncoder + cfg *frozenConfig +} + +func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + tempStream := encoder.cfg.BorrowStream(nil) + tempStream.Attachment = stream.Attachment + defer encoder.cfg.ReturnStream(tempStream) + encoder.elemEncoder.Encode(ptr, tempStream) + stream.WriteString(string(tempStream.Buffer())) +} + +func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} diff --git a/extender/code/vendor/github.com/json-iterator/go/stream.go b/extender/code/vendor/github.com/json-iterator/go/stream.go new file mode 100644 index 000000000..23d8a3ad6 --- /dev/null +++ b/extender/code/vendor/github.com/json-iterator/go/stream.go @@ -0,0 +1,210 @@ +package jsoniter + +import ( + "io" +) + +// stream is a io.Writer like object, with JSON specific write functions. +// Error is not returned as return value, but stored as Error member on this stream instance. +type Stream struct { + cfg *frozenConfig + out io.Writer + buf []byte + Error error + indention int + Attachment interface{} // open for customized encoder +} + +// NewStream create new stream instance. +// cfg can be jsoniter.ConfigDefault. +// out can be nil if write to internal buffer. +// bufSize is the initial size for the internal buffer in bytes. +func NewStream(cfg API, out io.Writer, bufSize int) *Stream { + return &Stream{ + cfg: cfg.(*frozenConfig), + out: out, + buf: make([]byte, 0, bufSize), + Error: nil, + indention: 0, + } +} + +// Pool returns a pool can provide more stream with same configuration +func (stream *Stream) Pool() StreamPool { + return stream.cfg +} + +// Reset reuse this stream instance by assign a new writer +func (stream *Stream) Reset(out io.Writer) { + stream.out = out + stream.buf = stream.buf[:0] +} + +// Available returns how many bytes are unused in the buffer. +func (stream *Stream) Available() int { + return cap(stream.buf) - len(stream.buf) +} + +// Buffered returns the number of bytes that have been written into the current buffer. +func (stream *Stream) Buffered() int { + return len(stream.buf) +} + +// Buffer if writer is nil, use this method to take the result +func (stream *Stream) Buffer() []byte { + return stream.buf +} + +// SetBuffer allows to append to the internal buffer directly +func (stream *Stream) SetBuffer(buf []byte) { + stream.buf = buf +} + +// Write writes the contents of p into the buffer. +// It returns the number of bytes written. +// If nn < len(p), it also returns an error explaining +// why the write is short. +func (stream *Stream) Write(p []byte) (nn int, err error) { + stream.buf = append(stream.buf, p...) + if stream.out != nil { + nn, err = stream.out.Write(stream.buf) + stream.buf = stream.buf[nn:] + return + } + return len(p), nil +} + +// WriteByte writes a single byte. 
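+// Note that the write* helpers below only append to the in-memory buffer;
+// bytes reach the underlying io.Writer when Write or Flush runs. A minimal
+// usage sketch with the API above (ConfigDefault is this package's default
+// frozen config):
+//
+//	var sb strings.Builder
+//	s := NewStream(ConfigDefault, &sb, 512)
+//	s.WriteObjectStart()
+//	s.WriteObjectField("ok")
+//	s.WriteTrue()
+//	s.WriteObjectEnd()
+//	_ = s.Flush() // sb.String() == `{"ok":true}`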
diff --git a/extender/code/vendor/github.com/json-iterator/go/stream.go b/extender/code/vendor/github.com/json-iterator/go/stream.go
new file mode 100644
index 000000000..23d8a3ad6
--- /dev/null
+++ b/extender/code/vendor/github.com/json-iterator/go/stream.go
@@ -0,0 +1,210 @@
+package jsoniter
+
+import (
+	"io"
+)
+
+// Stream is an io.Writer-like object with JSON-specific write functions.
+// Errors are not returned as return values, but stored in the Error field of the stream instance.
+type Stream struct {
+	cfg        *frozenConfig
+	out        io.Writer
+	buf        []byte
+	Error      error
+	indention  int
+	Attachment interface{} // open for customized encoder
+}
+
+// NewStream creates a new stream instance.
+// cfg can be jsoniter.ConfigDefault.
+// out can be nil to write to the internal buffer only.
+// bufSize is the initial size of the internal buffer in bytes.
+func NewStream(cfg API, out io.Writer, bufSize int) *Stream {
+	return &Stream{
+		cfg:       cfg.(*frozenConfig),
+		out:       out,
+		buf:       make([]byte, 0, bufSize),
+		Error:     nil,
+		indention: 0,
+	}
+}
+
+// Pool returns a pool that can provide more streams with the same configuration.
+func (stream *Stream) Pool() StreamPool {
+	return stream.cfg
+}
+
+// Reset reuses this stream instance by assigning a new writer.
+func (stream *Stream) Reset(out io.Writer) {
+	stream.out = out
+	stream.buf = stream.buf[:0]
+}
+
+// Available returns how many bytes are unused in the buffer.
+func (stream *Stream) Available() int {
+	return cap(stream.buf) - len(stream.buf)
+}
+
+// Buffered returns the number of bytes that have been written into the current buffer.
+func (stream *Stream) Buffered() int {
+	return len(stream.buf)
+}
+
+// Buffer returns the internal buffer; if the writer is nil, use this method to take the result.
+func (stream *Stream) Buffer() []byte {
+	return stream.buf
+}
+
+// SetBuffer allows appending to the internal buffer directly.
+func (stream *Stream) SetBuffer(buf []byte) {
+	stream.buf = buf
+}
+
+// Write writes the contents of p into the buffer.
+// It returns the number of bytes written.
+// If nn < len(p), it also returns an error explaining
+// why the write is short.
+func (stream *Stream) Write(p []byte) (nn int, err error) {
+	stream.buf = append(stream.buf, p...)
+	if stream.out != nil {
+		nn, err = stream.out.Write(stream.buf)
+		stream.buf = stream.buf[nn:]
+		return
+	}
+	return len(p), nil
+}
+
+// writeByte writes a single byte.
+func (stream *Stream) writeByte(c byte) {
+	stream.buf = append(stream.buf, c)
+}
+
+func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) {
+	stream.buf = append(stream.buf, c1, c2)
+}
+
+func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) {
+	stream.buf = append(stream.buf, c1, c2, c3)
+}
+
+func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) {
+	stream.buf = append(stream.buf, c1, c2, c3, c4)
+}
+
+func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) {
+	stream.buf = append(stream.buf, c1, c2, c3, c4, c5)
+}
+
+// Flush writes any buffered data to the underlying io.Writer.
+func (stream *Stream) Flush() error {
+	if stream.out == nil {
+		return nil
+	}
+	if stream.Error != nil {
+		return stream.Error
+	}
+	_, err := stream.out.Write(stream.buf)
+	if err != nil {
+		if stream.Error == nil {
+			stream.Error = err
+		}
+		return err
+	}
+	stream.buf = stream.buf[:0]
+	return nil
+}
+
+// WriteRaw writes a string out without quotes, as raw bytes.
+func (stream *Stream) WriteRaw(s string) {
+	stream.buf = append(stream.buf, s...)
+}
+
+// WriteNil writes null to the stream.
+func (stream *Stream) WriteNil() {
+	stream.writeFourBytes('n', 'u', 'l', 'l')
+}
+
+// WriteTrue writes true to the stream.
+func (stream *Stream) WriteTrue() {
+	stream.writeFourBytes('t', 'r', 'u', 'e')
+}
+
+// WriteFalse writes false to the stream.
+func (stream *Stream) WriteFalse() {
+	stream.writeFiveBytes('f', 'a', 'l', 's', 'e')
+}
+
+// WriteBool writes true or false to the stream.
+func (stream *Stream) WriteBool(val bool) {
+	if val {
+		stream.WriteTrue()
+	} else {
+		stream.WriteFalse()
+	}
+}
+
+// WriteObjectStart writes { with possible indention.
+func (stream *Stream) WriteObjectStart() {
+	stream.indention += stream.cfg.indentionStep
+	stream.writeByte('{')
+	stream.writeIndention(0)
+}
+
+// WriteObjectField writes "field": with possible indention.
+func (stream *Stream) WriteObjectField(field string) {
+	stream.WriteString(field)
+	if stream.indention > 0 {
+		stream.writeTwoBytes(':', ' ')
+	} else {
+		stream.writeByte(':')
+	}
+}
+
+// WriteObjectEnd writes } with possible indention.
+func (stream *Stream) WriteObjectEnd() {
+	stream.writeIndention(stream.cfg.indentionStep)
+	stream.indention -= stream.cfg.indentionStep
+	stream.writeByte('}')
+}
+
+// WriteEmptyObject writes {}.
+func (stream *Stream) WriteEmptyObject() {
+	stream.writeByte('{')
+	stream.writeByte('}')
+}
+
+// WriteMore writes , with possible indention.
+func (stream *Stream) WriteMore() {
+	stream.writeByte(',')
+	stream.writeIndention(0)
+}
+
+// WriteArrayStart writes [ with possible indention.
+func (stream *Stream) WriteArrayStart() {
+	stream.indention += stream.cfg.indentionStep
+	stream.writeByte('[')
+	stream.writeIndention(0)
+}
+
+// WriteEmptyArray writes [].
+func (stream *Stream) WriteEmptyArray() {
+	stream.writeTwoBytes('[', ']')
+}
+
+// WriteArrayEnd writes ] with possible indention.
+func (stream *Stream) WriteArrayEnd() {
+	stream.writeIndention(stream.cfg.indentionStep)
+	stream.indention -= stream.cfg.indentionStep
+	stream.writeByte(']')
+}
+
+func (stream *Stream) writeIndention(delta int) {
+	if stream.indention == 0 {
+		return
+	}
+	stream.writeByte('\n')
+	toWrite := stream.indention - delta
+	for i := 0; i < toWrite; i++ {
+		stream.buf = append(stream.buf, ' ')
+	}
+}
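Reviewer note: a Stream can be driven by hand to emit JSON incrementally; nothing reaches the io.Writer until Flush (or until Write is called with a non-nil out). A sketch of direct use, built only from the API shown above and assuming the vendored copy matches upstream:

package main

import (
	"os"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// 64 bytes is the initial buffer size; the buffer grows as needed.
	stream := jsoniter.NewStream(jsoniter.ConfigDefault, os.Stdout, 64)
	stream.WriteObjectStart()
	stream.WriteObjectField("name")
	stream.WriteString("extender")
	stream.WriteMore()
	stream.WriteObjectField("count")
	stream.WriteInt(3)
	stream.WriteObjectEnd()
	// Errors accumulate in stream.Error; Flush reports them as well.
	if err := stream.Flush(); err != nil {
		panic(err)
	}
	// Prints {"name":"extender","count":3} (no indention with ConfigDefault).
}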
diff --git a/extender/code/vendor/github.com/json-iterator/go/stream_float.go b/extender/code/vendor/github.com/json-iterator/go/stream_float.go
new file mode 100644
index 000000000..826aa594a
--- /dev/null
+++ b/extender/code/vendor/github.com/json-iterator/go/stream_float.go
@@ -0,0 +1,111 @@
+package jsoniter
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+)
+
+var pow10 []uint64
+
+func init() {
+	pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000}
+}
+
+// WriteFloat32 writes float32 to the stream.
+func (stream *Stream) WriteFloat32(val float32) {
+	if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) {
+		stream.Error = fmt.Errorf("unsupported value: %f", val)
+		return
+	}
+	abs := math.Abs(float64(val))
+	fmt := byte('f')
+	// Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
+	if abs != 0 {
+		if float32(abs) < 1e-6 || float32(abs) >= 1e21 {
+			fmt = 'e'
+		}
+	}
+	stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 32)
+}
+
+// WriteFloat32Lossy writes float32 to the stream with only 6 digits of precision, but is much faster.
+func (stream *Stream) WriteFloat32Lossy(val float32) {
+	if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) {
+		stream.Error = fmt.Errorf("unsupported value: %f", val)
+		return
+	}
+	if val < 0 {
+		stream.writeByte('-')
+		val = -val
+	}
+	if val > 0x4ffffff {
+		stream.WriteFloat32(val)
+		return
+	}
+	precision := 6
+	exp := uint64(1000000) // 10^6
+	lval := uint64(float64(val)*float64(exp) + 0.5)
+	stream.WriteUint64(lval / exp)
+	fval := lval % exp
+	if fval == 0 {
+		return
+	}
+	stream.writeByte('.')
+	for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
+		stream.writeByte('0')
+	}
+	stream.WriteUint64(fval)
+	for stream.buf[len(stream.buf)-1] == '0' {
+		stream.buf = stream.buf[:len(stream.buf)-1]
+	}
+}
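Reviewer note: the Lossy variants trade strconv's shortest round-trip formatting for fixed-point integer math with at most six fractional digits, falling back to the exact writer above 0x4ffffff. Worked example: for 3.14159265358979, lval = uint64(3.14159265358979 * 10^6 + 0.5) = 3141593, so the writer emits "3", then ".141593". Under upstream jsoniter this path is enabled by the MarshalFloatWith6Digits config flag (assumption: the vendored copy keeps that Config field):

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	lossy := jsoniter.Config{MarshalFloatWith6Digits: true}.Froze()

	out, _ := lossy.Marshal(3.14159265358979)
	fmt.Println(string(out)) // 3.141593 — rounded to six fractional digits

	out, _ = jsoniter.Marshal(3.14159265358979)
	fmt.Println(string(out)) // 3.14159265358979 — default shortest form
}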
+// WriteFloat64 writes float64 to the stream.
+func (stream *Stream) WriteFloat64(val float64) {
+	if math.IsInf(val, 0) || math.IsNaN(val) {
+		stream.Error = fmt.Errorf("unsupported value: %f", val)
+		return
+	}
+	abs := math.Abs(val)
+	fmt := byte('f')
+	// Note: unlike WriteFloat32 above, plain float64 comparisons give the right cutoffs here.
+	if abs != 0 {
+		if abs < 1e-6 || abs >= 1e21 {
+			fmt = 'e'
+		}
+	}
+	stream.buf = strconv.AppendFloat(stream.buf, val, fmt, -1, 64)
+}
+
+// WriteFloat64Lossy writes float64 to the stream with only 6 digits of precision, but is much faster.
+func (stream *Stream) WriteFloat64Lossy(val float64) {
+	if math.IsInf(val, 0) || math.IsNaN(val) {
+		stream.Error = fmt.Errorf("unsupported value: %f", val)
+		return
+	}
+	if val < 0 {
+		stream.writeByte('-')
+		val = -val
+	}
+	if val > 0x4ffffff {
+		stream.WriteFloat64(val)
+		return
+	}
+	precision := 6
+	exp := uint64(1000000) // 10^6
+	lval := uint64(val*float64(exp) + 0.5)
+	stream.WriteUint64(lval / exp)
+	fval := lval % exp
+	if fval == 0 {
+		return
+	}
+	stream.writeByte('.')
+	for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
+		stream.writeByte('0')
+	}
+	stream.WriteUint64(fval)
+	for stream.buf[len(stream.buf)-1] == '0' {
+		stream.buf = stream.buf[:len(stream.buf)-1]
+	}
+}
diff --git a/extender/code/vendor/github.com/json-iterator/go/stream_int.go b/extender/code/vendor/github.com/json-iterator/go/stream_int.go
new file mode 100644
index 000000000..d1059ee4c
--- /dev/null
+++ b/extender/code/vendor/github.com/json-iterator/go/stream_int.go
@@ -0,0 +1,190 @@
+package jsoniter
+
+var digits []uint32
+
+func init() {
+	digits = make([]uint32, 1000)
+	for i := uint32(0); i < 1000; i++ {
+		digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0'
+		if i < 10 {
+			digits[i] += 2 << 24
+		} else if i < 100 {
+			digits[i] += 1 << 24
+		}
+	}
+}
+
+func writeFirstBuf(space []byte, v uint32) []byte {
+	start := v >> 24
+	if start == 0 {
+		space = append(space, byte(v>>16), byte(v>>8))
+	} else if start == 1 {
+		space = append(space, byte(v>>8))
+	}
+	space = append(space, byte(v))
+	return space
+}
+
+func writeBuf(buf []byte, v uint32) []byte {
+	return append(buf, byte(v>>16), byte(v>>8), byte(v))
+}
+
+// WriteUint8 writes uint8 to the stream.
+func (stream *Stream) WriteUint8(val uint8) {
+	stream.buf = writeFirstBuf(stream.buf, digits[val])
+}
+
+// WriteInt8 writes int8 to the stream.
+func (stream *Stream) WriteInt8(nval int8) {
+	var val uint8
+	if nval < 0 {
+		val = uint8(-nval)
+		stream.buf = append(stream.buf, '-')
+	} else {
+		val = uint8(nval)
+	}
+	stream.buf = writeFirstBuf(stream.buf, digits[val])
+}
+
+// WriteUint16 writes uint16 to the stream.
+func (stream *Stream) WriteUint16(val uint16) {
+	q1 := val / 1000
+	if q1 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[val])
+		return
+	}
+	r1 := val - q1*1000
+	stream.buf = writeFirstBuf(stream.buf, digits[q1])
+	stream.buf = writeBuf(stream.buf, digits[r1])
+	return
+}
+
+// WriteInt16 writes int16 to the stream.
+func (stream *Stream) WriteInt16(nval int16) {
+	var val uint16
+	if nval < 0 {
+		val = uint16(-nval)
+		stream.buf = append(stream.buf, '-')
+	} else {
+		val = uint16(nval)
+	}
+	stream.WriteUint16(val)
+}
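Reviewer note: the digits table packs each value 0-999 into a uint32 — the three low bytes hold the ASCII hundreds/tens/ones digits, and the fourth byte records how many leading zeros to drop (2 for values below 10, 1 below 100). writeFirstBuf uses that count to trim the most significant group, while writeBuf emits later groups fully zero-padded; that is how WriteUint16 prints 42 as "42" but 1042 as "1" plus "042". A standalone re-creation of the scheme (illustrative only, no jsoniter import):

package main

import "fmt"

func main() {
	// Same packing scheme as the init() above.
	digits := make([]uint32, 1000)
	for i := uint32(0); i < 1000; i++ {
		digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0'
		if i < 10 {
			digits[i] += 2 << 24 // skip two leading '0' bytes
		} else if i < 100 {
			digits[i] += 1 << 24 // skip one leading '0' byte
		}
	}

	// Same trimming logic as writeFirstBuf above.
	writeFirstBuf := func(space []byte, v uint32) []byte {
		start := v >> 24
		if start == 0 {
			space = append(space, byte(v>>16), byte(v>>8))
		} else if start == 1 {
			space = append(space, byte(v>>8))
		}
		return append(space, byte(v))
	}

	fmt.Println(string(writeFirstBuf(nil, digits[7])))   // 7
	fmt.Println(string(writeFirstBuf(nil, digits[42])))  // 42
	fmt.Println(string(writeFirstBuf(nil, digits[123]))) // 123
}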
+// WriteUint32 writes uint32 to the stream.
+func (stream *Stream) WriteUint32(val uint32) {
+	q1 := val / 1000
+	if q1 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[val])
+		return
+	}
+	r1 := val - q1*1000
+	q2 := q1 / 1000
+	if q2 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[q1])
+		stream.buf = writeBuf(stream.buf, digits[r1])
+		return
+	}
+	r2 := q1 - q2*1000
+	q3 := q2 / 1000
+	if q3 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[q2])
+	} else {
+		r3 := q2 - q3*1000
+		stream.buf = append(stream.buf, byte(q3+'0'))
+		stream.buf = writeBuf(stream.buf, digits[r3])
+	}
+	stream.buf = writeBuf(stream.buf, digits[r2])
+	stream.buf = writeBuf(stream.buf, digits[r1])
+}
+
+// WriteInt32 writes int32 to the stream.
+func (stream *Stream) WriteInt32(nval int32) {
+	var val uint32
+	if nval < 0 {
+		val = uint32(-nval)
+		stream.buf = append(stream.buf, '-')
+	} else {
+		val = uint32(nval)
+	}
+	stream.WriteUint32(val)
+}
+
+// WriteUint64 writes uint64 to the stream.
+func (stream *Stream) WriteUint64(val uint64) {
+	q1 := val / 1000
+	if q1 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[val])
+		return
+	}
+	r1 := val - q1*1000
+	q2 := q1 / 1000
+	if q2 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[q1])
+		stream.buf = writeBuf(stream.buf, digits[r1])
+		return
+	}
+	r2 := q1 - q2*1000
+	q3 := q2 / 1000
+	if q3 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[q2])
+		stream.buf = writeBuf(stream.buf, digits[r2])
+		stream.buf = writeBuf(stream.buf, digits[r1])
+		return
+	}
+	r3 := q2 - q3*1000
+	q4 := q3 / 1000
+	if q4 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[q3])
+		stream.buf = writeBuf(stream.buf, digits[r3])
+		stream.buf = writeBuf(stream.buf, digits[r2])
+		stream.buf = writeBuf(stream.buf, digits[r1])
+		return
+	}
+	r4 := q3 - q4*1000
+	q5 := q4 / 1000
+	if q5 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[q4])
+		stream.buf = writeBuf(stream.buf, digits[r4])
+		stream.buf = writeBuf(stream.buf, digits[r3])
+		stream.buf = writeBuf(stream.buf, digits[r2])
+		stream.buf = writeBuf(stream.buf, digits[r1])
+		return
+	}
+	r5 := q4 - q5*1000
+	q6 := q5 / 1000
+	if q6 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[q5])
+	} else {
+		stream.buf = writeFirstBuf(stream.buf, digits[q6])
+		r6 := q5 - q6*1000
+		stream.buf = writeBuf(stream.buf, digits[r6])
+	}
+	stream.buf = writeBuf(stream.buf, digits[r5])
+	stream.buf = writeBuf(stream.buf, digits[r4])
+	stream.buf = writeBuf(stream.buf, digits[r3])
+	stream.buf = writeBuf(stream.buf, digits[r2])
+	stream.buf = writeBuf(stream.buf, digits[r1])
+}
+
+// WriteInt64 writes int64 to the stream.
+func (stream *Stream) WriteInt64(nval int64) {
+	var val uint64
+	if nval < 0 {
+		val = uint64(-nval)
+		stream.buf = append(stream.buf, '-')
+	} else {
+		val = uint64(nval)
+	}
+	stream.WriteUint64(val)
+}
+
+// WriteInt writes int to the stream.
+func (stream *Stream) WriteInt(val int) {
+	stream.WriteInt64(int64(val))
+}
+
+// WriteUint writes uint to the stream.
+func (stream *Stream) WriteUint(val uint) {
+	stream.WriteUint64(uint64(val))
+}
diff --git a/extender/code/vendor/github.com/json-iterator/go/stream_str.go b/extender/code/vendor/github.com/json-iterator/go/stream_str.go
new file mode 100644
index 000000000..54c2ba0b3
--- /dev/null
+++ b/extender/code/vendor/github.com/json-iterator/go/stream_str.go
@@ -0,0 +1,372 @@
+package jsoniter
+
+import (
+	"unicode/utf8"
+)
+
+// htmlSafeSet holds the value true if the ASCII character with the given
+// array position can be safely represented inside a JSON string, embedded
+// inside of HTML